k8s Series 10: Cluster Verification and Accessing k8s Through a Graphical Interface
In the previous article we successfully deployed the latest version of k8s with kubespray. In this one we look at which services were actually installed, and how to access the cluster through a graphical interface.
Inspecting the cluster
1. List the namespaces
[root@node1 ~]# kubectl get ns
NAME STATUS AGE
default Active 23h
ingress-nginx Active 23h
kube-node-lease Active 23h
kube-public Active 23h
kube-system Active 23h
[root@node1 ~]#
2. What is in the default namespace
[root@node1 ~]# kubectl get all -n default
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.200.0.1 <none> 443/TCP 23h
[root@node1 ~]#
3. What is in the ingress-nginx namespace
[root@node1 ~]# kubectl get all -n ingress-nginx
NAME READY STATUS RESTARTS AGE
pod/ingress-nginx-controller-68hbq 1/1 Running 1 (14m ago) 23h
pod/ingress-nginx-controller-clt8p 1/1 Running 1 (14m ago) 23h
pod/ingress-nginx-controller-hmcf6 1/1 Running 1 (14m ago) 23h
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/ingress-nginx-controller 3 3 3 3 3 kubernetes.io/os=linux 23h
[root@node1 ~]#
4. What is in kube-node-lease and kube-public
[root@node1 ~]# kubectl get all -n kube-node-lease
No resources found in kube-node-lease namespace.
[root@node1 ~]# kubectl get all -n kube-public
No resources found in kube-public namespace.
[root@node1 ~]#
5. What is in kube-system
[root@node1 ~]# kubectl get all -n kube-system
NAME READY STATUS RESTARTS AGE
pod/calico-kube-controllers-5788f6558-tv62d 1/1 Running 2 (21m ago) 24h
pod/calico-node-lv2mq 1/1 Running 1 (21m ago) 24h
pod/calico-node-nvlvd 1/1 Running 1 (21m ago) 24h
pod/calico-node-r9znq 1/1 Running 1 (21m ago) 24h
pod/coredns-8474476ff8-2zmkb 1/1 Running 1 (21m ago) 23h
pod/coredns-8474476ff8-bjssc 1/1 Running 1 (21m ago) 23h
pod/dns-autoscaler-5ffdc7f89d-pstjw 1/1 Running 1 (21m ago) 23h
pod/kube-apiserver-node1 1/1 Running 2 (21m ago) 24h
pod/kube-apiserver-node2 1/1 Running 2 (21m ago) 24h
pod/kube-controller-manager-node1 1/1 Running 2 (21m ago) 24h
pod/kube-controller-manager-node2 1/1 Running 2 (21m ago) 24h
pod/kube-proxy-8qfm5 1/1 Running 1 (21m ago) 24h
pod/kube-proxy-d8d7d 1/1 Running 1 (21m ago) 24h
pod/kube-proxy-vlfb2 1/1 Running 1 (21m ago) 24h
pod/kube-scheduler-node1 1/1 Running 3 (21m ago) 24h
pod/kube-scheduler-node2 1/1 Running 2 (21m ago) 24h
pod/kubernetes-dashboard-548847967d-5fk9w 1/1 Running 1 (21m ago) 23h
pod/kubernetes-metrics-scraper-6d49f96c97-thk7v 1/1 Running 1 (21m ago) 23h
pod/nginx-proxy-node3 1/1 Running 1 (21m ago) 24h
pod/nodelocaldns-5lfw2 0/1 Pending 0 23h
pod/nodelocaldns-9pk5m 1/1 Running 1 (21m ago) 23h
pod/nodelocaldns-d8m7h 0/1 Pending 0 23h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/coredns ClusterIP 10.200.0.3 <none> 53/UDP,53/TCP,9153/TCP 23h
service/dashboard-metrics-scraper ClusterIP 10.200.177.189 <none> 8000/TCP 23h
service/kubernetes-dashboard ClusterIP 10.200.30.147 <none> 443/TCP 23h
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/calico-node 3 3 3 3 3 kubernetes.io/os=linux 24h
daemonset.apps/kube-proxy 3 3 3 3 3 kubernetes.io/os=linux 24h
daemonset.apps/nodelocaldns 3 3 1 3 1 kubernetes.io/os=linux 23h
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/calico-kube-controllers 1/1 1 1 24h
deployment.apps/coredns 2/2 2 2 23h
deployment.apps/dns-autoscaler 1/1 1 1 23h
deployment.apps/kubernetes-dashboard 1/1 1 1 23h
deployment.apps/kubernetes-metrics-scraper 1/1 1 1 23h
NAME DESIRED CURRENT READY AGE
replicaset.apps/calico-kube-controllers-5788f6558 1 1 1 24h
replicaset.apps/coredns-8474476ff8 2 2 2 23h
replicaset.apps/dns-autoscaler-5ffdc7f89d 1 1 1 23h
replicaset.apps/kubernetes-dashboard-548847967d 1 1 1 23h
replicaset.apps/kubernetes-metrics-scraper-6d49f96c97 1 1 1 23h
[root@node1 ~]#
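Before moving on, two quick cluster-level checks complement the per-namespace view (standard kubectl commands, not part of the original output):
# every node should report Ready
kubectl get nodes -o wide
# control plane and cluster DNS endpoints
kubectl cluster-info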
6. Resource types
Pod: the smallest deployable unit in k8s
ReplicaSet: a controller that keeps a given number of Pod replicas running, selecting them by label
Deployment: a controller for stateless applications, which it manages through ReplicaSets (see the illustration after this list)
StatefulSet: manages stateful applications
DaemonSet: runs a copy of a Pod on every matching node
Job: a one-off batch task
CronJob: a batch task that runs on a schedule
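To see how Deployment, ReplicaSet and Pod relate in practice, here is a quick illustration (a sketch; the deployment name web is made up for this example):
# a Deployment creates a ReplicaSet, and the ReplicaSet creates the Pods
kubectl create deployment web --image=nginx:1.19 --replicas=2
kubectl get deployment,replicaset,pod -l app=web
# deleting the Deployment also removes its ReplicaSet and Pods
kubectl delete deployment web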
ingress-nginx
As shown above, the ingress-nginx controller runs as a DaemonSet with one Pod per node. Log in to one of the worker nodes and use crictl to see the containers that are actually running there:
~]# crictl ps
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
570b3da43f846 a9f76bcccfb5f 35 minutes ago Running ingress-nginx-controller 1 a6c5c45508d8c
a11842fca1afe 6570786a0fd3b 36 minutes ago Running calico-node 1 641441cd33f53
c6b5f26c06708 7801cfc6d5c07 36 minutes ago Running kubernetes-metrics-scraper 1 526b653df8827
dae61986d67eb 296a6d5035e2d 36 minutes ago Running coredns 1 a04b5d5cf27a2
f3f2e6a0677c1 72f07539ffb58 36 minutes ago Running kubernetes-dashboard 1 6d28d27cd415b
ce6269e3270f8 fcd3512f2a7c5 36 minutes ago Running calico-kube-controllers 2 5f489f39e9c14
fc1f16997de4a 5bae806f8f123 36 minutes ago Running node-cache 1 331eac1c6a981
7dd92d5eeeebf 8f8fdd6672d48 36 minutes ago Running kube-proxy 1 69655ac71243d
1d5821872d6ac f6987c8d6ed59 36 minutes ago Running nginx-proxy 1 f5e8c2f953ca7
Among them is the nginx-proxy container. Its Pod is defined by a static manifest that the kubelet reads from the node itself:
~]# cat /etc/kubernetes/manifests/nginx-proxy.yml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-proxy
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: kube-nginx
  annotations:
    nginx-cfg-checksum: "a9814dd8ff52d61bc33226a61d3159315ba1c9ad"
spec:
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
  nodeSelector:
    kubernetes.io/os: linux
  priorityClassName: system-node-critical
  containers:
  - name: nginx-proxy
    image: docker.io/library/nginx:1.21.4
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 25m
        memory: 32M
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8081
    readinessProbe:
      httpGet:
        path: /healthz
        port: 8081
    volumeMounts:
    - mountPath: /etc/nginx
      name: etc-nginx
      readOnly: true
  volumes:
  - name: etc-nginx
    hostPath:
      path: /etc/nginx
~]#
Because it lives under /etc/kubernetes/manifests, nginx-proxy is a static Pod managed directly by the kubelet. From the "volumeMounts" section above we can see that the host directory /etc/nginx is mounted into the container, so let's look at what is in it:
~]# cat /etc/nginx/nginx.conf
error_log stderr notice;
worker_processes 2;
worker_rlimit_nofile 130048;
worker_shutdown_timeout 10s;

events {
  multi_accept on;
  use epoll;
  worker_connections 16384;
}

stream {
  upstream kube_apiserver {
    least_conn;
    server 192.168.112.130:6443;
    server 192.168.112.131:6443;
  }

  server {
    listen        127.0.0.1:6443;
    proxy_pass    kube_apiserver;
    proxy_timeout 10m;
    proxy_connect_timeout 1s;
  }
}

http {
  aio threads;
  aio_write on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 5m;
  keepalive_requests 100;
  reset_timedout_connection on;
  server_tokens off;
  autoindex off;

  server {
    listen 8081;
    location /healthz {
      access_log off;
      return 200;
    }
    location /stub_status {
      stub_status on;
      access_log off;
    }
  }
}
~]#
The part worth focusing on is the stream block: on the worker node, nginx-proxy listens on 127.0.0.1:6443 and load-balances (least_conn) connections to the kube-apiservers on the two master nodes, so local components can always reach the API server through a fixed local address:
stream {
  upstream kube_apiserver {
    least_conn;
    server 192.168.112.130:6443;
    server 192.168.112.131:6443;
  }

  server {
    listen        127.0.0.1:6443;
    proxy_pass    kube_apiserver;
    proxy_timeout 10m;
    proxy_connect_timeout 1s;
  }
}
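To confirm from the worker node that this local proxy really fronts the API servers, you can hit an unauthenticated endpoint through it (a quick sketch, not in the original; it assumes anonymous access to /healthz is allowed, which is the Kubernetes default):
# run on the worker node; should print "ok"
curl -k https://127.0.0.1:6443/healthz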
Clean up the proxy
The HTTP proxy that was configured on the nodes for the installation is no longer needed. Check each node for leftover proxy settings; for example, yum on this node still points at the proxy:
/etc/yum.conf:proxy=http://192.168.112.130:8118
Remove the proxy entry on every node so that yum no longer tries to go through it; one way to do this is sketched below.
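The exact commands were elided above; a minimal sketch of the check-and-remove step, in my own wording (run on each node):
# show any proxy setting left in yum's config (prints file:match)
grep -H proxy /etc/yum.conf
# delete the proxy line
sed -i '/^proxy=/d' /etc/yum.conf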
Test the cluster
To verify that the cluster really works, deploy an nginx DaemonSet together with a NodePort Service. Create a working directory and write the following manifest (saved here as nginx-ds.yml; the file name is just a convention):
apiVersion: v1
kind: Service
metadata:
  name: nginx-ds
  labels:
    app: nginx-ds
spec:
  type: NodePort
  selector:
    app: nginx-ds
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  selector:
    matchLabels:
      app: nginx-ds
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.19
        ports:
        - containerPort: 80
[root@node1 k8s]# kubectl apply -f nginx-ds.yml
service/nginx-ds created
daemonset.apps/nginx-ds created
[root@node1 k8s]#
# the -o wide flag prints extra columns; for Pods this includes the node each Pod runs on
[root@node1 k8s]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-kwdp6 1/1 Running 0 8m11s 10.233.28.9 node3 <none> <none>
nginx-ds-tf6mh 1/1 Running 0 8m11s 10.233.44.5 node2 <none> <none>
nginx-ds-v5w8c 1/1 Running 0 8m11s 10.233.154.5 node1 <none> <none>
[root@node1 k8s]#
[root@node1 k8s]# ping 10.233.28.9
PING 10.233.28.9 (10.233.28.9) 56(84) bytes of data.
64 bytes from 10.233.28.9: icmp_seq=1 ttl=63 time=0.662 ms
64 bytes from 10.233.28.9: icmp_seq=2 ttl=63 time=0.505 ms
^C
--- 10.233.28.9 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.505/0.583/0.662/0.082 ms
[root@node1 k8s]# ping 10.233.44.5
PING 10.233.44.5 (10.233.44.5) 56(84) bytes of data.
64 bytes from 10.233.44.5: icmp_seq=1 ttl=63 time=0.409 ms
64 bytes from 10.233.44.5: icmp_seq=2 ttl=63 time=1.40 ms
^C
--- 10.233.44.5 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.409/0.908/1.407/0.499 ms
[root@node1 k8s]# ping 10.233.154.5
PING 10.233.154.5 (10.233.154.5) 56(84) bytes of data.
64 bytes from 10.233.154.5: icmp_seq=1 ttl=64 time=0.163 ms
64 bytes from 10.233.154.5: icmp_seq=2 ttl=64 time=0.061 ms
^C
--- 10.233.154.5 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.061/0.112/0.163/0.051 ms
[root@node1 k8s]#
[root@node1 k8s]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.200.0.1 <none> 443/TCP 27h
nginx-ds NodePort 10.200.255.83 <none> 80:30962/TCP 32m
[root@node1 k8s]#
[root@node1 k8s]# curl http://10.200.255.83:80
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@node1 k8s]#
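Because nginx-ds is a NodePort Service, it should also be reachable on port 30962 of any node address, not only via the ClusterIP (an extra check, not part of the original output):
curl http://192.168.112.130:30962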
Next, create a standalone Pod and use it to check that Service DNS resolution works from inside the cluster:
[root@node1 k8s]# vim nginx-pod.yml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: docker.io/library/nginx:1.19
    ports:
    - containerPort: 80
[root@node1 k8s]#
[root@node1 k8s]# kubectl apply -f nginx-pod.yml
pod/nginx created
[root@node1 k8s]#
[root@node1 k8s]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 1/1 Running 0 5m14s 10.233.28.10 node3 <none> <none>
nginx-ds-kwdp6 1/1 Running 0 51m 10.233.28.9 node3 <none> <none>
nginx-ds-tf6mh 1/1 Running 0 51m 10.233.44.5 node2 <none> <none>
nginx-ds-v5w8c 1/1 Running 0 51m 10.233.154.5 node1 <none> <none>
[root@node1 k8s]# kubectl exec nginx -it -- /bin/bash
root@nginx:/# cat /etc/resolv.conf
search default.svc.cluster.local svc.cluster.local cluster.local localdomain
nameserver 169.254.25.10
options ndots:5
root@nginx:/#
root@nginx:/# curl nginx-ds
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
root@nginx:/#
root@nginx:/# exit
exit
[root@node1 k8s]#
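Note that the nameserver 169.254.25.10 in the Pod's /etc/resolv.conf is the nodelocaldns cache we saw in kube-system, which forwards cluster queries to CoreDNS; that is why curl nginx-ds works by Service name. To see the resolution step on its own (a sketch; getent ships in the Debian-based nginx image):
# run inside the Pod: prints the ClusterIP that nginx-ds resolves to
getent hosts nginx-ds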
Logs
[root@node1 k8s]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 0 61m
nginx-ds-kwdp6 1/1 Running 0 108m
nginx-ds-tf6mh 1/1 Running 0 108m
nginx-ds-v5w8c 1/1 Running 0 108m
[ ]
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
[root@node1 k8s]#
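The kubectl logs invocation that produced the output above was elided; the usual form plus a couple of common variations (a sketch):
kubectl logs nginx                      # dump the log of the nginx Pod
kubectl logs -f nginx-ds-kwdp6          # stream a Pod's log as it grows
kubectl logs --tail=20 nginx-ds-kwdp6   # only the last 20 lines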
Exec
[root@node1 k8s]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-ds-kwdp6 1/1 Running 0 109m
nginx-ds-tf6mh 1/1 Running 0 109m
nginx-ds-v5w8c 1/1 Running 0 109m
[ ]
nginx version: nginx/1.19.10
[root@node1 k8s]#
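The exec invocation itself was elided above; it was presumably of this form, using one of the DaemonSet Pods listed (a sketch):
# run a one-off command inside a Pod
kubectl exec nginx-ds-kwdp6 -- nginx -v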
Access the dashboard
kubespray already deployed kubernetes-dashboard, but its Service is ClusterIP only, so expose it with an additional NodePort Service:
k8s]# vim dashboard-svc.yml
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: dashboard
  labels:
    app: dashboard
spec:
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - name: https
    nodePort: 30000
    port: 443
    targetPort: 8443
k8s]# kubectl apply -f dashboard-svc.yml
service/dashboard created
k8s]#
The dashboard is now reachable in a browser at:
https://192.168.112.130:30000/#/login
Logging in requires a token. First create a dashboard-admin service account:
serviceaccount/dashboard-admin created
Then bind it to a cluster role (for a lab cluster, typically cluster-admin):
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
Finally, retrieve the service account's token; this is what gets pasted into the dashboard login page:
eyJhbGciOiJSUzI1NiIsImtpZCI6Ik9JcGxDOGtHeFZ3YWVZN2FpY19sek5CTVh4dVI5NmRKRURnMGV5dUZTN3cifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tdGN3c3EiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZGMxOTMzZWQtMDRlMC00NGE4LTg2MmYtOWFmNWVhNTJiNGJkIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.VyDiRYKTppNxemq9cVXHNTSeAxeqmtlHjVq5VD8steWg9Az8KcPrryk0bL42XruHZEZi6vZUEd-iZfl0BPCp4UdNHqYSsdPKnUzNzwD-kwBoZfZEtnI9poqwVjaSWakiDTolKeBEMOaHT1TWqA4rffu0DAlxoXkTs8Vu42bc0sfAN2A6ER57VR115-DeGRRvqG4cjrLC5QdLIOiB7w9KHgo1mngk5lffEBLWRZUz3jv6ecFDytSYaGFJ5FdrwYFqID-dKGShQu9y6DXZu8sjkiAr4tUhtga35m4OakbYWCrxFq29jdCj5zbSDQr1Bokxe9Z2zXOSu3rCqoI_3ODIPg
Log in at the URL above with this token to get the graphical view of the cluster.
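The exact commands were lost above; the usual sequence looks roughly like this (a sketch based on standard kubectl usage, not the original commands):
# service account plus cluster-admin binding for dashboard login
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# print the token stored in the service account's secret
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}')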
That's it for this article. Practice, practice, practice.