k8s Deployment - 17 - Network Plugin and DNS Resolution Service Configuration
In the previous posts we deployed almost every component of our binary-installed Kubernetes cluster. What is still missing is the network plugin and DNS: here we use Calico as the network plugin and CoreDNS for DNS resolution.
Installing the network plugin
https://projectcalico.docs.tigera.io/getting-started/kubernetes/self-managed-onprem/onpremises
Download the calico.yaml manifest from the page above onto node1.
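A minimal sketch of fetching it with curl, assuming the generic manifest URL from the Calico docs (the original post may have used a version-pinned link):
[root@node1 ~]# curl https://docs.projectcalico.org/manifests/calico.yaml -O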
[root@node1 ~]# vim calico.yaml    # two settings need to be changed
# Before:
- name: IP
  value: "autodetect"
# After (take the node's host IP directly):
- name: IP
  valueFrom:
    fieldRef:
      fieldPath: status.hostIP
# Before (commented out by default):
# - name: CALICO_IPV4POOL_CIDR
#   value: "192.168.0.0/16"
# After (must match the pod network CIDR used throughout this series):
- name: CALICO_IPV4POOL_CIDR
  value: "10.200.0.0/16"
[root@node1 ~]#
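Next the edited manifest has to be applied; a minimal sketch, assuming kubectl apply (the exact command was not preserved above):
[root@node1 ~]# kubectl apply -f calico.yaml
Once the calico pods are up, the nodes should report Ready: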
[root@node1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node2 Ready <none> 15h v1.20.2
node3 Ready <none> 15h v1.20.2
[root@node1 ~]#
[root@node1 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-858c9597c8-6gzd5 1/1 Running 0 43m
calico-node-6k479 1/1 Running 0 43m
calico-node-bnbxx 1/1 Running 0 43m
nginx-proxy-node3 1/1 Running 1 15h
[root@node1 ~]#
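As an optional sanity check you can confirm cross-node pod networking. This is only a hedged sketch: the pod names, the busybox image and the <pod-b-IP> placeholder are illustrative.
# start two throwaway pods, check they receive 10.200.0.0/16 addresses, then ping one from the other
[root@node1 ~]# kubectl run pod-a --image=busybox --restart=Never -- sleep 3600
[root@node1 ~]# kubectl run pod-b --image=busybox --restart=Never -- sleep 3600
[root@node1 ~]# kubectl get pod -o wide          # note each pod's IP and node
[root@node1 ~]# kubectl exec pod-a -- ping -c 3 <pod-b-IP>
[root@node1 ~]# kubectl delete pod pod-a pod-b   # clean up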
DNS resolution
# No manual edits are needed in the following file; the ${COREDNS_CLUSTER_IP} placeholder is filled in by the sed command afterwards
[root@node1 ~]# vim coredns.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            prefer_udp
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
      - services
      - pods
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
  - kind: ServiceAccount
    name: coredns
    namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "coredns"
    addonmanager.kubernetes.io/mode: Reconcile
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: ${COREDNS_CLUSTER_IP}
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP
    - name: metrics
      port: 9153
      protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "coredns"
  namespace: kube-system
  labels:
    k8s-app: "kube-dns"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "coredns"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 10%
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      priorityClassName: system-cluster-critical
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: "kubernetes.io/hostname"
              labelSelector:
                matchLabels:
                  k8s-app: kube-dns
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: In
                    values:
                      - ""
      containers:
        - name: coredns
          image: "docker.io/coredns/coredns:1.6.7"
          imagePullPolicy: IfNotPresent
          resources:
            # TODO: Set memory limits when we've profiled the container for large
            # clusters, then set request = limit to keep this container in
            # guaranteed class. Currently, this container falls into the
            # "burstable" category so the kubelet doesn't backoff from restarting it.
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          args: [ "-conf", "/etc/coredns/Corefile" ]
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - all
            readOnlyRootFilesystem: true
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 8181
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
              - key: Corefile
                path: Corefile
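The manifest uses a ${COREDNS_CLUSTER_IP} placeholder, and the sed command below fills it in from a shell variable of the same name, so that variable must already be exported in the current shell. A hedged example only; the address shown is illustrative and must be the IP reserved for cluster DNS inside your own service CIDR:
[root@node1 ~]# export COREDNS_CLUSTER_IP=10.233.0.3   # example value only; use the DNS IP planned for your service network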
[root@node1 ~]# sed -i "s/\${COREDNS_CLUSTER_IP}/${COREDNS_CLUSTER_IP}/g" coredns.yaml
[root@node1 ~]#
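Then apply the rendered manifest; a minimal sketch, assuming kubectl apply:
[root@node1 ~]# kubectl apply -f coredns.yaml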
Deploy NodeLocal DNSCache. It runs a small caching DNS server on every node, bound to the link-local address 169.254.25.10, and forwards cache misses to the CoreDNS service, which cuts DNS latency and reduces the load on CoreDNS:
[root@node1 ~]# vim nodelocaldns.yaml    # again, no manual edits are needed; the placeholder is handled by sed afterwards
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    cluster.local:53 {
        errors
        cache {
            success 9984 30
            denial 9984 5
        }
        reload
        loop
        bind 169.254.25.10
        forward . ${COREDNS_CLUSTER_IP} {
            force_tcp
        }
        prometheus :9253
        health 169.254.25.10:9254
    }
    in-addr.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . ${COREDNS_CLUSTER_IP} {
            force_tcp
        }
        prometheus :9253
    }
    ip6.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . ${COREDNS_CLUSTER_IP} {
            force_tcp
        }
        prometheus :9253
    }
    .:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . /etc/resolv.conf
        prometheus :9253
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: nodelocaldns
  template:
    metadata:
      labels:
        k8s-app: nodelocaldns
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '9253'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: nodelocaldns
      hostNetwork: true
      dnsPolicy: Default  # Don't use cluster DNS.
      tolerations:
        - effect: NoSchedule
          operator: "Exists"
        - effect: NoExecute
          operator: "Exists"
      containers:
        - name: node-cache
          image: "registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/dns_k8s-dns-node-cache:1.16.0"
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          args: [ "-localip", "169.254.25.10", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns" ]
          securityContext:
            privileged: true
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9253
              name: metrics
              protocol: TCP
          livenessProbe:
            httpGet:
              host: 169.254.25.10
              path: /health
              port: 9254
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              host: 169.254.25.10
              path: /health
              port: 9254
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
            - name: xtables-lock
              mountPath: /run/xtables.lock
      volumes:
        - name: config-volume
          configMap:
            name: nodelocaldns
            items:
              - key: Corefile
                path: Corefile
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 20%
    type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
[root@node1 ~]#
[root@node1 ~]# sed -i "s/\${COREDNS_CLUSTER_IP}/${COREDNS_CLUSTER_IP}/g" nodelocaldns.yaml
[root@node1 ~]#
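As with CoreDNS, the rendered manifest still has to be applied; a minimal sketch, assuming kubectl apply:
[root@node1 ~]# kubectl apply -f nodelocaldns.yaml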
Check that the new pods come up in kube-system:
[root@node1 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-858c9597c8-6gzd5 1/1 Running 0 62m
calico-node-6k479 1/1 Running 0 62m
calico-node-bnbxx 1/1 Running 0 62m
coredns-84646c885d-6fsjk 1/1 Running 0 7m11s
coredns-84646c885d-sdb6l 1/1 Running 0 7m11s
nginx-proxy-node3 1/1 Running 1 15h
nodelocaldns-gj9xf 1/1 Running 0 3m17s
nodelocaldns-sw9jh 1/1 Running 0 3m17s
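To confirm that in-cluster DNS resolution actually works, a quick hedged check (the test pod name is illustrative; busybox:1.28 is used because nslookup is broken in newer busybox images):
[root@node1 ~]# kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default.svc.cluster.local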