k8s Deployment - 17 - Network Plugin and DNS Resolution Service Configuration
In the previous articles we deployed almost all of the components of our binary-installed Kubernetes cluster. What remains is the network plugin and DNS. Here we use Calico as the network plugin and CoreDNS for DNS resolution.
Network Plugin Installation
https://projectcalico.docs.tigera.io/getting-started/kubernetes/self-managed-onprem/onpremises
Download calico.yaml to node1 from the Calico on-premises page linked above.
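The exact download command is not preserved in this capture; assuming the standard manifest URL from the Calico on-premises guide linked above, fetching it on node1 would look roughly like:

curl https://projectcalico.docs.tigera.io/manifests/calico.yaml -O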
[root@node1 ~]# vim calico.yaml        # two settings need to be changed

# Before:
- name: IP
  value: "autodetect"
# After:
- name: IP
  valueFrom:
    fieldRef:
      fieldPath: status.hostIP

# Before (commented out by default):
# - name: CALICO_IPV4POOL_CIDR
#   value: "192.168.0.0/16"
# After:
- name: CALICO_IPV4POOL_CIDR
  value: "10.200.0.0/16"

[root@node1 ~]#
Apply the manifest with kubectl apply -f calico.yaml, wait for the Calico pods to start, and then check the cluster state:
[root@node1 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE   VERSION
node2   Ready    <none>   15h   v1.20.2
node3   Ready    <none>   15h   v1.20.2
[root@node1 ~]#
[root@node1 ~]# kubectl get pod -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-858c9597c8-6gzd5   1/1     Running   0          43m
calico-node-6k479                          1/1     Running   0          43m
calico-node-bnbxx                          1/1     Running   0          43m
nginx-proxy-node3                          1/1     Running   1          15h
[root@node1 ~]#
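To confirm the CALICO_IPV4POOL_CIDR change took effect, you could also check where pod IPs are being allocated from: pods on the pod network should get addresses from 10.200.0.0/16, while host-network pods such as calico-node keep the node IP. A minimal check, assuming the manifest created the default IPPool as usual:

kubectl get pod -n kube-system -o wide
kubectl get ippools.crd.projectcalico.org default-ipv4-ippool -o yaml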
DNS Resolution
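Both coredns.yaml below and nodelocaldns.yaml later contain a ${COREDNS_CLUSTER_IP} placeholder that the sed commands substitute, so the variable must be set in the current shell first. The value has to be an address inside the cluster's service CIDR that is reserved for cluster DNS; the one below is only an illustration, use the IP planned for your own cluster:

export COREDNS_CLUSTER_IP=10.233.0.3   # example only - use your cluster's DNS service IP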
# The configuration file below does not need any changes
[root@node1 ~]# vim coredns.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            prefer_udp
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
      - services
      - pods
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
  - kind: ServiceAccount
    name: coredns
    namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "coredns"
    addonmanager.kubernetes.io/mode: Reconcile
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: ${COREDNS_CLUSTER_IP}
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP
    - name: metrics
      port: 9153
      protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "coredns"
  namespace: kube-system
  labels:
    k8s-app: "kube-dns"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "coredns"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 10%
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      priorityClassName: system-cluster-critical
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: "kubernetes.io/hostname"
              labelSelector:
                matchLabels:
                  k8s-app: kube-dns
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: In
                    values:
                      - ""
      containers:
        - name: coredns
          image: "docker.io/coredns/coredns:1.6.7"
          imagePullPolicy: IfNotPresent
          resources:
            # TODO: Set memory limits when we've profiled the container for large
            # clusters, then set request = limit to keep this container in
            # guaranteed class. Currently, this container falls into the
            # "burstable" category so the kubelet doesn't backoff from restarting it.
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          args: [ "-conf", "/etc/coredns/Corefile" ]
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - all
            readOnlyRootFilesystem: true
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 8181
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
              - key: Corefile
                path: Corefile
[root@node1 ~]#
[root@node1 ~]# sed -i "s/\${COREDNS_CLUSTER_IP}/${COREDNS_CLUSTER_IP}/g" coredns.yaml
[root@node1 ~]#
Apply it with kubectl apply -f coredns.yaml and wait for the CoreDNS pods to come up before moving on.
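As a quick sanity check (output will vary by cluster), you can confirm that the Service picked up the clusterIP you substituted and that both replicas become ready:

kubectl -n kube-system get svc coredns
kubectl -n kube-system get deploy coredns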
Deploy NodeLocal DNSCache:
[root@node1 ~]# vim nodelocaldns.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    cluster.local:53 {
        errors
        cache {
            success 9984 30
            denial 9984 5
        }
        reload
        loop
        bind 169.254.25.10
        forward . ${COREDNS_CLUSTER_IP} {
            force_tcp
        }
        prometheus :9253
        health 169.254.25.10:9254
    }
    in-addr.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . ${COREDNS_CLUSTER_IP} {
            force_tcp
        }
        prometheus :9253
    }
    ip6.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . ${COREDNS_CLUSTER_IP} {
            force_tcp
        }
        prometheus :9253
    }
    .:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . /etc/resolv.conf
        prometheus :9253
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: nodelocaldns
  template:
    metadata:
      labels:
        k8s-app: nodelocaldns
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '9253'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: nodelocaldns
      hostNetwork: true
      dnsPolicy: Default  # Don't use cluster DNS.
      tolerations:
        - effect: NoSchedule
          operator: "Exists"
        - effect: NoExecute
          operator: "Exists"
      containers:
        - name: node-cache
          image: "registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/dns_k8s-dns-node-cache:1.16.0"
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          args: [ "-localip", "169.254.25.10", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns" ]
          securityContext:
            privileged: true
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9253
              name: metrics
              protocol: TCP
          livenessProbe:
            httpGet:
              host: 169.254.25.10
              path: /health
              port: 9254
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              host: 169.254.25.10
              path: /health
              port: 9254
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
            - name: xtables-lock
              mountPath: /run/xtables.lock
      volumes:
        - name: config-volume
          configMap:
            name: nodelocaldns
            items:
              - key: Corefile
                path: Corefile
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 20%
    type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
[root@node1 ~]#
[root@node1 ~]# sed -i "s/\${COREDNS_CLUSTER_IP}/${COREDNS_CLUSTER_IP}/g" nodelocaldns.yaml
[root@node1 ~]#
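One thing worth checking (an assumption about your kubelet setup, not something this manifest configures): NodeLocal DNSCache only helps if the kubelets send pod DNS queries to the link-local address 169.254.25.10 that it binds on every node. In kubespray-style deployments like this one the kubelet's clusterDNS is normally pointed at that address instead of the CoreDNS Service IP; with a KubeletConfiguration file the relevant fragment would look like:

# hypothetical KubeletConfiguration fragment - verify against the kubelet config from the earlier articles
clusterDNS:
  - 169.254.25.10
clusterDomain: cluster.local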
Apply it with kubectl apply -f nodelocaldns.yaml, then check that everything in kube-system is running:
[root@node1 ~]# kubectl get pod -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-858c9597c8-6gzd5   1/1     Running   0          62m
calico-node-6k479                          1/1     Running   0          62m
calico-node-bnbxx                          1/1     Running   0          62m
coredns-84646c885d-6fsjk                   1/1     Running   0          7m11s
coredns-84646c885d-sdb6l                   1/1     Running   0          7m11s
nginx-proxy-node3                          1/1     Running   1          15h
nodelocaldns-gj9xf                         1/1     Running   0          3m17s
nodelocaldns-sw9jh                         1/1     Running   0          3m17s
[root@node1 ~]#
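As a final check, an end-to-end lookup from inside a pod confirms that resolution works through the node-local cache. This is just one way to test it (busybox:1.28 is chosen because its nslookup is well behaved; the pod name is arbitrary):

kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default

If it returns the clusterIP of the kubernetes Service, cluster DNS is working.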
