k8s Series 13: Generating Certificates and the Authentication Configuration for Each Component
Communication between the cluster components has to be secure, so all access between them will go over HTTPS. That requires certificates, and in this series we create our own with a self-managed CA.
Install the certificate generation tools
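If cfssl and cfssljson are not installed yet, one common way to fetch the 1.2 binaries is sketched below; the download URLs and install path are assumptions, so adjust them to however you obtain the tools:

# Assumed download source and install path for the cfssl/cfssljson 1.2 binaries
wget -O /usr/local/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget -O /usr/local/bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson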
[root@node1 pki]# cfssl version
Version: 1.2.0
Revision: dev
Runtime: go1.6
[root@node1 pki]#
Root certificate (CA)
[][][][]{"signing": {"default": {"expiry": "876000h"},"profiles": {"kubernetes": {"usages": ["signing", "key encipherment", "server auth", "client auth"],"expiry": "876000h"}}}}EOF[][]{"CN": "Kubernetes","key": {"algo": "rsa","size": 2048},"names": [{"C": "US","L": "Portland","O": "Kubernetes","OU": "CA","ST": "Oregon"}]}EOF[]
[root@node1 pki]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
[root@node1 pki]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem
[root@node1 pki]#
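An optional sanity check on the new CA (not part of the original steps) is to print its subject and validity window:

# Optional: inspect the CA certificate's subject and validity period
openssl x509 -in ca.pem -noout -subject -dates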
admin client certificate
[root@node1 pki]# cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "seven"
    }
  ]
}
EOF
[root@node1 pki]#
[root@node1 pki]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  admin-csr.json | cfssljson -bare admin
[root@node1 pki]# ls
admin.csr  admin-csr.json  admin-key.pem  admin.pem  ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem
[root@node1 pki]#
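A quick optional check (not in the original steps): the O field of this certificate is what the apiserver treats as the group name, and system:masters is bound to cluster-admin by default, which is why this cert gets full admin rights:

# Optional: confirm the subject carries CN=admin and O=system:masters
openssl x509 -in admin.pem -noout -subject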
kubelet client certificates
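The loop below expects two bash arrays describing your workers. The original assignment is not shown above, so here is an assumed example matching the node2/node3 workers and the 192.168.112.x addresses used elsewhere in this series; replace the values with your own:

# Assumed values - adjust to your environment
WORKERS=(node2 node3)
WORKER_IPS=(192.168.112.131 192.168.112.132)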
# Set your worker node list first
[root@node1 pki]# for ((i=0;i<${#WORKERS[@]};i++)); do
cat > ${WORKERS[$i]}-csr.json <<EOF
{
  "CN": "system:node:${WORKERS[$i]}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "O": "system:nodes",
      "OU": "seven",
      "ST": "Beijing"
    }
  ]
}
EOF
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${WORKERS[$i]},${WORKER_IPS[$i]} \
  -profile=kubernetes \
  ${WORKERS[$i]}-csr.json | cfssljson -bare ${WORKERS[$i]}
done
[root@node1 pki]#
[root@node1 pki]# ls
admin.csr  admin-key.pem  ca-config.json  ca-csr.json  ca.pem  node2-csr.json  node2.pem  node3-csr.json  node3.pem
admin-csr.json  admin.pem  ca.csr  ca-key.pem  node2.csr  node2-key.pem  node3.csr  node3-key.pem
[root@node1 pki]#
kube-controller-manager certificate
[root@node1 pki]# cat > kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:kube-controller-manager",
      "OU": "seven"
    }
  ]
}
EOF
[root@node1 pki]#
[root@node1 pki]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
[root@node1 pki]#
[root@node1 pki]# ls
admin.csr  admin.pem  ca-csr.json  kube-controller-manager.csr  kube-controller-manager.pem  node2-key.pem  node3-csr.json
admin-csr.json  ca-config.json  ca-key.pem  kube-controller-manager-csr.json  node2.csr  node2.pem  node3-key.pem
admin-key.pem  ca.csr  ca.pem  kube-controller-manager-key.pem  node2-csr.json  node3.csr  node3.pem
[root@node1 pki]#
kube-proxy client certificate
[root@node1 pki]# cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "seven"
    }
  ]
}
EOF
[root@node1 pki]#
[root@node1 pki]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-proxy-csr.json | cfssljson -bare kube-proxy
[root@node1 pki]# ls
admin.csr  admin.pem  ca-csr.json  kube-controller-manager.csr  kube-controller-manager.pem  kube-proxy-key.pem  node2-csr.json  node3.csr  node3.pem
admin-csr.json  ca-config.json  ca-key.pem  kube-controller-manager-csr.json  kube-proxy.csr  kube-proxy.pem  node2-key.pem  node3-csr.json
admin-key.pem  ca.csr  ca.pem  kube-controller-manager-key.pem  kube-proxy-csr.json  node2.csr  node2.pem  node3-key.pem
[root@node1 pki]#
kube-scheduler certificate
[root@node1 pki]# cat > kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:kube-scheduler",
      "OU": "seven"
    }
  ]
}
EOF
[root@node1 pki]#
[root@node1 pki]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-scheduler-csr.json | cfssljson -bare kube-scheduler
[root@node1 pki]#
[root@node1 pki]# ls
admin.csr  ca-config.json  ca.pem  kube-controller-manager.pem  kube-proxy.pem  kube-scheduler.pem  node2.pem  node3.pem
admin-csr.json  ca.csr  kube-controller-manager.csr  kube-proxy.csr  kube-scheduler.csr  node2.csr  node3.csr
admin-key.pem  ca-csr.json  kube-controller-manager-csr.json  kube-proxy-csr.json  kube-scheduler-csr.json  node2-csr.json  node3-csr.json
admin.pem  ca-key.pem  kube-controller-manager-key.pem  kube-proxy-key.pem  kube-scheduler-key.pem  node2-key.pem  node3-key.pem
[root@node1 pki]#
kube-apiserver certificate
[root@node1 pki]# cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "seven"
    }
  ]
}
EOF
[root@node1 pki]#
# The apiserver's service IP (usually the first IP of the service CIDR); replace it if it conflicts with your existing network
[root@node1 pki]# KUBERNETES_SVC_IP=10.233.0.1
# All master IPs; include public IPs too if you have them (I only have internal addresses here).
# You can list a few extra IPs to leave room for later cluster expansion. Strictly speaking only
# the masters (node1 and node2) are required, but I list all three nodes.
[root@node1 pki]# MASTER_IPS=192.168.112.130,192.168.112.131,192.168.112.132
[root@node1 pki]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${KUBERNETES_SVC_IP},${MASTER_IPS},127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local \
  -profile=kubernetes \
  kubernetes-csr.json | cfssljson -bare kubernetes
[root@node1 pki]#
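An optional way to confirm that the apiserver certificate picked up every IP and DNS name passed via -hostname (not part of the original steps):

# Optional: list the SANs baked into the apiserver certificate
openssl x509 -in kubernetes.pem -noout -text | grep -A1 "Subject Alternative Name"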
[root@node1 pki]# ls
admin.csr  ca.csr  kube-controller-manager-csr.json  kube-proxy-key.pem  kubernetes.pem  node2.csr  node3-csr.json
admin-csr.json  ca-csr.json  kube-controller-manager-key.pem  kube-proxy.pem  kube-scheduler.csr  node2-csr.json  node3-key.pem
admin-key.pem  ca-key.pem  kube-controller-manager.pem  kubernetes.csr  kube-scheduler-csr.json  node2-key.pem  node3.pem
admin.pem  ca.pem  kube-proxy.csr  kubernetes-csr.json  kube-scheduler-key.pem  node2.pem
ca-config.json  kube-controller-manager.csr  kube-proxy-csr.json  kubernetes-key.pem  kube-scheduler.pem  node3.csr
[root@node1 pki]#
Service Account certificate
[root@node1 pki]# cat > service-account-csr.json <<EOF
{
  "CN": "service-accounts",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "seven"
    }
  ]
}
EOF
[root@node1 pki]#
[root@node1 pki]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  service-account-csr.json | cfssljson -bare service-account
[root@node1 pki]#
[root@node1 pki]# ls
admin.csr  ca-csr.json  kube-controller-manager.pem  kubernetes-csr.json  kube-scheduler.pem  node3-csr.json  service-account.pem
admin-csr.json  ca-key.pem  kube-proxy.csr  kubernetes-key.pem  node2.csr  node3-key.pem
admin-key.pem  ca.pem  kube-proxy-csr.json  kubernetes.pem  node2-csr.json  node3.pem
admin.pem  kube-controller-manager.csr  kube-proxy-key.pem  kube-scheduler.csr  node2-key.pem  service-account.csr
ca-config.json  kube-controller-manager-csr.json  kube-proxy.pem  kube-scheduler-csr.json  node2.pem  service-account-csr.json
ca.csr  kube-controller-manager-key.pem  kubernetes.csr  kube-scheduler-key.pem  node3.csr  service-account-key.pem
[root@node1 pki]#
proxy-client certificate
[root@node1 pki]# cat > proxy-client-csr.json <<EOF
{
  "CN": "aggregator",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "seven"
    }
  ]
}
EOF
[root@node1 pki]#
[root@node1 pki]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  proxy-client-csr.json | cfssljson -bare proxy-client
[root@node1 pki]#
[root@node1 pki]# ls
admin.csr  ca-csr.json  kube-controller-manager.pem  kubernetes-csr.json  kube-scheduler.pem  node3-csr.json  proxy-client.pem
admin-csr.json  ca-key.pem  kube-proxy.csr  kubernetes-key.pem  node2.csr  node3-key.pem  service-account.csr
admin-key.pem  ca.pem  kube-proxy-csr.json  kubernetes.pem  node2-csr.json  node3.pem  service-account-csr.json
admin.pem  kube-controller-manager.csr  kube-proxy-key.pem  kube-scheduler.csr  node2-key.pem  proxy-client.csr  service-account-key.pem
ca-config.json  kube-controller-manager-csr.json  kube-proxy.pem  kube-scheduler-csr.json  node2.pem  proxy-client-csr.json  service-account.pem
ca.csr  kube-controller-manager-key.pem  kubernetes.csr  kube-scheduler-key.pem  node3.csr  proxy-client-key.pem
[root@node1 pki]# ls | wc -l
41
[root@node1 pki]#
Distribute the certificates
[root@node1 pki]# for instance in ${WORKERS[@]}; do
scp ca.pem ${instance}-key.pem ${instance}.pem root@${instance}:~/
done
[root@node1 pki]#
[root@node1 pki]# OIFS=$IFS
[root@node1 pki]# IFS=','
[root@node1 pki]# for instance in ${MASTER_IPS}; do
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
  service-account-key.pem service-account.pem proxy-client.pem proxy-client-key.pem root@${instance}:~/
done
[root@node1 pki]#
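If you keep working in the same shell, you can restore the field separator that was saved above; this step is not shown in the original:

# Restore the default field separator saved in OIFS
IFS=$OIFS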
Next, generate a kubeconfig for each component.

kubelet kubeconfig

[root@node1 pki]# for instance in ${WORKERS[@]}; do
kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=${instance}.kubeconfig

kubectl config set-credentials system:node:${instance} \
  --client-certificate=${instance}.pem \
  --client-key=${instance}-key.pem \
  --embed-certs=true \
  --kubeconfig=${instance}.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:node:${instance} \
  --kubeconfig=${instance}.kubeconfig

kubectl config use-context default --kubeconfig=${instance}.kubeconfig
done
[root@node1 pki]# ls *.kubeconfig
node2.kubeconfig  node3.kubeconfig
[root@node1 pki]#
kube-proxy kubeconfig

[root@node1 pki]# kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-proxy.kubeconfig
[root@node1 pki]# kubectl config set-credentials system:kube-proxy \
  --client-certificate=kube-proxy.pem \
  --client-key=kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
[root@node1 pki]# kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
[root@node1 pki]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
[root@node1 pki]# ls *.kubeconfig
kube-proxy.kubeconfig  node2.kubeconfig  node3.kubeconfig
[root@node1 pki]#
kube-controller-manager kubeconfig

[root@node1 pki]# kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-controller-manager.kubeconfig
[root@node1 pki]# kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=kube-controller-manager.pem \
  --client-key=kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig
[root@node1 pki]# kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.kubeconfig
[root@node1 pki]# kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
[root@node1 pki]# ls *.kubeconfig
kube-controller-manager.kubeconfig  kube-proxy.kubeconfig  node2.kubeconfig  node3.kubeconfig
[root@node1 pki]#
kube-scheduler kubeconfig

[root@node1 pki]# kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-scheduler.kubeconfig
[root@node1 pki]# kubectl config set-credentials system:kube-scheduler \
  --client-certificate=kube-scheduler.pem \
  --client-key=kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig
[root@node1 pki]# kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.kubeconfig
[root@node1 pki]# kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
[root@node1 pki]# ls *.kubeconfig
kube-controller-manager.kubeconfig  kube-proxy.kubeconfig  kube-scheduler.kubeconfig  node2.kubeconfig  node3.kubeconfig
[root@node1 pki]#
admin kubeconfig

[root@node1 pki]# kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=admin.kubeconfig
[root@node1 pki]# kubectl config set-credentials admin \
  --client-certificate=admin.pem \
  --client-key=admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig
[root@node1 pki]# kubectl config set-context default \
  --cluster=kubernetes \
  --user=admin \
  --kubeconfig=admin.kubeconfig
[root@node1 pki]# kubectl config use-context default --kubeconfig=admin.kubeconfig
[root@node1 pki]# ls *.kubeconfig
admin.kubeconfig  kube-controller-manager.kubeconfig  kube-proxy.kubeconfig  kube-scheduler.kubeconfig  node2.kubeconfig  node3.kubeconfig
[root@node1 pki]#
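To spot-check any of the generated files (not part of the original steps), kubectl can print a kubeconfig with the embedded certificate data redacted:

# Optional check: the cluster, user and context entries should match the component
kubectl config view --kubeconfig=admin.kubeconfig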
Distribute the kubeconfig files needed by kubelet and kube-proxy to every worker node.
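The copy command itself is not shown above; a sketch that follows the same scp pattern used earlier for the certificates (the ~/ destination directory is an assumption):

# Sketch: each worker gets its own kubelet kubeconfig plus the shared kube-proxy kubeconfig
for instance in ${WORKERS[@]}; do
  scp ${instance}.kubeconfig kube-proxy.kubeconfig root@${instance}:~/
done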
Distribute the kubeconfig files needed by kube-controller-manager and kube-scheduler to the master nodes.
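Likewise, a sketch for the master side, reusing the comma-separated MASTER_IPS variable from earlier (the ~/ destination directory is an assumption):

# Sketch: copy the controller-manager and scheduler kubeconfigs to every master
OIFS=$IFS; IFS=','
for instance in ${MASTER_IPS}; do
  scp kube-controller-manager.kubeconfig kube-scheduler.kubeconfig root@${instance}:~/
done
IFS=$OIFS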
That wraps up this article. The certificates are the most critical piece of the whole setup, so handle them with care. In the next part we will cover installing the etcd cluster.
