k8s 二进制部署 (四)
上一节部署完 master 并进行了 RBAC 授权。此时通过 kubectl get no 可以看到三台 master,但都是 NotReady 状态——这是因为还没有部署网络插件,k8s 在网络插件部署之前节点始终是 NotReady 的。
这一节部署网络插件(flannel vxlan 模式):
记集群POD网段为 :
# Pod network CIDR for the cluster; referenced by the flannel net-conf below
# (the heredoc delimiter is unquoted, so ${CLUSTER_CIDR} expands at write time).
CLUSTER_CIDR=10.10.0.0/16
cat <<EOF | tee kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "${CLUSTER_CIDR}",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: rancher/mirrored-flannelcni-flannel:v0.16.1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: rancher/mirrored-flannelcni-flannel:v0.16.1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
EOF
kubectl apply -f kube-flannel.yml
部署完网络插件后,再执行 kubectl get node,节点就都是 Ready 状态了。