Deploying a Pod to Kubernetes nodes with a specific label using a YAML file
This walkthrough is based on Kubernetes 1.15.
Labeling the nodes
Current state
[root@master1 ]# kubectl get node --show-labels
NAME    STATUS   ROLES    AGE     VERSION   LABELS
node1   Ready    <none>   43h     v1.15.2   (omitted)kubernetes.io/hostname=node1,kubernetes.io/os=linux
node2   Ready    <none>   2d21h   v1.15.2   (omitted)kubernetes.io/hostname=ops01,kubernetes.io/os=linux
Applying the labels
# Example: kubectl label node "<node name>" node="<label value>"
[root@master1 ]# kubectl label node node1 node=worker1
[root@master1 ]# kubectl label node node2 node=workerA
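As an aside, if a label ever needs to be corrected later, kubectl can overwrite or remove it (node and label names below are the ones from this example):

# Overwrite an existing label value
[root@master1 ]# kubectl label node node1 node=workerB --overwrite
# Remove the label entirely (note the trailing "-")
[root@master1 ]# kubectl label node node1 node-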
Verifying the labels
[root@master1 ]# kubectl get node --show-labels
NAME    STATUS   ROLES    AGE     VERSION   LABELS
node1   Ready    <none>   43h     v1.15.2   (omitted)kubernetes.io/hostname=node1,kubernetes.io/os=linux,node=worker1
node2   Ready    <none>   2d21h   v1.15.2   (omitted)kubernetes.io/hostname=ops01,kubernetes.io/os=linux,node=workerA
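Instead of scanning the --show-labels output, nodes can also be filtered by label directly using kubectl's standard label-selector flag (output here is what this cluster's data would suggest):

[root@master1 ]# kubectl get node -l node=workerA
NAME    STATUS   ROLES    AGE     VERSION
node2   Ready    <none>   2d21h   v1.15.2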
Deploying the Pod
Editing the YAML
As an example, we'll deploy the CoreDNS Pod.
The original manifest can be found here:
https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
The following values are examples specific to my environment:

kubernetes cluster.local 10.254.0.0/16
forward . /etc/resolv.conf
clusterIP: 10.254.0.2

They will differ from cluster to cluster, so take care to adjust them for yours.
A slight digression: with this configuration, if /etc/resolv.conf is not set up on the node where the DNS Pod is deployed, communication with the outside (from inside a container to Google, etc.) will not work.
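As a quick sanity check before deploying, confirm the node has a usable /etc/resolv.conf (the prompt and nameserver address below are only placeholders; use whatever upstream resolver your environment provides):

[root@node2 ]# cat /etc/resolv.conf
nameserver 8.8.8.8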
[root@master1 k8s]# cat coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes cluster.local 10.254.0.0/16 {
          pods insecure
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      hostNetwork: true
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: node
                operator: In
                values:
                - workerA
      # spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.6.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
The part that matters this time is the Deployment section of the YAML above.
spec:
  hostNetwork: true
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: node
            operator: In
            values:
            - workerA
This is the block in question.
Set the entry under "values" to the label value applied earlier (in this example, workerA).
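As an aside, the same pinning can be expressed more compactly with nodeSelector instead of nodeAffinity (a minimal sketch; in this manifest it would be merged into the existing nodeSelector block, and nodeAffinity was used above because it supports richer operators such as In and NotIn):

      nodeSelector:
        beta.kubernetes.io/os: linux
        node: workerA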
Deploying the YAML
# Deploy
[root@master1 ]# kubectl apply -f coredns.yaml
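Optionally, before listing Pods, wait for the Deployment to finish rolling out (standard kubectl; the deployment name comes from the manifest above):

[root@master1 ]# kubectl -n kube-system rollout status deployment/coredns
deployment "coredns" successfully rolled out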
[root@master1 ]# kubectl get pods -o wide --all-namespaces
NAMESPACE     NAME                           READY   STATUS    RESTARTS   AGE     IP         NODE      NOMINATED NODE   READINESS GATES
default       nginx-ds-3dsxd                 1/1     Running   0          43h     192.168.   worker1   <none>           <none>
default       nginx-ds-dhrcs                 1/1     Running   0          43h     192.168.   workerA   <none>           <none>
kube-system   calico-kube-controllers-9859   1/1     Running   0          2d21h   192.168.   worker1   <none>           <none>
kube-system   coredns-c3546349-7dh1d         1/1     Running   0          175m    192.168.   workerA   <none>           <none>
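The coredns Pod landed on the node labeled node=workerA, as intended. To also confirm that DNS resolution works through the new Pod, a throwaway busybox Pod is a common check (the image tag, Pod name, and query here are just one example):

[root@master1 ]# kubectl run -it --rm dnstest --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default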