一、Horizontal automatic scaling with HPA
HPA stands for Horizontal Pod Autoscaler. Based on CPU utilization, HPA automatically scales the number of Pods managed by a ReplicationController, Deployment or ReplicaSet (besides CPU utilization it can also scale on other application-provided metrics, i.e. custom metrics). Pod autoscaling does not apply to objects that cannot be scaled, such as DaemonSets.
Note the version requirement: the autoscaling/v2 API used below is only stable in Kubernetes >= 1.23 (HPA itself has existed much longer through the older autoscaling/v1 API).
HPA is implemented as a Kubernetes API resource plus a controller. The resource determines the controller's behavior: the controller periodically fetches the target metric (for example, average CPU utilization), compares it with the target value, and adjusts the number of Pod replicas accordingly.
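For reference, the scaling decision the controller makes follows the documented HPA algorithm, roughly:
desiredReplicas = ceil( currentReplicas * currentMetricValue / desiredMetricValue )
For example, with the 50% CPU target used below and starting from 1 replica, a measured average utilization of 250% works out to ceil(1 * 250 / 50) = 5 replicas, which matches the test run later in this section.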
Create a test Deployment
vi php-apache.yaml
[root@aminglinux01 ~]# cat php-apache.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: php-apache
spec:
  selector:
    matchLabels:
      run: php-apache
  replicas: 1
  template:
    metadata:
      labels:
        run: php-apache
    spec:
      containers:
      - name: php-apache
        image: registry.cn-hangzhou.aliyuncs.com/*/hpa-example:latest
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 500m      ## cap the Pod's CPU usage at 500m
          requests:
            cpu: 200m      ## K8s guarantees the Pod at least 200m of CPU
---
apiVersion: v1
kind: Service
metadata:
  name: php-apache
  labels:
    run: php-apache
spec:
  ports:
  - port: 80
  selector:
    run: php-apache
kubectl apply -f php-apache.yaml
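A quick check that the workload is up before adding the HPA (plain kubectl, nothing specific to this lab):
kubectl get deploy,svc php-apache
kubectl get pods -l run=php-apache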
Install metrics-server (HPA can only obtain actual resource usage through it). metrics-server collects resource metrics such as CPU and memory.
Download the YAML file
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability-1.21+.yaml   ### if this cannot be applied directly for whatever reason, download the file manually (wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability-1.21+.yaml), change the image address, and upload it to the server
Edit the YAML file
vi high-availability-1.21+.yaml
Change image: k8s.gcr.io/metrics-server/metrics-server:v0.6.2 to image: registry.cn-hangzhou.aliyuncs.com/*/metrics-server:v0.7.1
Add one line above the image: line: - --kubelet-insecure-tls
[root@aminglinux01 ~]# cat high-availability-1.21+.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                k8s-app: metrics-server
            namespaces:
            - kube-system
            topologyKey: kubernetes.io/hostname
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=10250
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls   ### do not verify the kubelet's HTTPS certificate
        image: registry.cn-hangzhou.aliyuncs.com/*/metrics-server:v0.7.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 10250
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
          seccompProfile:
            type: RuntimeDefault
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  minAvailable: 1
  selector:
    matchLabels:
      k8s-app: metrics-server
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
[root@aminglinux01 ~]#
kubectl apply -f high-availability-1.21+.yaml
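Before creating the HPA, confirm that metrics-server is actually serving the metrics API; if kubectl top returns numbers instead of an error, the HPA can read CPU utilization:
kubectl get pods -n kube-system -l k8s-app=metrics-server
kubectl top nodes
kubectl top pods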
vi hpa-php-apache.yaml
[root@aminglinux01 ~]# cat hpa-php-apache.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: php-apache
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: php-apache
  minReplicas: 1     ## minimum of 1 Pod
  maxReplicas: 10    ## maximum of 10 Pods
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50   ## scale out when average Pod CPU utilization exceeds 50%
[root@aminglinux01 ~]#
kubectl apply -f hpa-php-apache.yaml
[root@aminglinux01 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
ds-demo-7kqhx 1/1 Running 2 (12h ago) 2d14h
ds-demo-js2rl 1/1 Running 2 (12h ago) 2d14h
ds-demo-pkpb6 1/1 Running 1 (10h ago) 2d14h
job-demo-fg2pg 0/1 Completed 0 47h
lucky-6cdcf8b9d4-qslbj 1/1 Running 0 6d11h
ng-deploy-6d94878b66-8t2hq 1/1 Running 0 3d15h
ng-deploy-6d94878b66-gh95m 1/1 Running 0 3d15h
ngnix 1/1 Running 0 5d18h
php-apache-69865cdc4c-jbzbg 1/1 Running 0 25m
pod-demo 1/1 Running 0 5d20h
pod-demo1 1/1 Running 0 5d19h
redis-sts-0 0/1 ContainerCreating 1 37h
redis-sts-1 0/1 ContainerCreating 1 2d13h
testpod2 1/1 Running 91 (7m44s ago) 38h
[root@aminglinux01 ~]#
[root@aminglinux01 ~]# kubectl get hpa
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
php-apache Deployment/php-apache 0%/50% 1 10 1 22m
[root@aminglinux01 ~]#
Test:
Simulate an increase in CPU usage on the php-apache Pods.
Open another terminal and run:
kubectl run -i --tty load-generator --rm --image=registry.cn-hangzhou.aliyuncs.com/daliyused/busybox --restart=Never -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://php-apache; done"
[root@aminglinux01 ~]# kubectl get deployment,po,hpa |egrep 'NAME|php-apache'
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/php-apache 4/4 4 4 5h53m
NAME READY STATUS RESTARTS AGE
pod/php-apache-69865cdc4c-dc2sw 1/1 Running 0 28s
pod/php-apache-69865cdc4c-hjh8h 1/1 Running 0 13s
pod/php-apache-69865cdc4c-jbzbg 1/1 Running 0 5h53m
pod/php-apache-69865cdc4c-rxnr8 1/1 Running 0 13s
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
horizontalpodautoscaler.autoscaling/php-apache Deployment/php-apache 250%/50% 1 10 2 5h49m
[root@aminglinux01 ~]#
[root@aminglinux01 ~]# kubectl get deployment,po,hpa |egrep 'NAME|php-apache'
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/php-apache 5/5 5 5 5h54m
NAME READY STATUS RESTARTS AGE
pod/php-apache-69865cdc4c-dc2sw 1/1 Running 0 54s
pod/php-apache-69865cdc4c-hjh8h 1/1 Running 0 39s
pod/php-apache-69865cdc4c-jbzbg 1/1 Running 0 5h54m
pod/php-apache-69865cdc4c-mp7sg 1/1 Running 0 24s
pod/php-apache-69865cdc4c-rxnr8 1/1 Running 0 39s
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
horizontalpodautoscaler.autoscaling/php-apache Deployment/php-apache 29%/50% 1 10 5 5h49m
[root@aminglinux01 ~]#
[root@aminglinux01 ~]# kubectl get deployment,po,hpa |egrep 'NAME|php-apache'
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/php-apache 1/1 1 1 7h37m
NAME READY STATUS RESTARTS AGE
pod/php-apache-69865cdc4c-dc2sw 1/1 Running 0 104m
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
horizontalpodautoscaler.autoscaling/php-apache Deployment/php-apache 0%/50% 1 10 1 7h33m
[root@aminglinux01 ~]#
You can see that when CPU usage hit 250% the HPA automatically created additional replicas (the Deployment grew to 4 and then 5 Pods), and once CPU usage dropped it scaled back down to a single Pod.
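Note that the scale-in above is not instantaneous: by default the HPA controller applies a 300-second downscale stabilization window before removing replicas. If you need to tune this, the autoscaling/v2 API exposes a behavior section; a minimal sketch (the values are illustrative, not from this lab):
spec:
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 60   # wait 60s of consistently low load before scaling in
      policies:
      - type: Pods
        value: 2                       # remove at most 2 Pods per period
        periodSeconds: 60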
二、API resource object: NetworkPolicy
NetworkPolicy controls network traffic between Pods (similar in spirit to iptables on Linux), and it can also apply restrictions per Namespace. It works as a whitelist: traffic that matches a rule is allowed, everything else is rejected.
Example use cases:
- Pod A must not be able to access Pod B;
- no Pod in the development environment may access the test namespace;
- when a service is exposed externally, restrict which external IPs may reach it.
Official NetworkPolicy YAML example:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  namespace: default
spec:
  podSelector:             #### which Pods the policy applies to
    matchLabels:
      role: db
  policyTypes:
  - Ingress                #### inbound rules apply
  - Egress                 #### outbound rules apply
  ingress:                 #### inbound traffic rules
  - from:
    - ipBlock:
        cidr: 172.17.0.0/16
        except:            #### excluded ranges
        - 172.17.1.0/24
    - namespaceSelector:   ### which namespaces are allowed
        matchLabels:
          project: myproject
    - podSelector:         ### which Pods are allowed; matching any one of these three sources is enough
        matchLabels:
          role: frontend
    ports:
    - protocol: TCP
      port: 6379
  egress:                  ### outbound traffic rules
  - to:
    - ipBlock:
        cidr: 10.0.0.0/24
    ports:
    - protocol: TCP
      port: 5978
Notes: apiVersion, kind and metadata are required fields.
podSelector: the label selector for the target Pods, i.e. which Pods this policy applies to;
policyTypes: whether the policy applies to the target Pods' inbound traffic (Ingress), outbound traffic (Egress), or both. If policyTypes is not specified, Ingress is always set by default.
ingress: the inbound rules; from defines the whitelisted sources (IP blocks, namespaces, Pod labels) and ports defines the destination ports.
egress: the outbound rules; they define which IPs and ports may be reached. The semantics of combining the selectors under from are shown in the sketch below.
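One detail that is easy to misread in the example above: separate items under from are OR'ed together, while putting namespaceSelector and podSelector inside the same item means both must match (AND). A minimal sketch of the two forms (the labels are only illustrative):
ingress:
- from:                        # rule 1: two separate items, OR semantics
  - namespaceSelector:
      matchLabels:
        project: myproject     # ... any Pod in a namespace labeled project=myproject ...
  - podSelector:
      matchLabels:
        role: frontend         # ... OR any Pod labeled role=frontend in this namespace
- from:                        # rule 2: one combined item, AND semantics
  - namespaceSelector:
      matchLabels:
        project: myproject
    podSelector:               # same item: the source must match BOTH selectors
      matchLabels:
        role: frontend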
Case 1:
Requirement: all Pods in the yeyunyi namespace can access each other and can access Pods in other namespaces, but Pods in other namespaces must not be able to access Pods in the yeyunyi namespace. (First verify the behavior before any NetworkPolicy is applied.)
Create a Pod in the default namespace
kubectl run busybox --image=busybox -- sleep 3600
Create a busybox Pod in the yeyunyi namespace
kubectl run busybox --image=busybox -n yeyunyi -- sleep 3600
Create a web Pod in the yeyunyi namespace
kubectl run web --image=nginx -n yeyunyi -- sleep 3600
Check the three Pods:
[root@aminglinux01 ~]# kubectl get pod -n yeyunyi -owide | grep web
web 1/1 Running 0 116s 10.18.206.244 aminglinux02 <none> <none>
[root@aminglinux01 ~]# kubectl get pod -n yeyunyi -owide | grep busybox
busybox 1/1 Running 0 3m2s 10.18.206.243 aminglinux02 <none> <none>
[root@aminglinux01 ~]# kubectl get pod -owide | grep busybox
busybox 1/1 Running 0 3m32s 10.18.206.242 aminglinux02 <none> <none>
The busybox Pod in the yeyunyi namespace pings the busybox Pod in the default namespace
[root@aminglinux01 ~]# kubectl exec busybox -n yeyunyi -- ping 10.18.206.242
PING 10.18.206.242 (10.18.206.242): 56 data bytes
64 bytes from 10.18.206.242: seq=0 ttl=63 time=0.259 ms
64 bytes from 10.18.206.242: seq=1 ttl=63 time=0.140 ms
^C
The busybox Pod in the yeyunyi namespace pings the web Pod in the yeyunyi namespace
[root@aminglinux01 ~]# kubectl exec busybox -n yeyunyi -- ping 10.18.206.244
PING 10.18.206.244 (10.18.206.244): 56 data bytes
64 bytes from 10.18.206.244: seq=0 ttl=63 time=0.204 ms
64 bytes from 10.18.206.244: seq=1 ttl=63 time=0.170 ms
^C
The busybox Pod in the default namespace pings the busybox Pod in the yeyunyi namespace
[root@aminglinux01 ~]# kubectl exec busybox -- ping 10.18.206.243
PING 10.18.206.243 (10.18.206.243): 56 data bytes
64 bytes from 10.18.206.243: seq=0 ttl=63 time=0.128 ms
^C
Create the NetworkPolicy YAML
vi deny-all-namespaces.yaml
[root@aminglinux01 ~]# cat deny-all-namespaces.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all-namespaces
  namespace: yeyunyi
spec:
  podSelector: {}       # empty: matches all Pods in this namespace
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector: {}   # empty: matches all Pods in this namespace, i.e. all Pods here are allowed in; no namespaceSelector is defined, so Pods from other namespaces are not allowed
[root@aminglinux01 ~]#
Apply the YAML and check it
kubectl apply -f deny-all-namespaces.yaml
[root@aminglinux01 ~]# kubectl apply -f deny-all-namespaces.yaml
networkpolicy.networking.k8s.io/deny-all-namespaces created
[root@aminglinux01 ~]# kubectl get NetworkPolicy -n yeyunyi
NAME POD-SELECTOR AGE
deny-all-namespaces <none> 30s
[root@aminglinux01 ~]#
The busybox Pod in the yeyunyi namespace pings the busybox Pod in the default namespace (still works: the policy only restricts ingress into yeyunyi, not egress out of it)
[root@aminglinux01 ~]# kubectl exec busybox -n yeyunyi -- ping 10.18.206.242
PING 10.18.206.242 (10.18.206.242): 56 data bytes
64 bytes from 10.18.206.242: seq=0 ttl=63 time=0.125 ms
64 bytes from 10.18.206.242: seq=1 ttl=63 time=0.146 ms
^C
The busybox Pod in the yeyunyi namespace pings the web Pod in the yeyunyi namespace (still works: Pods in the same namespace are whitelisted)
[root@aminglinux01 ~]# kubectl exec busybox -n yeyunyi -- ping 10.18.206.244
PING 10.18.206.244 (10.18.206.244): 56 data bytes
64 bytes from 10.18.206.244: seq=0 ttl=63 time=0.156 ms
^C
[root@aminglinux01 ~]#
The busybox Pod in the default namespace pings the busybox and web Pods in the yeyunyi namespace (now blocked)
[root@aminglinux01 ~]# kubectl exec busybox -- ping 10.18.206.243
^C
[root@aminglinux01 ~]# kubectl exec busybox -- ping 10.18.206.244
^C
[root@aminglinux01 ~]#
Case 2:
Restrict by podSelector
vi pod-selector.yaml    ### Pods labeled app=test accept traffic only from Pods labeled app=dev
[root@aminglinux01 ~]# cat pod-selector.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: app-to-app
  namespace: yeyunyi
spec:
  podSelector:
    matchLabels:
      app: test
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: dev
    ports:
    - protocol: TCP
      port: 80
[root@aminglinux01 ~]#
Apply the YAML: kubectl apply -f pod-selector.yaml
[root@aminglinux01 ~]# kubectl apply -f pod-selector.yaml
networkpolicy.networking.k8s.io/app-to-app created
[root@aminglinux01 ~]#
[root@aminglinux01 ~]# kubectl get NetworkPolicy -n yeyunyi
NAME POD-SELECTOR AGE
app-to-app app=test 10s
[root@aminglinux01 ~]#
Create test Pods
kubectl run web01 --image=nginx:latest -n yeyunyi -l 'app=test'   # specify the label when creating the Pod
kubectl run app01 --image=nginx:latest -n yeyunyi -l 'app=dev'    # specify the label when creating the Pod
kubectl run app02 --image=nginx:latest -n yeyunyi
# If a label was created incorrectly it can be changed; not needed in this lab:
# kubectl label pod busybox app=test123 --overwrite
[root@aminglinux01 ~]# kubectl run web01 --image=nginx:latest -n yeyunyi -l 'app=test'
pod/web01 created
[root@aminglinux01 ~]# kubectl run app01 --image=nginx:latest -n yeyunyi -l 'app=dev'
pod/app01 created
[root@aminglinux01 ~]# kubectl run app02 --image=nginx:latest -n yeyunyi
pod/app02 created
[root@aminglinux01 ~]# kubectl run app03 --image=nginx:latest -n yeyunyi -l 'app=dev1'
pod/app03 created
[root@aminglinux01 ~]# kubectl get pod web01 -n yeyunyi --show-labels
NAME READY STATUS RESTARTS AGE LABELS
web01 1/1 Running 0 103s app=test
[root@aminglinux01 ~]# kubectl get pod app01 -n yeyunyi --show-labels
NAME READY STATUS RESTARTS AGE LABELS
app01 1/1 Running 0 77s app=dev
[root@aminglinux01 ~]# kubectl get pod app03 -n yeyunyi --show-labels
NAME READY STATUS RESTARTS AGE LABELS
app03 1/1 Running 0 2m49s app=dev1
[root@aminglinux01 ~]#
Check web01's IP
kubectl describe po web01 -n yeyunyi |grep -i ip
[root@aminglinux01 ~]# kubectl describe po web01 -n yeyunyi |grep -i ip
cni.projectcalico.org/podIP: 10.18.206.245/32
cni.projectcalico.org/podIPs: 10.18.206.245/32
IP:           10.18.206.245
IPs:
  IP:         10.18.206.245
Type: Projected (a volume that contains injected data from multiple sources)
[root@aminglinux01 ~]#
Test
kubectl exec -n yeyunyi app01 -- curl 10.18.206.245
kubectl exec -n yeyunyi app02 -- curl 10.18.206.245
kubectl exec -n yeyunyi app03 -- curl 10.18.206.245
From the test results, Pods labeled app=dev can access the Pod labeled app=test, while Pods with no label or with a non-matching label cannot.
[root@aminglinux01 ~]# kubectl exec -n yeyunyi app01 -- curl 10.18.206.245
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p><p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p><p><em>Thank you for using nginx.</em></p>
</body>
</html>
100 615 100 615 0 0 317k 0 --:--:-- --:--:-- --:--:-- 600k
[root@aminglinux01 ~]# kubectl exec -n yeyunyi app02 -- curl 10.18.206.245
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:--  0:00:42 --:--:--     0^C
[root@aminglinux01 ~]#
[root@aminglinux01 ~]# kubectl exec -n yeyunyi app03 -- curl 10.18.206.245
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:--  0:00:13 --:--:--     0^C
[root@aminglinux01 ~]#
Case 3:
Restrict by namespace
vi allow-ns.yaml
[root@aminglinux01 ~]# cat allow-ns.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-ns
  namespace: yeyunyi
spec:
  podSelector: {}            #### all Pods in this namespace
  policyTypes:
  - Ingress
  ingress:
  - from:
    - namespaceSelector:     #### the allowed source namespace
        matchLabels:
          name: test
    ports:
    - protocol: TCP
      port: 80
[root@aminglinux01 ~]#
What this YAML does: Pods in the test namespace can access Pods in the yeyunyi namespace on TCP port 80; all other inbound access is denied by default.
Apply the YAML
kubectl apply -f allow-ns.yaml
[root@aminglinux01 ~]# kubectl apply -f allow-ns.yaml
networkpolicy.networking.k8s.io/allow-ns created
Create a test namespace
kubectl create ns test
[root@aminglinux01 ~]# kubectl create ns test
namespace/test created
Create test Pods
kubectl run web01 --image=nginx:latest -n yeyunyi
kubectl run web02 --image=nginx:latest -n test
kubectl run web03 --image=nginx:latest
kubectl run web04 --image=nginx:latest -n yeyunyi
[root@aminglinux01 ~]# kubectl run web01 --image=nginx:latest -n yeyunyi
pod/web01 created
[root@aminglinux01 ~]# kubectl run web02 --image=nginx:latest -n test
pod/web02 created
[root@aminglinux01 ~]# kubectl run web03 --image=nginx:latest
pod/web03 created
[root@aminglinux01 ~]# kubectl run web04 --image=nginx:latest -n yeyunyi
pod/web04 created
Check the IPs of web01 and web04
[root@aminglinux01 ~]# kubectl describe po web01 -n yeyunyi |grep -i ip
cni.projectcalico.org/podIP: 10.18.206.197/32
cni.projectcalico.org/podIPs: 10.18.206.197/32
IP:           10.18.206.197
IPs:
  IP:         10.18.206.197
Type: Projected (a volume that contains injected data from multiple sources)
[root@aminglinux01 ~]# kubectl describe po web04 -n yeyunyi |grep -i ip
cni.projectcalico.org/podIP: 10.18.68.183/32
cni.projectcalico.org/podIPs: 10.18.68.183/32
IP:           10.18.68.183
IPs:
  IP:         10.18.68.183
Check the namespace labels
kubectl get ns --show-labels
[root@aminglinux01 ~]# kubectl get ns --show-labels
NAME STATUS AGE LABELS
default Active 7d kubernetes.io/metadata.name=default
kube-node-lease Active 7d kubernetes.io/metadata.name=kube-node-lease
kube-public Active 7d kubernetes.io/metadata.name=kube-public
kube-system Active 7d kubernetes.io/metadata.name=kube-system
test Active 109s kubernetes.io/metadata.name=test
yeyunyi Active 6d1h kubernetes.io/metadata.name=yeyunyi
Label the namespace
kubectl label namespace test name=test
[root@aminglinux01 ~]# kubectl label namespace test name=test
namespace/test labeled
[root@aminglinux01 ~]# kubectl get ns --show-labels
NAME STATUS AGE LABELS
default Active 7d kubernetes.io/metadata.name=default
kube-node-lease Active 7d kubernetes.io/metadata.name=kube-node-lease
kube-public Active 7d kubernetes.io/metadata.name=kube-public
kube-system Active 7d kubernetes.io/metadata.name=kube-system
test Active 2m24s kubernetes.io/metadata.name=test,name=test
yeyunyi Active 6d1h kubernetes.io/metadata.name=yeyunyi
Test:
kubectl -n test exec web02 -- curl 10.18.206.197       # allowed
kubectl exec web03 -- curl 10.18.206.197               # not allowed
kubectl -n yeyunyi exec web04 -- curl 10.18.206.197    # not allowed: even Pods in the same namespace are blocked
kubectl -n test exec web02 -- curl 10.18.68.183        # allowed
web02 in the test namespace accesses web01 in the yeyunyi namespace
[root@aminglinux01 ~]# kubectl -n test exec web02 -- curl 10.18.206.197
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 615 100 615 0 0 511k 0 --:--:-- --:--:-- --:--:-- 600k
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p><p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p><p><em>Thank you for using nginx.</em></p>
</body>
</html>
web03 in the default namespace accesses web01 in the yeyunyi namespace: it fails, because default does not match the policy's source namespace (test).
[root@aminglinux01 ~]# kubectl exec web03 -- curl 10.18.206.197
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:--  0:00:06 --:--:--     0^C
web04 in the yeyunyi namespace accesses web01 in the yeyunyi namespace: it also fails, because yeyunyi does not match the policy's source namespace (test) either.
[root@aminglinux01 ~]# kubectl -n yeyunyi exec web04 -- curl 10.18.206.197
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:--  0:00:06 --:--:--     0^C
web02 in the test namespace accesses web04 in the yeyunyi namespace: it succeeds.
[root@aminglinux01 ~]# kubectl -n test exec web02 -- curl 10.18.68.183
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p><p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p><p><em>Thank you for using nginx.</em></p>
</body>
</html>
100 615 100 615 0 0 133k 0 --:--:-- --:--:-- --:--:-- 150k
[root@aminglinux01 ~]#
三、Kubernetes user security controls
1)The three stages of security control
Three stages: Authentication, Authorization, and Admission Control.
① Authentication verifies that the user's identity is legitimate, for example that the certificate is valid or that the Token is correct.
② Authorization checks whether that user has permission to access or operate on the K8s resources in question.
③ Admission control checks whether the client request conforms to the specification of the requested API operation and whether the parameters passed are correct. For example, when creating a Pod it checks that the submitted definition matches the Pod spec and rejects it if it does not. Admission control also fills in fields we did not explicitly set with their default values, after which the API server writes the completed request to the corresponding resource's record in etcd.
2)K8s authentication (Authentication)
Two authentication methods:
① Kubeconfig: based on HTTPS CA certificates; this is what we use when managing K8s from the command line.
② Token: the user is identified by a token; for example, when we covered the dashboard earlier, we created a ServiceAccount and then fetched its token.
3)K8s authorization (Authorization)
Authorization modes:
AlwaysDeny: rejects all requests; generally only used for testing.
AlwaysAllow: accepts all requests; usable if the cluster does not need an authorization flow.
ABAC (Attribute-Based Access Control): requests are matched and controlled against user-configured authorization rules. This is the approach of older versions: access types are defined as attributes, and a user holding an attribute can access the corresponding resources. It requires defining long attribute lists, changes do not take effect on their own (the API server has to be restarted), and it is now obsolete.
Webhook: authorization is delegated to an external REST service (the cluster is authorized from outside).
RBAC (Role-Based Access Control): role-based access control, the current default (holding a role means holding certain permissions on resources).
Check your cluster's authorization mode:
cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep authorization-mode
[root@aminglinux01 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml|grep authorization-mode
    - --authorization-mode=Node,RBAC
[root@aminglinux01 ~]#
RBAC authorization mode
RBAC (Role-Based Access Control) was introduced in Kubernetes 1.5 and has become the default standard in current versions. Compared with other access-control approaches it has the following advantages:
- complete coverage of cluster resources (pods, deployments, cpu, ...) and non-resource data (metadata such as Pod status)
- all of RBAC is implemented by just a few API objects, which, like any other API object, can be managed with kubectl or the API
- it can be adjusted at runtime and takes effect without restarting the API server
RBAC API resource objects
RBAC objects: Subject (User, Group, ServiceAccount), Role, ClusterRole, RoleBinding and ClusterRoleBinding.
Subjects
- User: a user
- Group: a user group
- ServiceAccount: a service account
Roles
- Role: grants access within a specific namespace
- ClusterRole: grants access across all namespaces
Role bindings
- RoleBinding: binds a Role to a subject
- ClusterRoleBinding: binds a ClusterRole to a subject
4)K8s admission control (Admission Control)
Admission Control is effectively a list of admission controller plugins; every request sent to the APIServer is checked by each plugin in the list, and if any check fails the request is rejected.
List the admission controllers that can be enabled:
kubectl exec kube-apiserver-aminglinux01 -n kube-system -- kube-apiserver -h | grep enable-admission-plugins
[root@aminglinux01 ~]# ps aux|grep apiserver|grep admission
root 1913 2.7 11.3 1193336 420840 ? Ssl Jul11 34:04 kube-apiserver --advertise-address=192.168.100.151 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/pki/ca.crt --enable-admission-plugins=NodeRestriction --enable-bootstrap-token-auth=true --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key --etcd-servers=https://127.0.0.1:2379 --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/etc/kubernetes/pki/sa.pub --service-account-signing-key-file=/etc/kubernetes/pki/sa.key --service-cluster-ip-range=10.15.0.0/16 --tls-cert-file=/etc/kubernetes/pki/apiserver.crt --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
[root@aminglinux01 ~]#
Change the enabled admission controllers:
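The list is controlled by the --enable-admission-plugins flag shown in the ps output above. On a kubeadm cluster like this one the flag lives in the apiserver's static Pod manifest, so a sketch of the procedure is (the added plugin name is only an example):
vi /etc/kubernetes/manifests/kube-apiserver.yaml
# change
#     - --enable-admission-plugins=NodeRestriction
# to, for example
#     - --enable-admission-plugins=NodeRestriction,LimitRanger
# Save the file; the kubelet detects the manifest change and recreates kube-apiserver automatically.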
四、Example: creating a regular user in Kubernetes
Requirement 1: create a Role and a ServiceAccount and bind them together.
The ServiceAccount gets the get, list and watch permissions.
Create the YAML file
[root@aminglinux01 ~]# cat testsa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: testsa
---                   ### "---" separates multiple documents in one YAML file
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: testsa-role
rules:
- apiGroups:          # API group, e.g. apps; "" means the core API group (namespace, pod, service, pv, pvc, etc.)
  - ""
  resources:          # resource names (plural), e.g. pods, deployments, services
  - pods
  verbs:              # allowed operations; here get, list and watch
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding     #### binds the Role to the ServiceAccount
metadata:
  name: testsa-rolebinding
roleRef:              #### roleRef specifies the Role/ClusterRole to bind
  apiGroup: rbac.authorization.k8s.io   ###### Kubernetes identifies API extensions with API Groups
  kind: Role
  name: testsa-role
subjects:
- kind: ServiceAccount
  name: testsa
[root@aminglinux01 ~]#
Apply the YAML
kubectl apply -f testsa.yaml
Generate a token
kubectl create token testsa
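To sanity-check the binding without switching identities, kubectl can impersonate the ServiceAccount (the namespace is default because the YAML above does not set one):
kubectl auth can-i list pods --as=system:serviceaccount:default:testsa     # expected: yes
kubectl auth can-i delete pods --as=system:serviceaccount:default:testsa   # expected: no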
Requirement 2:
Grant the user user1 read access to Pods in the yeyunyi namespace.
① Generate a client certificate for user1 (signed by the cluster CA)
cd /etc/kubernetes/pki/
openssl genrsa -out user1.key 2048   ### generate the private key
openssl req -new -key user1.key -out user1.csr -subj "/CN=user1"   #### generate the CSR; the CN is the username
openssl x509 -req -in user1.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out user1.crt -days 3650   #### sign the certificate with the CA created when the cluster was set up
[root@aminglinux01 pki]# openssl genrsa -out user1.key 2048
Generating RSA private key, 2048 bit long modulus (2 primes)
..............................................................+++++
.+++++
e is 65537 (0x010001)
[root@aminglinux01 pki]#
[root@aminglinux01 pki]# openssl req -new -key user1.key -out user1.csr -subj "/CN=user1"
[root@aminglinux01 pki]# openssl x509 -req -in user1.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out user1.crt -days 3650
Signature ok
subject=CN = user1
Getting CA Private Key
[root@aminglinux01 pki]#
[root@aminglinux01 pki]# ls
apiserver.crt apiserver-kubelet-client.key front-proxy-ca.crt sa.pub
apiserver-etcd-client.crt ca.crt front-proxy-ca.key user1.crt
apiserver-etcd-client.key ca.key front-proxy-client.crt user1.csr
apiserver.key ca.srl front-proxy-client.key user1.key
apiserver-kubelet-client.crt etcd sa.key
[root@aminglinux01 pki]#
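Optionally confirm what was just signed; the CN in the subject is the username that RBAC will see:
openssl x509 -in user1.crt -noout -subject -issuer -dates
# subject=CN = user1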
② Generate the kubeconfig file
# Set the cluster entry
kubectl config set-cluster myk8s \
--certificate-authority=/etc/kubernetes/pki/ca.crt \
--embed-certs=true \
--server=https://192.168.100.151:6443 \
--kubeconfig=/root/user1.kubecfg

[root@aminglinux01 pki]# kubectl config set-cluster myk8s \
>   --certificate-authority=/etc/kubernetes/pki/ca.crt \
>   --embed-certs=true \
>   --server=https://192.168.100.151:6443 \
>   --kubeconfig=/root/user1.kubecfg
Cluster "myk8s" set.

# View the user1 config: users and contexts are still empty
kubectl config view --kubeconfig=/root/user1.kubecfg

[root@aminglinux01 pki]# kubectl config view --kubeconfig=/root/user1.kubecfg
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.100.151:6443
  name: myk8s
contexts: null        ### empty
current-context: ""
kind: Config
preferences: {}
users: null           ### empty
[root@aminglinux01 pki]#
# Set the client credentials (user1.key and user1.crt were generated in the previous step)
kubectl config set-credentials user1 \
--client-key=user1.key \
--client-certificate=user1.crt \
--embed-certs=true \
--kubeconfig=/root/user1.kubecfg

[root@aminglinux01 pki]# kubectl config set-credentials user1 \
>   --client-key=user1.key \
>   --client-certificate=user1.crt \
>   --embed-certs=true \
>   --kubeconfig=/root/user1.kubecfg
User "user1" set.

# View the user1 config again: users is now populated
kubectl config view --kubeconfig=/root/user1.kubecfg

[root@aminglinux01 pki]# kubectl config view --kubeconfig=/root/user1.kubecfg
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.100.151:6443
  name: myk8s
contexts: null
current-context: ""
kind: Config
preferences: {}
users:
- name: user1
  user:
    client-certificate-data: DATA+OMITTED
    client-key-data: DATA+OMITTED
[root@aminglinux01 pki]#
# Set the context
kubectl config set-context user1@myk8s \
--cluster=myk8s \
--user=user1 \
--kubeconfig=/root/user1.kubecfg

[root@aminglinux01 pki]# kubectl config set-context user1@myk8s \
>   --cluster=myk8s \
>   --user=user1 \
>   --kubeconfig=/root/user1.kubecfg
Context "user1@myk8s" created.
[root@aminglinux01 pki]#

# View the user1 config again: contexts is now populated
kubectl config view --kubeconfig=/root/user1.kubecfg

[root@aminglinux01 pki]# kubectl config view --kubeconfig=/root/user1.kubecfg
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.100.151:6443
  name: myk8s
contexts:
- context:
    cluster: myk8s
    user: user1
  name: user1@myk8s
current-context: ""
kind: Config
preferences: {}
users:
- name: user1
  user:
    client-certificate-data: DATA+OMITTED
    client-key-data: DATA+OMITTED
[root@aminglinux01 pki]#

# Switch to the new context
kubectl config use-context user1@myk8s --kubeconfig=/root/user1.kubecfg

[root@aminglinux01 pki]# kubectl config use-context user1@myk8s --kubeconfig=/root/user1.kubecfg
Switched to context "user1@myk8s".
[root@aminglinux01 pki]#
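At this point the kubeconfig can authenticate, but user1 has no RBAC permissions yet, so a request through it is expected to be rejected:
kubectl get pods -n yeyunyi --kubeconfig=/root/user1.kubecfg
# Error from server (Forbidden): pods is forbidden: User "user1" cannot list resource "pods" ...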
③ Create the Role
cat > user1-role.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: yeyunyi
  name: user1-role
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:            # allowed operations
  - get
  - list
  - watch
EOF
kubectl apply -f user1-role.yaml
④ Bind the user to the Role
cat > user1-rolebinding.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: user1-rolebinding
  namespace: yeyunyi
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: user1-role
subjects:
- kind: User
  name: user1
  apiGroup: rbac.authorization.k8s.io
EOF
kubectl apply -f user1-rolebinding.yaml
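Before switching to the system user, the binding can be verified from the admin context with impersonation:
kubectl auth can-i list pods -n yeyunyi --as=user1    # expected: yes
kubectl auth can-i list pods --as=user1               # expected: no, the default namespace was not granted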
⑤ Create a Linux system user and point it at the (K8s user) user1 kubeconfig
useradd yeyunyi
mkdir /home/yeyunyi/.kube
cp /root/user1.kubecfg /home/yeyunyi/.kube/config
chown -R yeyunyi:yeyunyi /home/yeyunyi/.kube/

[root@aminglinux01 ~]# mkdir /home/yeyunyi/.kube
[root@aminglinux01 ~]# cp /root/user1.kubecfg /home/yeyunyi/.kube/config
[root@aminglinux01 ~]# chown -R yeyunyi:yeyunyi /home/yeyunyi/.kube/
⑥ Switch to the regular user and access K8s
su - yeyunyi
$ kubectl get po                   # cannot list Pods in the default namespace
$ kubectl get po -n yeyunyi        ## Pods in the authorized yeyunyi namespace
$ kubectl get deploy -n yeyunyi    ## only pods were authorized, so Deployment resources cannot be listed

[yeyunyi@aminglinux01 ~]$ kubectl get po
Error from server (Forbidden): pods is forbidden: User "user1" cannot list resource "pods" in API group "" in the namespace "default"
[yeyunyi@aminglinux01 ~]$ kubectl get po -n yeyunyi
NAME                      READY   STATUS    RESTARTS     AGE
lucky-6cdcf8b9d4-4ws4c    1/1     Running   2 (4d ago)   6d22h
lucky-6cdcf8b9d4-6vc5g    1/1     Running   2 (4d ago)   6d22h
lucky-6cdcf8b9d4-8fql2    1/1     Running   2 (4d ago)   6d22h
lucky-6cdcf8b9d4-g8p26    1/1     Running   2 (4d ago)   6d22h
lucky-6cdcf8b9d4-mxm97    1/1     Running   2 (4d ago)   6d22h
lucky1-5cf7f459cf-6p9xz   1/1     Running   2 (4d ago)   6d22h
lucky1-5cf7f459cf-7t2gj   1/1     Running   2 (4d ago)   6d22h
lucky1-5cf7f459cf-bcsl5   1/1     Running   2 (4d ago)   6d22h
lucky1-5cf7f459cf-dv5fz   1/1     Running   2 (4d ago)   6d22h
lucky1-5cf7f459cf-jmvnr   1/1     Running   2 (4d ago)   6d22h
quota-pod                 1/1     Running   2 (4d ago)   6d22h
[yeyunyi@aminglinux01 ~]$ kubectl get deploy -n yeyunyi
Error from server (Forbidden): deployments.apps is forbidden: User "user1" cannot list resource "deployments" in API group "apps" in the namespace "yeyunyi"
[yeyunyi@aminglinux01 ~]$