# Generate the manifests and create the controller and its Service
[root@k8s-master ~]# kubectl create deployment bwmis --image timinglee/myapp:v1 --replicas 2 --dry-run=client -o yaml > bwmis.yaml
[root@k8s-master ~]# kubectl expose deployment bwmis --port 80 --target-port 80 --dry-run=client -o yaml >> bwmis.yaml
[root@k8s-master ~]# vim bwmis.yaml   # add a "---" separator between the Deployment and the Service
[root@k8s-master ~]# kubectl apply -f bwmis.yaml
deployment.apps/bwmis created
service/bwmis created
[root@k8s-master ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
bwmis ClusterIP 10.98.160.169 <none> 80/TCP 7s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3d
[root@k8s-master ~]# kubectl get services -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
bwmis ClusterIP 10.98.160.169 <none> 80/TCP 21s app=bwmis
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3d <none>
[root@k8s-master ~]# iptables -t nat -nL
ipvs mode
Install ipvsadm on all nodes
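A minimal sketch of the install step, assuming dnf-based nodes:
[root@k8s-master ~]# dnf install ipvsadm -y    # repeat on every worker node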
[root@k8s-master ~]# kubectl -n kube-system edit cm kube-proxy
configmap/kube-proxy edited
metricsBindAddress: ""
mode: "ipvs" #设置kube-proxy使用ipvs模式
nftables:
# Restart the kube-proxy pods: a pod keeps the configuration it started with, so pods that are already running do not pick up the ConfigMap change and must be recreated
[root@k8s-master ~]# kubectl -n kube-system get pods | awk '/kube-proxy/{system("kubectl -n kube-system delete pods "$1)}'
pod "kube-proxy-6l5k9" deleted
pod "kube-proxy-q4jqx" deleted
pod "kube-proxy-rj9f2" deleted
[root@k8s-master ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.96.0.1:443 rr
-> 172.25.250.129:6443 Masq 1 0 0
TCP 10.96.0.10:53 rr
-> 10.244.0.32:53 Masq 1 0 0
-> 10.244.0.33:53 Masq 1 0 0
TCP 10.96.0.10:9153 rr
-> 10.244.0.32:9153 Masq 1 0 0
-> 10.244.0.33:9153 Masq 1 0 0
TCP 10.98.160.169:80 rr
-> 10.244.1.138:80 Masq 1 0 0
-> 10.244.2.14:80 Masq 1 0 0
UDP 10.96.0.10:53 rr
-> 10.244.0.32:53 Masq 1 0 0
-> 10.244.0.33:53 Masq 1 0 0
# After switching to ipvs mode, kube-proxy adds a virtual interface, kube-ipvs0, on every host and assigns all Service IPs to it
[root@k8s-master ~]# ip a | tail
inet6 fe80::580f:daff:fe2c:4fe2/64 scope link
valid_lft forever preferred_lft forever
8: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
link/ether 5a:8f:cd:08:31:04 brd ff:ff:ff:ff:ff:ff
inet 10.98.160.169/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
inet 10.96.0.1/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
inet 10.96.0.10/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
ClusterIP
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: bwmis
name: bwmis
spec:
replicas: 2
selector:
matchLabels:
app: bwmis
template:
metadata:
creationTimestamp: null
labels:
app: bwmis
spec:
containers:
- image: timinglee/myapp:v1
name: myapp
---
apiVersion: v1
kind: Service
metadata:
labels:
app: bwmis
name: bwmis
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: bwmis
type: ClusterIP
[root@k8s-master ~]# dig bwmis.default.svc.cluster.local @10.96.0.10
; <<>> DiG 9.16.23-RH <<>> bwmis.default.svc.cluster.local @10.96.0.10
;; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 6458
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; WARNING: recursion requested but not available
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
; COOKIE: 48542316b601281b (echoed)
;; QUESTION SECTION:
;bwmis.default.svc.cluster.local. IN A
;; ANSWER SECTION:
bwmis.default.svc.cluster.local. 30 IN A 10.98.160.169
;; Query time: 6 msec
;; SERVER: 10.96.0.10#53(10.96.0.10)
;; WHEN: Fri Sep 06 13:28:00 CST 2024
;; MSG SIZE rcvd: 119
Headless, the special ClusterIP mode
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: bwmis
name: bwmis
spec:
replicas: 2
selector:
matchLabels:
app: bwmis
template:
metadata:
creationTimestamp: null
labels:
app: bwmis
spec:
containers:
- image: timinglee/myapp:v1
name: myapp
---
apiVersion: v1
kind: Service
metadata:
labels:
app: bwmis
name: bwmis
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: bwmis
type: ClusterIP
clusterIP: None
[root@k8s-master ~]# kubectl delete -f bwmis.yaml
deployment.apps "bwmis" deleted
service "bwmis" deleted[root@k8s-master ~]# kubectl apply -f bwmis.yaml
deployment.apps/bwmis created
service/bwmis created
[root@k8s-master ~]# kubectl get services bwmis
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
bwmis ClusterIP None <none> 80/TCP 19s
[root@k8s-master ~]# dig bwmis.default.svc.cluster.local @10.96.0.10
; <<>> DiG 9.16.23-RH <<>> bwmis.default.svc.cluster.local @10.96.0.10
;; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 49305
;; flags: qr aa rd; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 1
;; WARNING: recursion requested but not available
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
; COOKIE: a03f6cae423f2066 (echoed)
;; QUESTION SECTION:
;bwmis.default.svc.cluster.local. IN A
;; ANSWER SECTION:
bwmis.default.svc.cluster.local. 30 IN A 10.244.2.16 # resolves directly to the pods
bwmis.default.svc.cluster.local. 30 IN A 10.244.1.140
;; Query time: 6 msec
;; SERVER: 10.96.0.10#53(10.96.0.10)
;; WHEN: Fri Sep 06 13:31:26 CST 2024
;; MSG SIZE rcvd: 166
# Start a busyboxplus pod for testing
[root@k8s-master ~]# kubectl run test --image busyboxplus:latest -it
If you don't see a command prompt, try pressing enter.
/ # nslookup bwmis
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: bwmis
Address 1: 10.244.2.16 10-244-2-16.bwmis.default.svc.cluster.local
Address 2: 10.244.1.140 10-244-1-140.bwmis.default.svc.cluster.local
/ # curl bwmis
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
/ # curl bwmis/hostname.html
bwmis-c49d689bd-6x6qv
NodePort
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: bwmis
name: bwmis
spec:
replicas: 2
selector:
matchLabels:
app: bwmis
template:
metadata:
creationTimestamp: null
labels:
app: bwmis
spec:
containers:
- image: timinglee/myapp:v1
name: myapp
---
apiVersion: v1
kind: Service
metadata:
labels:
app: bwmis
name: bwmis
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: bwmis
type: NodePort
[root@k8s-master ~]# kubectl apply -f bwmis.yaml
deployment.apps/bwmis created
service/bwmis created
[root@k8s-master ~]# kubectl get services bwmis
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
bwmis NodePort 10.101.128.31 <none> 80:32676/TCP 18s
# NodePort binds a port on every cluster node; one port corresponds to one service
[root@k8s-master ~]# for i in {1..5}
> do curl 172.25.250.129:32676/hostname.html
> done
bwmis-c49d689bd-v27bl
bwmis-c49d689bd-kkjpd
bwmis-c49d689bd-v27bl
bwmis-c49d689bd-kkjpd
bwmis-c49d689bd-v27bl
# The default NodePort range is 30000-32767; a value outside it is rejected
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: bwmis
name: bwmis
spec:
replicas: 2
selector:
matchLabels:
app: bwmis
template:
metadata:
creationTimestamp: null
labels:
app: bwmis
spec:
containers:
- image: timinglee/myapp:v1
name: myapp
---
apiVersion: v1
kind: Service
metadata:
labels:
app: bwmis
name: bwmis
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
nodePort: 33333
selector:
app: bwmis
type: NodePort
[root@k8s-master ~]# kubectl apply -f bwmis.yaml
deployment.apps/bwmis created
The Service "bwmis" is invalid: spec.ports[0].nodePort: Invalid value: 33333: provided port is not in the valid range. The range of valid ports is 30000-32767
[root@k8s-master ~]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --service-node-port-range=30000-40000 # add the "--service-node-port-range=" flag; the port range can be customized
# After this change kube-apiserver restarts automatically; wait until it is back up before operating the cluster again
[root@k8s-master ~]# kubectl apply -f bwmis.yaml
deployment.apps/bwmis unchanged
service/bwmis created
[root@k8s-master ~]# kubectl get service bwmis
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
bwmis NodePort 10.108.138.124 <none> 80:33333/TCP 28s
LoadBalancer
[root@k8s-master ~]# vim timinglee.yaml
---
apiVersion: v1
kind: Service
metadata:
labels:
app: timinglee-service
name: timinglee-service
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: timinglee
type: LoadBalancer
[root@k8s2 service]# kubectl apply -f myapp.yml
# By default no external IP can be allocated, so EXTERNAL-IP stays pending
MetalLB
1. Set ipvs mode
[root@k8s-master ~]# kubectl edit cm -n kube-system kube-proxy
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
strictARP: true
[root@k8s-master ~]# kubectl -n kube-system get pods | awk '/kube-proxy/{system("kubectl -n kube-system delete pods "$1)}'
2. Download the deployment manifest
[root@k8s2 metallb]# wget https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
3. Change the image paths in the manifest so they match the harbor registry
[root@k8s-master ~]# vim metallb-native.yaml
...
image: metallb/controller:v0.14.8
image: metallb/speaker:v0.14.8
[root@k8s-master metalLB]# docker tag quay.io/metallb/controller:v0.14.8 bwmis.org/metallb/controller:v0.14.8
[root@k8s-master metalLB]# docker push bwmis.org/metallb/controller:v0.14.8
[root@k8s-master metalLB]# kubectl -n metallb-system get pods
NAME READY STATUS RESTARTS AGE
controller-65957f77c8-2k9w4 1/1 Running 0 27s
speaker-6nwbc 1/1 Running 0 27s
speaker-qngsn 1/1 Running 0 27s
speaker-vxkj4 1/1 Running 0 27s
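Before a LoadBalancer Service can receive an address, MetalLB still needs an address pool and an L2 advertisement. A minimal sketch, assuming the resource names (l2-pool, l2-adv) are illustrative and the range covers 172.25.250.15:
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: l2-pool
  namespace: metallb-system
spec:
  addresses:
  - 172.25.250.15-172.25.250.25
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2-adv
  namespace: metallb-system
spec:
  ipAddressPools:
  - l2-pool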
[root@k8s-master ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3d4h
timinglee-service LoadBalancer 10.106.114.238 172.25.250.15 80:36612/TCP 4m16s
[root@k8s-master ~]# curl 172.25.250.15
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
ExternalName
apiVersion: v1
kind: Service
metadata:
labels:
app: timinglee-service
name: timinglee-service
spec:
selector:
app: timinglee
type: ExternalName
externalName: www.baidu.com
[root@k8s-master ~]# kubectl apply -f bwmis.yaml
deployment.apps/timinglee unchanged
service/timinglee-service configured
[root@k8s-master ~]# kubectl get svc timinglee-service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
timinglee-service ExternalName <none> www.baidu.com <none> 30m
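An ExternalName Service only publishes a CNAME record in cluster DNS; a quick check (assumed command, the answer should point at www.baidu.com):
[root@k8s-master ~]# dig timinglee-service.default.svc.cluster.local CNAME @10.96.0.10 +short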
ingress-nginx functionality
[root@k8s-master ~]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.11.2/deploy/static/provider/baremetal/deploy.yaml
[root@k8s-master ~]# vim deploy.yaml
445 image: ingress-nginx/controller:v1.11.2
546 image: ingress-nginx/kube-webhook-certgen:v1.4.3
599 image: ingress-nginx/kube-webhook-certgen:v1.4.3
# apply the edited manifest (kubectl apply -f deploy.yaml) before checking the pods
[root@k8s-master ingress-1.11.2]# kubectl -n ingress-nginx get pods
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-fxhhp 0/1 Completed 0 29h
ingress-nginx-admission-patch-29bbc 0/1 Completed 1 29h
ingress-nginx-controller-bb7d8f97c-9htr2 1/1 Running 3 (150m ago) 29h
[root@k8s-master ~]# kubectl -n ingress-nginx edit svc ingress-nginx-controller
49 type: LoadBalancer
[root@k8s-master ingress-1.11.2]# vim bwmis.ingress.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: test-ingress
spec:
ingressClassName: nginx
rules:
- http:
paths:
- backend:
service:
name: timinglee-svc
port:
number: 80
path: /
pathType: Prefix
[root@k8s-master ingress-1.11.2]# kubectl apply -f bwmis.ingress.yml
ingress.networking.k8s.io/webcluster unchanged
[root@k8s-master ingress-1.11.2]# kubectl get ingress
NAME CLASS HOSTS ADDRESS PORTS AGE
webcluster nginx * 172.25.250.132 80 29h
[root@k8s-master ingress-1.11.2]# for i in {1..5}; do curl 172.25.250.16/hostname.html; done
myapp1-7685cb55cf-b9rdg
myapp1-7685cb55cf-npx8t
myapp1-7685cb55cf-b9rdg
myapp1-7685cb55cf-npx8t
myapp1-7685cb55cf-b9rdg
Advanced ingress usage
Path-based routing
[root@k8s-master ~]# vim bwmis.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: myapp1
name: myapp1
spec:
replicas: 2
selector:
matchLabels:
app: myapp1
template:
metadata:
labels:
app: myapp1
spec:
containers:
- image: timinglee/myapp:v1
name: myapp1
---
[root@k8s-master ~]# vim bwmis2.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: my-app1
name: my-app1
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: myapp1
type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: myapp2
name: myapp2
spec:
replicas: 2
selector:
matchLabels:
app: myapp2
template:
metadata:
labels:
app: myapp2
spec:
containers:
- image: timinglee/myapp:v2
name: myapp2
---
apiVersion: v1
kind: Service
metadata:
labels:
app: my-app2
name: my-app2
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: myapp2
type: LoadBalancer
[root@k8s-master ingress-1.11.2]# vim ingress1.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
name: ingress1
spec:
ingressClassName: nginx
rules:
- host: haha.org
http:
paths:
- path: /v1
pathType: Prefix
backend:
service:
name: my-app1
port:
number: 80
- path: /v2
pathType: Prefix
backend:
service:
name: my-app2
port:
number: 80
[root@k8s-master ingress-1.11.2]# curl haha.org/v1
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@k8s-master ingress-1.11.2]# curl haha.org/v2
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
# What the nginx.ingress.kubernetes.io/rewrite-target: / annotation achieves
[root@k8s-master ingress-1.11.2]# curl haha.org/v2/aaa
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
[root@k8s-master ingress-1.11.2]# curl haha.org/v2/bbb
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
[root@k8s-master ingress-1.11.2]# curl haha.org/v1/uuuaadsa
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@k8s-master ingress-1.11.2]# vim ingress2.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
name: ingress2
spec:
ingressClassName: nginx
rules:
- host: www.org
http:
paths:
- path: /v1
pathType: Prefix
backend:
service:
name: my-app1
port:
number: 80
- host: aaa.org
http:
paths:
- path: /v2
pathType: Prefix
backend:
service:
name: my-app2
port:
number: 80
[root@k8s-master ingress-1.11.2]# kubectl describe ingress ingress2
Name: ingress2
Labels: <none>
Namespace: default
Address: 172.25.250.132
Ingress Class: nginx
Default backend: <default>
Rules:
Host Path Backends
---- ---- --------
www.org
/v1 my-app1:80 (10.244.1.164:80,10.244.2.45:80)
aaa.org
/v2 my-app2:80 (10.244.1.165:80,10.244.2.46:80)
Annotations: nginx.ingress.kubernetes.io/rewrite-target: /
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 11s (x2 over 45s) nginx-ingress-controller Scheduled for sync
[root@k8s-master ingress-1.11.2]# curl www.org/v1
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@k8s-master ingress-1.11.2]# curl aaa.org/v2
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
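ingress3 below terminates TLS with the secret web-tls-secret, which is not created in the steps shown; a minimal sketch using a self-signed certificate (file names assumed):
[root@k8s-master ingress-1.11.2]# openssl req -newkey rsa:2048 -nodes -keyout tls.key -x509 -days 365 -subj "/CN=myapp-tls.bwmis.org" -out tls.crt
[root@k8s-master ingress-1.11.2]# kubectl create secret tls web-tls-secret --cert tls.crt --key tls.key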
[root@k8s-master ingress-1.11.2]# vim ingress3.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
name: ingress3
spec:
tls:
- hosts:
- myapp-tls.bwmis.org
secretName: web-tls-secret
ingressClassName: nginx
rules:
- host: myapp-tls.bwmis.org
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-app1
port:
number: 80
[root@k8s-master ingress-1.11.2]# curl -k https://myapp-tls.bwmis.org
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
Setting up basic-auth authentication
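ingress4 references the secret auth-web; a minimal sketch of building it with htpasswd (user lee / password 123, matching the curl test further down; htpasswd comes from httpd-tools):
[root@k8s-master ingress-1.11.2]# dnf install httpd-tools -y
[root@k8s-master ingress-1.11.2]# htpasswd -cm auth lee        # the file must be named "auth"; enter 123 at the prompt
[root@k8s-master ingress-1.11.2]# kubectl create secret generic auth-web --from-file auth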
[root@k8s-master ingress-1.11.2]# vim ingress4.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/auth-type: basic
nginx.ingress.kubernetes.io/auth-secret: auth-web
nginx.ingress.kubernetes.io/auth-realm: "Please input username and password"
name: ingress4
spec:
tls:
- hosts:
- myapp-tls.bwmis.org
secretName: web-tls-secret
ingressClassName: nginx
rules:
- host: myapp-tls.bwmis.org
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-app1
port:
number: 80
[root@k8s-master ingress-1.11.2]# kubectl apply -f ingress4.yml
ingress.networking.k8s.io/ingress4 created
[root@k8s-master ingress-1.11.2]# kubectl describe ingress ingress4
Name: ingress4
Labels: <none>
Namespace: default
Address: 172.25.250.132
Ingress Class: nginx
Default backend: <default>
TLS:
web-tls-secret terminates myapp-tls.bwmis.org
Rules:
Host Path Backends
---- ---- --------
myapp-tls.bwmis.org
/ my-app1:80 (10.244.1.167:80,10.244.2.50:80)
Annotations: nginx.ingress.kubernetes.io/auth-realm: Please input username and password
nginx.ingress.kubernetes.io/auth-secret: auth-web
nginx.ingress.kubernetes.io/auth-type: basic
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 10s (x2 over 20s) nginx-ingress-controller Scheduled for sync
[root@k8s-master ingress-1.11.2]# curl -k https://myapp-tls.bwmis.org
<html>
<head><title>401 Authorization Required</title></head>
<body>
<center><h1>401 Authorization Required</h1></center>
<hr><center>nginx</center>
</body>
</html>
[root@k8s-master ingress-1.11.2]# vim ingress5.yml
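The first version of ingress5.yml is not reproduced here; judging from the describe output below it adds an app-root redirect on top of the ingress4 settings. A sketch consistent with that output:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/app-root: /hostname.html
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: auth-web
    nginx.ingress.kubernetes.io/auth-realm: "Please input username and password"
  name: ingress5
spec:
  tls:
  - hosts:
    - myapp-tls.bwmis.org
    secretName: web-tls-secret
  ingressClassName: nginx
  rules:
  - host: myapp-tls.bwmis.org
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: my-app1
            port:
              number: 80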
[root@k8s-master ingress-1.11.2]# kubectl apply -f ingress5.yml
ingress.networking.k8s.io/ingress5 created
[root@k8s-master ingress-1.11.2]# kubectl describe ingress ingress5
Name: ingress5
Labels: <none>
Namespace: default
Address:
Ingress Class: nginx
Default backend: <default>
TLS:
web-tls-secret terminates myapp-tls.bwmis.org
Rules:
Host Path Backends
---- ---- --------
myapp-tls.bwmis.org
/ my-app1:80 (10.244.1.167:80,10.244.2.50:80)
Annotations: nginx.ingress.kubernetes.io/app-root: /hostname.html
nginx.ingress.kubernetes.io/auth-realm: Please input username and password
nginx.ingress.kubernetes.io/auth-secret: auth-web
nginx.ingress.kubernetes.io/auth-type: basic
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 10s nginx-ingress-controller Scheduled for sync
[root@k8s-master ingress-1.11.2]# curl -Lk https://myapp-tls.bwmis.org -ulee:123
myapp1-7685cb55cf-frzmj
[root@k8s-master ingress-1.11.2]# curl -Lk https://myapp-tls.bwmis.org/lee/hostsname.html -ulee:123
<html>
<head><title>404 Not Found</title></head>
<body bgcolor="white">
<center><h1>404 Not Found</h1></center>
<hr><center>nginx/1.12.2</center>
</body>
</html>
[root@k8s-master ingress-1.11.2]# vim ingress5.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/auth-type: basic
nginx.ingress.kubernetes.io/auth-secret: auth-web
nginx.ingress.kubernetes.io/auth-realm: "Please input username and password"
name: ingress5
spec:
tls:
- hosts:
- myapp-tls.bwmis.org
secretName: web-tls-secret
ingressClassName: nginx
rules:
- host: myapp-tls.bwmis.org
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-app1
port:
number: 80
- path: /lee(/|$)(.*)
pathType: ImplementationSpecific
backend:
service:
name: my-app1
port:
number: 80
[root@k8s-master ingress-1.11.2]# curl -Lk https://myapp-tls.bwmis.org/ -ulee:123
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
Canary release based on the HTTP request header
[root@k8s-master ingress-1.11.2]# vim ingress6.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
name: myapp-v1-ingress
spec:
ingressClassName: nginx
rules:
- host: myapp-tls.bwmis.org
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-app1
port:
number: 80
[root@k8s-master ingress-1.11.2]# kubectl describe ingress myapp-v1-ingress
Name: myapp-v1-ingress
Labels: <none>
Namespace: default
Address:
Ingress Class: nginx
Default backend: <default>
Rules:
Host Path Backends
---- ---- --------
myapp-tls.bwmis.org
/ my-app1:80 (10.244.1.167:80,10.244.2.50:80)
Annotations: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 15s nginx-ingress-controller Scheduled for sync
[root@k8s-master ingress-1.11.2]# vim ingress7.yml
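ingress7.yml is not shown; a sketch consistent with the describe output below (canary enabled, keyed on the request header version: 2):
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp-v2-ingress
  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    nginx.ingress.kubernetes.io/canary-by-header: version
    nginx.ingress.kubernetes.io/canary-by-header-value: "2"
spec:
  ingressClassName: nginx
  rules:
  - host: myapp-tls.bwmis.org
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: my-app2
            port:
              number: 80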
[root@k8s-master ingress-1.11.2]# kubectl apply -f ingress7.yml
ingress.networking.k8s.io/myapp-v2-ingress created
[root@k8s-master ingress-1.11.2]# kubectl describe ingress myapp-v2-ingress
Name: myapp-v2-ingress
Labels: <none>
Namespace: default
Address:
Ingress Class: nginx
Default backend: <default>
Rules:
Host Path Backends
---- ---- --------
myapp-tls.bwmis.org
/ my-app2:80 (10.244.1.166:80,10.244.2.48:80)
Annotations: nginx.ingress.kubernetes.io/canary: true
nginx.ingress.kubernetes.io/canary-by-header: version
nginx.ingress.kubernetes.io/canary-by-header-value: 2
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 14s nginx-ingress-controller Scheduled for sync
[root@k8s-master ingress-1.11.2]# curl myapp-tls.bwmis.org
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@k8s-master ingress-1.11.2]# curl -H "version:2" myapp-tls.bwmis.org
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
Canary release based on weights
[root@k8s-master ingress-1.11.2]# vim ingress8.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myapp-v2-ingress
annotations:
nginx.ingress.kubernetes.io/canary: "true"
nginx.ingress.kubernetes.io/canary-weight: "10"
nginx.ingress.kubernetes.io/canary-weight-total: "100"
spec:
ingressClassName: nginx
rules:
- host: myapp-tls.bwmis.org
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-app2
port:
number: 80
[root@k8s-master ingress-1.11.2]# vim check_ingress.sh
#!/bin/bash
v1=0
v2=0
for (( i=0; i<100; i++ ))
do
    response=`curl -s myapp-tls.bwmis.org | grep -c v1`
    v1=`expr $v1 + $response`
    v2=`expr $v2 + 1 - $response`
done
echo "v1:$v1, v2:$v2"
[root@k8s-master ingress-1.11.2]# bash check_ingress.sh
v1:93, v2:7
[root@k8s-master ingress-1.11.2]# kubectl create cm lee-config --from-literal fname=bwmis --from-literal lname=lee
configmap/lee-config created
[root@k8s-master ingress-1.11.2]# kubectl describe cm lee-config
Name: lee-config
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
fname:
----
bwmis
lname:
----
lee
BinaryData
====
Events: <none>
[root@k8s-master ingress-1.11.2]# cat /etc/resolv.conf
# Generated by NetworkManager
search localdomain
nameserver 172.25.250.2
[root@k8s-master ingress-1.11.2]# kubectl create cm lee2-config --from-file /etc/resolv.conf
configmap/lee2-config created
[root@k8s-master ingress-1.11.2]# kubectl describe cm lee2-config
Name: lee2-config
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
resolv.conf:
----
# Generated by NetworkManager
search localdomain
nameserver 172.25.250.2
BinaryData
====
Events: <none>
[root@k8s-master ~]# kubectl create cm lee4-config --from-literal do_host=172.25.250.70 --from-literal db_port=3306 --dry-run=client -o yaml > lee-config.yaml
[root@k8s-master ~]# vim lee-config.yaml
[root@k8s-master ~]# cat lee-config.yaml
apiVersion: v1
data:
db_port: "3306"
do_host: 172.25.250.70
kind: ConfigMap
metadata:
creationTimestamp: null
name: lee4-config
[root@k8s-master ~]# kubectl apply -f lee-config.yaml
configmap/lee4-config created
[root@k8s-master ~]# kubectl describe cm lee4-config
Name: lee4-config
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
db_port:
----
3306
do_host:
----
172.25.250.70
BinaryData
====
Events: <none>
[root@k8s-master ~]# vim testpod1.yml
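testpod1.yml is not reproduced; the key1/key2 variables in the log output below suggest single keys of lee4-config injected through configMapKeyRef. A sketch consistent with that:
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: testpod
  name: testpod
spec:
  containers:
  - image: busyboxplus:latest
    name: testpod
    command: ["/bin/sh", "-c", "env"]
    env:
    - name: key1
      valueFrom:
        configMapKeyRef:
          name: lee4-config
          key: do_host
    - name: key2
      valueFrom:
        configMapKeyRef:
          name: lee4-config
          key: db_port
  restartPolicy: Never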
[root@k8s-master ~]# kubectl delete -f testpod1.yml
pod "testpod" deleted
[root@k8s-master ~]# kubectl apply -f testpod1.yml
pod/testpod created
[root@k8s-master ~]# kubectl describe cm lee4-config
[root@k8s-master ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
myapp1-7685cb55cf-frzmj 1/1 Running 1 (18h ago) 18h
myapp1-7685cb55cf-zj67h 1/1 Running 1 (18h ago) 18h
myapp2-54b6db5f88-bzwmp 1/1 Running 1 (18h ago) 18h
myapp2-54b6db5f88-d74d7 1/1 Running 1 (18h ago) 18h
pod 0/1 CrashLoopBackOff 22 (72s ago) 90m
testpod 0/1 Completed 0 18s
timinglee-c7c6f676b-chcfh 1/1 Running 4 (18h ago) 2d1h
timinglee-c7c6f676b-d72m9 1/1 Running 4 (18h ago) 2d1h
[root@k8s-master ~]# kubectl logs pods/testpod
KUBERNETES_SERVICE_PORT=443
KUBERNETES_PORT=tcp://10.96.0.1:443
MY_APP1_SERVICE_PORT=80
MY_APP1_PORT=tcp://10.97.11.219:80
MY_APP2_SERVICE_PORT=80
MY_APP2_PORT=tcp://10.106.96.145:80
HOSTNAME=testpod
SHLVL=1
HOME=/
MY_APP1_PORT_80_TCP_ADDR=10.97.11.219
MY_APP2_PORT_80_TCP_ADDR=10.106.96.145
MY_APP1_PORT_80_TCP_PORT=80
MY_APP2_PORT_80_TCP_PORT=80
MY_APP1_PORT_80_TCP_PROTO=tcp
MY_APP2_PORT_80_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
MY_APP1_PORT_80_TCP=tcp://10.97.11.219:80
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
MY_APP2_PORT_80_TCP=tcp://10.106.96.145:80
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
key1=172.25.250.70
key2=3306
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT_HTTPS=443
PWD=/
KUBERNETES_SERVICE_HOST=10.96.0.1
MY_APP1_SERVICE_HOST=10.97.11.219
MY_APP2_SERVICE_HOST=10.106.96.145
[root@k8s-master ~]# vim testpod2.yml
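testpod2.yml is not reproduced either; since do_host and db_port show up in the log under their original key names, the whole ConfigMap is pulled in with envFrom. A sketch:
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: testpod
  name: testpod
spec:
  containers:
  - image: busyboxplus:latest
    name: testpod
    command: ["/bin/sh", "-c", "env"]
    envFrom:
    - configMapRef:
        name: lee4-config
  restartPolicy: Never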
[root@k8s-master ~]# kubectl delete -f testpod1.yml
pod "testpod" deleted
[root@k8s-master ~]# kubectl apply -f testpod2.yml
pod/testpod created
[root@k8s-master ~]# kubectl logs pods/testpod
KUBERNETES_PORT=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT=443
MY_APP1_SERVICE_PORT=80
MY_APP1_PORT=tcp://10.97.11.219:80
MY_APP2_PORT=tcp://10.106.96.145:80
MY_APP2_SERVICE_PORT=80
HOSTNAME=testpod
SHLVL=1
HOME=/
db_port=3306
MY_APP1_PORT_80_TCP_ADDR=10.97.11.219
MY_APP2_PORT_80_TCP_ADDR=10.106.96.145
MY_APP1_PORT_80_TCP_PORT=80
do_host=172.25.250.70
MY_APP1_PORT_80_TCP_PROTO=tcp
MY_APP2_PORT_80_TCP_PORT=80
MY_APP2_PORT_80_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
MY_APP1_PORT_80_TCP=tcp://10.97.11.219:80
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
KUBERNETES_PORT_443_TCP_PORT=443
MY_APP2_PORT_80_TCP=tcp://10.106.96.145:80
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT_HTTPS=443
PWD=/
KUBERNETES_SERVICE_HOST=10.96.0.1
MY_APP1_SERVICE_HOST=10.97.11.219
MY_APP2_SERVICE_HOST=10.106.96.145
[root@k8s-master ~]# kubectl delete -f testpod2.yml
pod "testpod" deleted使用configmap填充环境变量
[root@k8s-master ~]# vim testpod3.yml
apiVersion: v1
kind: Pod
metadata:
labels:
run: testpod
name: testpod
spec:
containers:
- image: busyboxplus:latest
name: testpod
command:
- /bin/sh
- -c
- echo ${do_host} ${db_port}
envFrom:
- configMapRef:
name: lee4-config
restartPolicy: Never
[root@k8s-master ~]# kubectl apply -f testpod3.yml
pod/testpod created
[root@k8s-master ~]# kubectl logs pods/testpod
3306
[root@k8s-master ~]# vim testpod3.yml
[root@k8s-master ~]# kubectl delete -f testpod3.yml
pod "testpod" deleted
[root@k8s-master ~]# kubectl apply -f testpod3.yml
pod/testpod created
[root@k8s-master ~]# kubectl logs pods/testpod
172.25.250.70 3306
[root@k8s-master ~]# kubectl delete -f testpod3.yml
pod "testpod" deleted通过数据卷使用configmap
[root@k8s-master ~]# vim testpod4.ymlapiVersion: v1
kind: Pod
metadata:
labels:
run: testpod
name: testpod
spec:
containers:
- image: busyboxplus:latest
name: testpod
command:
- /bin/sh
- -c
- cat /config/do_host
volumeMounts:
- name: config-volume
mountPath: /config
volumes:
- name: config-volume
configMap:
name: lee4-config
restartPolicy: Never
[root@k8s-master ~]# kubectl apply -f testpod4.yml
pod/testpod created
[root@k8s-master ~]# kubectl logs testpod
172.25.250.70
Using a ConfigMap to fill a pod's configuration file
[root@k8s-master ~]# vim nginx.conf
server {
listen 8000;
server_name _;
root /usr/share/nginx/html;
index index.html;
}
[root@k8s-master ~]# kubectl create cm nginx-conf --from-file nginx.conf
configmap/nginx-conf created
[root@k8s-master ~]# kubectl describe cm nginx-conf
Name: nginx-conf
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
nginx.conf:
----
server {
listen 8000;
server_name _;
root /usr/share/nginx/html;
index index.html;
}
BinaryData
====
Events: <none>
[root@k8s-master ~]# kubectl create deployment nginx --image nginx:latest --replicas 1 --dry-run=client -o yaml > nginx.yml
[root@k8s-master ~]# cat nginx.yml
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
app: nginx
name: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: nginx
spec:
containers:
- image: nginx:latest
name: nginx
resources: {}
status: {}
[root@k8s-master ~]# vim nginx.yml
[root@k8s-master ~]# kubectl apply -f nginx.yml
error: error parsing nginx.yml: error converting YAML to JSON: yaml: line 22: mapping values are not allowed in this context
[root@k8s-master ~]# vim nginx.yml
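The edit that makes the next apply succeed is not shown; what it has to add is a volume that projects the nginx-conf ConfigMap into the container. A sketch of the pod-template part (mounting under /etc/nginx/conf.d is an assumption that lets nginx pick the file up automatically):
    spec:
      containers:
      - image: nginx:latest
        name: nginx
        volumeMounts:
        - name: config-volume
          mountPath: /etc/nginx/conf.d
      volumes:
      - name: config-volume
        configMap:
          name: nginx-conf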
[root@k8s-master ~]# kubectl apply -f nginx.yml
deployment.apps/nginx created
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
myapp1-7685cb55cf-frzmj 1/1 Running 1 (18h ago) 18h 10.244.2.50 k8s-node2 <none> <none>
myapp1-7685cb55cf-zj67h 1/1 Running 1 (18h ago) 18h 10.244.1.167 k8s-node1 <none> <none>
myapp2-54b6db5f88-bzwmp 1/1 Running 1 (18h ago) 18h 10.244.1.166 k8s-node1 <none> <none>
myapp2-54b6db5f88-d74d7 1/1 Running 1 (18h ago) 18h 10.244.2.48 k8s-node2 <none> <none>
nginx-8487c65cfc-whvqq 1/1 Running 0 8s 10.244.1.180 k8s-node1 <none> <none>
pod 0/1 CrashLoopBackOff 24 (5m5s ago) 104m 10.244.1.170 k8s-node1 <none> <none>
testpod 0/1 Completed 0 7m3s 10.244.1.179 k8s-node1 <none> <none>
timinglee-c7c6f676b-chcfh 1/1 Running 4 (18h ago) 2d2h 10.244.1.168 k8s-node1 <none> <none>
timinglee-c7c6f676b-d72m9 1/1 Running 4 (18h ago) 2d2h 10.244.2.51 k8s-node2 <none> <none>
[root@k8s-master ~]# curl 10.244.1.180:8000
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p><p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p><p><em>Thank you for using nginx.</em></p>
</body>
</html>
Introduction to Secrets functionality
[root@k8s-master ~]# mkdir secrets
[root@k8s-master ~]# cd secrets/
[root@k8s-master secrets]# echo -n bwmis > username.txt
[root@k8s-master secrets]# echo -n 123 > password.txt
[root@k8s-master secrets]# kubectl create secret generic userlist --from-file username.txt --from-file password.txt
secret/userlist created
[root@k8s-master secrets]# kubectl get secrets userlist -o yaml
apiVersion: v1
data:
password.txt: MTIz
username.txt: YndtaXM=
kind: Secret
metadata:
creationTimestamp: "2024-09-08T09:07:15Z"
name: userlist
namespace: default
resourceVersion: "276830"
uid: 8a113b59-4802-41ba-9f80-ca6fce110208
type: Opaque
[root@k8s-master secrets]# echo -n bwmis | base64
YndtaXM=
[root@k8s-master secrets]# echo -n lee | base64
bGVl
[root@k8s-master secrets]# kubectl create secret generic userlist --dry-run=client -o yaml > userlist.yml
[root@k8s-master secrets]# vim userlist.yml
[root@k8s-master secrets]# vim userlist.yml
[root@k8s-master secrets]# vim userlist.yml
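userlist.yml itself is not reproduced; a sketch consistent with the base64 values generated above and with the describe output below (username/password keys added alongside the original files):
apiVersion: v1
kind: Secret
metadata:
  name: userlist
type: Opaque
data:
  username: YndtaXM=
  password: bGVl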
[root@k8s-master secrets]# kubectl apply -f userlist.yml
Warning: resource secrets/userlist is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
secret/userlist configured
[root@k8s-master secrets]# vim userlist.yml
[root@k8s-master secrets]# kubectl describe secrets userlist
Name: userlist
Namespace: default
Labels: <none>
Annotations: <none>
Type: Opaque
Data
====
username.txt: 5 bytes
password: 3 bytes
password.txt: 3 bytes
username: 5 bytes
Mounting a Secret into a volume
[root@k8s-master secrets]# kubectl run nginx --image nginx:latest --dry-run=client -o yaml > pod1.yml
[root@k8s-master secrets]# vim pod1.yml
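pod1.yml is not shown; judging from the files visible under /secret below, it mounts the userlist secret as a volume. A sketch:
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx:latest
    name: nginx
    volumeMounts:
    - name: secrets
      mountPath: /secret
      readOnly: true
  volumes:
  - name: secrets
    secret:
      secretName: userlist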
[root@k8s-master secrets]# kubectl apply -f pod1.yml
pod/nginx created
[root@k8s-master secrets]# kubectl exec pods/nginx -it -- /bin/bash
root@nginx:/# cat /secret/
cat: /secret/: Is a directory
root@nginx:/# cd /secret/
root@nginx:/secret# ls
password password.txt username username.txt
root@nginx:/secret# cat password
lee
root@nginx:/secret# cat username
root@nginx:/secret# exit
exit
[root@k8s-master secrets]# vim pod2.yml
[root@k8s-master secrets]# kubectl apply -f pod2.yml
pod/nginx1 created
[root@k8s-master secrets]# kubectl exec pods/nginx1 -it -- /bin/bash
root@nginx1:/# cd secret/
root@nginx1:/secret# ls
my-users
root@nginx1:/secret# cd my-users
root@nginx1:/secret/my-users# ls
username
root@nginx1:/secret/my-users# cat username
bwmis
root@nginx1:/secret/my-users#
Mapping secret keys to a specified path
[root@k8s-master secrets]# vim pod2.yml
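pod2.yml maps a single key of the secret to a chosen path; a sketch consistent with the my-users/username file seen below:
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx1
  name: nginx1
spec:
  containers:
  - image: nginx:latest
    name: nginx1
    volumeMounts:
    - name: secrets
      mountPath: /secret
      readOnly: true
  volumes:
  - name: secrets
    secret:
      secretName: userlist
      items:
      - key: username
        path: my-users/username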
[root@k8s-master secrets]# kubectl apply -f pod2.yml
pod/nginx1 created
[root@k8s-master secrets]# kubectl exec pods/nginx1 -it -- /bin/bash
root@nginx1:/# cd secret/
root@nginx1:/secret# ls
my-users
root@nginx1:/secret# cd my-users
root@nginx1:/secret/my-users# ls
username
root@nginx1:/secret/my-users# cat username
bwmis
root@nginx1:/secret/my-users# exit
exit
[root@k8s-master secrets]# vim pod3.yaml
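pod3.yaml is not reproduced; the USERNAME/PASS variables in the log below point at env entries drawn from the secret via secretKeyRef. A sketch:
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: busybox
  name: busybox
spec:
  containers:
  - image: busybox
    name: busybox
    command: ["/bin/sh", "-c", "env"]
    env:
    - name: USERNAME
      valueFrom:
        secretKeyRef:
          name: userlist
          key: username
    - name: PASS
      valueFrom:
        secretKeyRef:
          name: userlist
          key: password
  restartPolicy: Never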
[root@k8s-master secrets]# kubectl apply -f pod3.yaml
pod/busybox created
[root@k8s-master secrets]# kubectl logs pods/busybox
KUBERNETES_PORT=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT=443
MY_APP1_SERVICE_PORT=80
MY_APP1_PORT=tcp://10.97.11.219:80
HOSTNAME=busybox
MY_APP2_SERVICE_PORT=80
MY_APP2_PORT=tcp://10.106.96.145:80
SHLVL=1
HOME=/root
MY_APP1_PORT_80_TCP_ADDR=10.97.11.219
MY_APP2_PORT_80_TCP_ADDR=10.106.96.145
MY_APP1_PORT_80_TCP_PORT=80
MY_APP2_PORT_80_TCP_PORT=80
MY_APP1_PORT_80_TCP_PROTO=tcp
MY_APP2_PORT_80_TCP_PROTO=tcp
USERNAME=bwmis
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
MY_APP1_PORT_80_TCP=tcp://10.97.11.219:80
MY_APP2_PORT_80_TCP=tcp://10.106.96.145:80
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
PASS=lee
KUBERNETES_SERVICE_PORT_HTTPS=443
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_SERVICE_HOST=10.96.0.1
PWD=/
MY_APP1_SERVICE_HOST=10.97.11.219
MY_APP2_SERVICE_HOST=10.106.96.145
Storing docker registry authentication information
[root@k8s-master secrets]# docker login bwmis.org
Authenticating with existing credentials...
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credential-stores
Login Succeeded
[root@k8s-master secrets]# cd
[root@k8s-master ~]# docker load -i game2048.tar
011b303988d2: Loading layer [==================================================>] 5.05MB/5.05MB
36e9226e74f8: Loading layer [==================================================>] 51.46MB/51.46MB
192e9fad2abc: Loading layer [==================================================>] 3.584kB/3.584kB
6d7504772167: Loading layer [==================================================>] 4.608kB/4.608kB
88fca8ae768a: Loading layer [==================================================>] 629.8kB/629.8kB
Loaded image: timinglee/game2048:latest
[root@k8s-master ~]# docker tag timinglee/game2048:latest bwmis.org/library/game2048:latest
[root@k8s-master ~]# docker push bwmis.org/library/game2048:latest
The push refers to repository [bwmis.org/library/game2048]
88fca8ae768a: Pushed
6d7504772167: Pushed
192e9fad2abc: Pushed
36e9226e74f8: Pushed
011b303988d2: Pushed
latest: digest: sha256:8a34fb9cb168c420604b6e5d32ca6d412cb0d533a826b313b190535c03fe9390 size: 1364
[root@k8s-master ~]# kubectl create secret docker-registry docker-auth --docker-server bwmis.org --docker-username admin --docker-password lee --docker-email bwmis@bwmis.org
secret/docker-auth created
[root@k8s-master ~]# vim pod3.yml
apiVersion: v1
kind: Pod
metadata:
labels:
run: game2048
name: game2048
spec:
containers:
- image: bwmis.org/library/game2048:latest
name: game2048
imagePullSecrets:
- name: docker-auth
[root@k8s-master ~]# kubectl apply -f pod3.yml
pod/game2048 created
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
busybox 0/1 Completed 0 10m
game2048 1/1 Running 0 21s
Volumes configuration management
emptyDir volumes
[root@k8s-master ~]# vim vo1.yml
apiVersion: v1
kind: Pod
metadata:
name: vol1
spec:
containers:
- image: busyboxplus:latest
name: vm1
command:
- /bin/sh
- -c
- sleep 30000000
volumeMounts:
- mountPath: /cache
name: cache-vol
- image: nginx:latest
name: vm2
volumeMounts:
- mountPath: /usr/share/nginx/html
name: cache-vol
volumes:
- name: cache-vol
emptyDir:
medium: Memory
sizeLimit: 100Mi
[root@k8s-master ~]# kubectl apply -f vo1.yml
pod/vol1 created
[root@k8s-master ~]# kubectl describe pods vol1
Name: vol1
Namespace: default
Priority: 0
Service Account: default
Node: k8s-node2/172.25.250.132
Start Time: Sun, 08 Sep 2024 18:55:25 +0800
Labels: <none>
Annotations: <none>
Status: Running
IP: 10.244.2.54
IPs:
IP: 10.244.2.54
Containers:
vm1:
Container ID: docker://7ec94e536380b54f4e7e0aff9366202196707ee3ae65b9ee3783ba2921c9ec8d
Image: busyboxplus:latest
Image ID: docker-pullable://bwmis.org/library/busyboxplus@sha256:9d1c242c1fd588a1b8ec4461d33a9ba08071f0cc5bb2d50d4ca49e430014ab06
Port: <none>
Host Port: <none>
Command:
/bin/sh
-c
sleep 30000000
State: Running
Started: Sun, 08 Sep 2024 18:55:26 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/cache from cache-vol (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pjzvp (ro)
vm2:
Container ID: docker://2dba9aa959828d615989a47e6e29cbc36b9bf86dbda5ad237e48daa4b1360803
Image: nginx:latest
Image ID: docker-pullable://nginx@sha256:04ba374043ccd2fc5c593885c0eacddebabd5ca375f9323666f28dfd5a9710e3
Port: <none>
Host Port: <none>
State: Running
Started: Sun, 08 Sep 2024 18:55:29 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/usr/share/nginx/html from cache-vol (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pjzvp (ro)
Conditions:
Type Status
PodReadyToStartContainers True
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
cache-vol:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium: Memory
SizeLimit: 100Mi
kube-api-access-pjzvp:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 8s default-scheduler Successfully assigned default/vol1 to k8s-node2
Normal Pulling 7s kubelet Pulling image "busyboxplus:latest"
Normal Pulled 7s kubelet Successfully pulled image "busyboxplus:latest" in 59ms (59ms including waiting). Image size: 12855024 bytes.
Normal Created 7s kubelet Created container vm1
Normal Started 7s kubelet Started container vm1
Normal Pulling 7s kubelet Pulling image "nginx:latest"
Normal Pulled 4s kubelet Successfully pulled image "nginx:latest" in 3.282s (3.282s including waiting). Image size: 187706879 bytes.
Normal Created 4s kubelet Created container vm2
Normal Started 4s kubelet Started container vm2
[root@k8s-master ~]# kubectl exec -it pods/vol1 -c vm1 -- /bin/sh
/ # cd /cache/
/cache # ls
/cache # curl loalhost
^C
/cache # curl localhost
<html>
<head><title>403 Forbidden</title></head>
<body>
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.27.1</center>
</body>
</html>
hostPath volumes
A hostPath volume mounts a file or directory from the node's filesystem into the Pod; the data is not deleted when the pod is removed
[root@k8s-master ~]# vim vo2.yml
apiVersion: v1
kind: Pod
metadata:
name: vol2
spec:
containers:
- image: nginx:latest
name: vm1
volumeMounts:
- mountPath: /usr/share/nginx/html
name: cache-vol
volumes:
- name: cache-vol
hostPath:
path: /data
type: DirectoryOrCreate
[root@k8s-master ~]# kubectl apply -f vo2.yml
pod/vol2 configured
Normal Started 8m3s kubelet Started container vm1
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
myapp1-7685cb55cf-frzmj 1/1 Running 1 (21h ago) 21h 10.244.2.50 k8s-node2 <none> <none>
myapp1-7685cb55cf-zj67h 1/1 Running 1 (21h ago) 21h 10.244.1.167 k8s-node1 <none> <none>
myapp2-54b6db5f88-bzwmp 1/1 Running 1 (21h ago) 21h 10.244.1.166 k8s-node1 <none> <none>
myapp2-54b6db5f88-d74d7 1/1 Running 1 (21h ago) 21h 10.244.2.48 k8s-node2 <none> <none>
timinglee-c7c6f676b-chcfh 1/1 Running 4 (21h ago) 2d4h 10.244.1.168 k8s-node1 <none> <none>
timinglee-c7c6f676b-d72m9 1/1 Running 4 (21h ago) 2d4h 10.244.2.51 k8s-node2 <none> <none>
vol1 2/2 Running 0 6m35s 10.244.1.189 k8s-node1 <none> <none>
vol2 1/1 Running 0 10m 10.244.1.188 k8s-node1 <none> <none>
[root@k8s-node1 ~]# echo bwmis > /data/index.html
[root@k8s-master ~]# curl 10.244.1.188
bwmis
[root@k8s-node1 ~]# ll /data/index.html
-rw-r--r--. 1 root root 6 Sep 8 19:46 /data/index.html
Deploy an NFS server host and install nfs-utils on all k8s nodes
[root@localhost ~]# systemctl enable --now nfs-server.service
Created symlink /etc/systemd/system/multi-user.target.wants/nfs-server.service → /usr/lib/systemd/system/nfs-server.service.
[root@localhost ~]# mkdir /nfsdata
[root@localhost ~]# vim /etc/exports
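The export line added to /etc/exports is not shown; a minimal sketch matching the showmount output (the mount options are an assumption):
/nfsdata    *(rw,sync,no_root_squash)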
[root@localhost ~]# showmount -e
Export list for localhost.localdomain:
/nfsdata *
[root@localhost ~]# vim /etc/exports
[root@localhost ~]# exportfs -rv
exporting *:/nfsdata
[root@localhost ~]# echo bwmis > /nfsdata/index.html
[root@k8s-master ~]# vim vo3.yml
apiVersion: v1
kind: Pod
metadata:
name: vol3
spec:
containers:
- image: nginx:latest
name: vm1
volumeMounts:
- mountPath: /usr/share/nginx/html
name: cache-vol
volumes:
- name: cache-vol
nfs:
server: 172.25.250.130
path: /nfsdata
# Install nfs-utils on all k8s nodes
[root@k8s-master & node1 & node2 ~]# dnf install nfs-utils -y
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
myapp1-7685cb55cf-frzmj 1/1 Running 1 (21h ago) 22h 10.244.2.50 k8s-node2 <none> <none>
myapp1-7685cb55cf-zj67h 1/1 Running 1 (21h ago) 22h 10.244.1.167 k8s-node1 <none> <none>
myapp2-54b6db5f88-bzwmp 1/1 Running 1 (21h ago) 22h 10.244.1.166 k8s-node1 <none> <none>
myapp2-54b6db5f88-d74d7 1/1 Running 1 (21h ago) 22h 10.244.2.48 k8s-node2 <none> <none>
timinglee-c7c6f676b-chcfh 1/1 Running 4 (21h ago) 2d5h 10.244.1.168 k8s-node1 <none> <none>
timinglee-c7c6f676b-d72m9 1/1 Running 4 (21h ago) 2d5h 10.244.2.51 k8s-node2 <none> <none>
vol3 1/1 Running 0 34s 10.244.1.190 k8s-node1 <none> <none>
[root@localhost ~]# echo bwmis > /nfsdata/index.html
[root@k8s-master ~]# kubectl apply -f vo3.yml
pod/vol3 configured
[root@k8s-master ~]# curl 10.244.1.190
bwmis
PersistentVolume (persistent volumes)
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv1
spec:
capacity:
storage: 5Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
nfs:
path: /nfsdata/pv1
server: 172.25.250.130
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv2
spec:
capacity:
storage: 15Gi
volumeMode: Filesystem
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
nfs:
path: /nfsdata/pv2
server: 172.25.250.130
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv3
spec:
capacity:
storage: 25Gi
volumeMode: Filesystem
accessModes:
- ReadOnlyMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
nfs:
path: /nfsdata/pv3
server: 172.25.250.130
[root@k8s-master ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE
pv1 5Gi RWO Retain Bound default/pvc1 nfs <unset> 29h
pv2 15Gi RWX Retain Bound default/pvc2 nfs <unset> 29h
pv3 25Gi ROX Retain Bound default/pvc3 nfs <unset> 29h
[root@k8s-master ~]# vim pvc.yml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc1
spec:
storageClassName: nfs
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc2
spec:
storageClassName: nfs
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc3
spec:
storageClassName: nfs
accessModes:
- ReadOnlyMany
resources:
requests:
storage: 15Gi
[root@k8s-master ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
pvc1 Bound pv1 5Gi RWO nfs <unset> 29h
pvc2 Bound pv2 15Gi RWX nfs <unset> 29h
pvc3 Bound pv3 25Gi ROX nfs <unset> 29h
apiVersion: v1
kind: Pod
metadata:
name: bwmis
spec:
containers:
- image: nginx
name: nginx
volumeMounts:
- mountPath: /usr/share/nginx/html
name: vol1
volumes:
- name: vol1
persistentVolumeClaim:
claimName: pvc1
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
bwmis 1/1 Running 0 8s 10.244.1.202 k8s-node1 <none> <none>
[root@k8s-master ~]# kubectl exec -it pods/bwmis -- /bin/bash
root@bwmis:/# curl localhost
<html>
<head><title>403 Forbidden</title></head>
<body>
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.27.1</center>
</body>
</html>
[root@localhost nfsdata]# echo bwmis > /nfsdata/pv1/index.html
[root@k8s-master ~]# kubectl exec -it pods/bwmis -- /bin/bash
root@bwmis:/# cd /usr/share/nginx/
root@bwmis:/usr/share/nginx# ls
html
root@bwmis:/usr/share/nginx# cd html/
root@bwmis:/usr/share/nginx/html# ls
index.html
root@bwmis:/usr/share/nginx/html# cat index.html
bwmis
root@bwmis:/usr/share/nginx/html# curl localhost
bwmis
StorageClass
Create the ServiceAccount and grant it permissions
apiVersion: v1
kind: Namespace
metadata:
name: nfs-client-provisioner
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
namespace: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: nfs-client-provisioner
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
namespace: nfs-client-provisioner
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
namespace: nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: nfs-client-provisioner
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
[root@k8s-master storageclass]# kubectl apply -f rbac.yml
namespace/nfs-client-provisioner created
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
[root@k8s-master storageclass]# kubectl -n nfs-client-provisioner get sa
NAME SECRETS AGE
default 0 12s
nfs-client-provisioner 0 12s
[root@k8s-master storageclass]# vim deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
namespace: nfs-client-provisioner
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-client-provisioner
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: bwmis.org/sig-storage/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: k8s-sigs.io/nfs-subdir-external-provisioner
- name: NFS_SERVER
value: 172.25.250.130
- name: NFS_PATH
value: /nfsdata
volumes:
- name: nfs-client-root
nfs:
server: 172.25.250.130
path: /nfsdata
[root@k8s-master storageclass]# kubectl -n nfs-client-provisioner get pod nfs-client-provisioner-dd47cd66b-lzksn
NAME READY STATUS RESTARTS AGE
nfs-client-provisioner-dd47cd66b-lzksn 1/1 Running 0 57s
Create the storage class
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-client
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
archiveOnDelete: "false"
[root@k8s-master storageclass]# kubectl apply -f class.yml
storageclass.storage.k8s.io/nfs-client created
[root@k8s-master storageclass]# kubectl get storageclasses.storage.k8s.io
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-client k8s-sigs.io/nfs-subdir-external-provisioner Delete Immediate false 20s
Create PVCs
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1G
[root@k8s-master storageclass]# kubectl apply -f pvc1.yml
persistentvolumeclaim/test-claim created
[root@k8s-master storageclass]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
test-claim Pending nfs-client <unset> 10s
[root@k8s-master storageclass]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
test-claim Bound pvc-a0cf861e-a105-4806-be2e-d072077cbf24 1G RWX nfs-client <unset> 8s
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: busybox
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/SUCCESS && exit 0 || exit 1"
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim
Set the default storage class
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc1
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc2
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc3
spec:
storageClassName: nfs-client
accessModes:
- ReadOnlyMany
resources:
requests:
storage: 15Gi
[root@k8s-master storageclass]# kubectl apply -f pvc1.yml
persistentvolumeclaim/pvc1 created
persistentvolumeclaim/pvc2 created
persistentvolumeclaim/pvc3 created
[root@k8s-master storageclass]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
pvc1 Bound pvc-0f12a21c-34f9-4280-b42b-c5435b748f11 1Gi RWO nfs-client <unset> 38s
pvc2 Bound pvc-90be563b-9982-4d5a-b0ad-bfa31d9b4797 10Gi RWX nfs-client <unset> 38s
pvc3 Bound pvc-1f4d1444-62cf-4c62-b972-030dc1f0d057 15Gi ROX nfs-client <unset> 38s
test-claim Bound pvc-a0cf861e-a105-4806-be2e-d072077cbf24 1G RWX nfs-client <unset> 41m
[root@k8s-master storageclass]# kubectl edit sc nfs-client
storageclass.kubernetes.io/is-default-class: "true"
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
apiVersion: v1
kind: Service
metadata:
name: nginx-svc
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
[root@k8s-master statefulset]# kubectl apply -f headless.yml
# Create the StatefulSet
[root@k8s-master statefulset]# vim statefulset.yml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
spec:
serviceName: "nginx-svc"
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
volumeClaimTemplates:
- metadata:
name: www
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
[root@k8s-master statefulset]# kubectl apply -f statefulset.yml
statefulset.apps/web configured
[root@k8s-master statefulset]# kubectl get pods
NAME READY STATUS RESTARTS AGE
test-pod 0/1 Completed 0 25m
timinglee-c7c6f676b-6v5l4 1/1 Running 2 (3h8m ago) 32h
timinglee-c7c6f676b-sjdz2 1/1 Running 2 (3h8m ago) 32h
web-0 1/1 Running 0 27s
web-1 1/1 Running 0 21s
web-2 1/1 Running 0 12s
[root@localhost ~]# ls /nfsdata/
default-test-claim-pvc-a0cf861e-a105-4806-be2e-d072077cbf24 default-www-web-2-pvc-4eaca201-0485-48d7-8182-1b4139613491 pv2
default-www-web-0-pvc-9851ad5d-d6b7-4935-bd74-bb66d23dfd84 index.html pv3
default-www-web-1-pvc-43a6efe0-4fec-4751-9f0e-6c7c83f8631b pv1
[root@localhost ~]# cd /nfsdata/
[root@localhost nfsdata]# echo web-0 > default-www-web-0-pvc-9851ad5d-d6b7-4935-bd74-bb66d23dfd84
-bash: default-www-web-0-pvc-9851ad5d-d6b7-4935-bd74-bb66d23dfd84: Is a directory
[root@localhost nfsdata]# echo web-0 > default-www-web-0-pvc-9851ad5d-d6b7-4935-bd74-bb66d23dfd84/index.html
[root@localhost nfsdata]# echo web-1 > default-www-web-1-pvc-43a6efe0-4fec-4751-9f0e-6c7c83f8631b/index.html
[root@localhost nfsdata]# echo web-2 > default-www-web-2-pvc-4eaca201-0485-48d7-8182-1b4139613491/index.html
[root@k8s-master statefulset]# kubectl delete -f statefulset.yml
statefulset.apps "web" deleted
[root@k8s-master statefulset]# kubectl apply -f statefulset.yml
statefulset.apps/web created
[root@k8s-master statefulset]# kubectl attach testpod -c testpod -i -t
If you don't see a command prompt, try pressing enter.
/ # curl web-0.nginx-svc
web-0
/ # curl web-1.nginx-svc
web-1
/ # curl web-2.nginx-svc
web-2
Scaling the StatefulSet up and down
[root@k8s-master statefulset]# kubectl scale statefulset web --replicas 0
statefulset.apps/web scaled
[root@k8s-master statefulset]# kubectl delete -f statefulset.yml
statefulset.apps "web" deleted
[root@k8s-master statefulset]# kubectl delete pvc --all
persistentvolumeclaim "test-claim" deleted
persistentvolumeclaim "www-web-0" deleted
persistentvolumeclaim "www-web-1" deleted
persistentvolumeclaim "www-web-2" deleted
How flannel cross-host communication works
# Default network routes
[root@k8s-master ~]# ip r
default via 172.25.250.2 dev ens160 proto dhcp src 172.25.250.129 metric 100
10.244.0.0/24 dev cni0 proto kernel scope link src 10.244.0.1
10.244.1.0/24 via 10.244.1.0 dev flannel.1 onlink
10.244.2.0/24 via 10.244.2.0 dev flannel.1 onlink
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
172.25.250.0/24 dev ens160 proto kernel scope link src 172.25.250.129 metric 100
# Bridge forwarding database (FDB)
[root@k8s-master ~]# bridge fdb
01:00:5e:00:00:01 dev ens160 self permanent
33:33:00:00:00:01 dev ens160 self permanent
33:33:ff:34:d4:e7 dev ens160 self permanent
33:33:00:00:00:01 dev docker0 self permanent
01:00:5e:00:00:6a dev docker0 self permanent
33:33:00:00:00:6a dev docker0 self permanent
01:00:5e:00:00:01 dev docker0 self permanent
02:42:35:f8:9f:8d dev docker0 vlan 1 master docker0 permanent
02:42:35:f8:9f:8d dev docker0 master docker0 permanent
33:33:00:00:00:01 dev kube-ipvs0 self permanent
de:b6:2c:be:21:41 dev flannel.1 dst 172.25.250.132 self permanent
22:6c:24:0e:85:2e dev flannel.1 dst 172.25.250.131 self permanent
33:33:00:00:00:01 dev cni0 self permanent
01:00:5e:00:00:6a dev cni0 self permanent
33:33:00:00:00:6a dev cni0 self permanent
01:00:5e:00:00:01 dev cni0 self permanent
33:33:ff:bb:9e:e5 dev cni0 self permanent
96:2d:88:bb:9e:e5 dev cni0 vlan 1 master cni0 permanent
96:2d:88:bb:9e:e5 dev cni0 master cni0 permanent
52:11:d3:4a:9d:40 dev veth32e833e0 master cni0
5a:dc:86:6b:82:66 dev veth32e833e0 vlan 1 master cni0 permanent
5a:dc:86:6b:82:66 dev veth32e833e0 master cni0 permanent
33:33:00:00:00:01 dev veth32e833e0 self permanent
01:00:5e:00:00:01 dev veth32e833e0 self permanent
33:33:ff:6b:82:66 dev veth32e833e0 self permanent
92:77:60:d8:c3:28 dev vethf787f239 master cni0
6a:81:e8:e4:9f:56 dev vethf787f239 vlan 1 master cni0 permanent
6a:81:e8:e4:9f:56 dev vethf787f239 master cni0 permanent
33:33:00:00:00:01 dev vethf787f239 self permanent
01:00:5e:00:00:01 dev vethf787f239 self permanent
33:33:ff:e4:9f:56 dev vethf787f239 self permanent
# ARP table
[root@k8s-master ~]# arp -n
Address HWtype HWaddress Flags Mask Iface
172.25.250.131 ether 00:0c:29:1f:c2:f0 C ens160
172.25.250.132 ether 00:0c:29:09:a4:f0 C ens160
172.25.250.130 ether 00:0c:29:98:e4:43 C ens160
10.244.2.0 ether de:b6:2c:be:21:41 CM flannel.1
10.244.0.57 ether 92:77:60:d8:c3:28 C cni0
10.244.1.0 ether 22:6c:24:0e:85:2e CM flannel.1
10.244.0.56 ether 52:11:d3:4a:9d:40 C cni0
172.25.250.2 ether 00:50:56:ec:89:b0 C ens160
172.25.250.1 ether 00:50:56:c0:00:08 C ens160
172.25.250.254 ether 00:50:56:e3:a4:8f C ens160
Backend modes supported by flannel
[root@k8s-master ~]# kubectl -n kube-flannel edit cm kube-flannel-cfg
"Type": "host-gw"
[root@k8s-master ~]# kubectl -n kube-flannel delete pod --all
pod "kube-flannel-ds-8x4hw" deleted
pod "kube-flannel-ds-lrwzt" deleted
pod "kube-flannel-ds-ngwvd" deleted
[root@k8s-master ~]# ip r
default via 172.25.250.2 dev ens160 proto dhcp src 172.25.250.129 metric 100
10.244.0.0/24 dev cni0 proto kernel scope link src 10.244.0.1
10.244.1.0/24 via 172.25.250.131 dev ens160
10.244.2.0/24 via 172.25.250.132 dev ens160
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
172.25.250.0/24 dev ens160 proto kernel scope link src 172.25.250.129 metric 100
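The backend type lives in the net-conf.json key of the kube-flannel-cfg ConfigMap; after the edit that section looks roughly like the sketch below (the Network value is the pod CIDR already used in this cluster):
net-conf.json: |
  {
    "Network": "10.244.0.0/16",
    "Backend": {
      "Type": "host-gw"
    }
  }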
Deploying Calico
[root@k8s-master calico]# docker tag calico/typha:v3.28.1 bwmis.org/calico/typha:v3.28.1
[root@k8s-master calico]# docker tag calico/kube-controllers:v3.28.1 bwmis.org/calico/kube-controllers:v3.28.1
[root@k8s-master calico]# docker tag calico/cni:v3.28.1 bwmis.org/calico/cni:v3.28.1
[root@k8s-master calico]# docker tag calico/node:v3.28.1 bwmis.org/calico/node:v3.28.1
[root@k8s-master calico]# vim calico.yaml
4835 image: calico/cni:v3.28.1
4835 image: calico/cni:v3.28.1
4906 image: calico/node:v3.28.1
4932 image: calico/node:v3.28.1
5160 image: calico/kube-controllers:v3.28.1
5249 - image: calico/typha:v3.28.1
4973 - name: CALICO_IPV4POOL_VXLAN
4974 value: "Never"
4999 - name: CALICO_IPV4POOL_CIDR
5000 value: "10.244.0.0/16"
5001 - name: CALICO_AUTODETECTION_METHOD
5002 value: "interface=eth0"
[root@k8s-master calico]# kubectl apply -f calico.yaml
[root@k8s-master calico]# kubectl -n kube-system get pods
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6849cb478c-w55j9 1/1 Running 0 12s
calico-node-cxbl7 0/1 Running 0 12s
calico-node-ntk6d 0/1 Running 0 12s
calico-node-vj9kd 0/1 Running 0 12s
calico-typha-fff9df85f-9n2mf 1/1 Running 0 12s
coredns-6cbd89557f-hd868 1/1 Running 22 (3h34m ago) 7d12h
coredns-6cbd89557f-wr8ks 1/1 Running 22 (3h34m ago) 7d12h
etcd-k8s-master 1/1 Running 22 (3h34m ago) 7d12h
kube-apiserver-k8s-master 1/1 Running 12 (3h34m ago) 4d9h
kube-controller-manager-k8s-master 1/1 Running 23 (3h34m ago) 7d12h
kube-proxy-42mqm 1/1 Running 8 (3h34m ago) 4d11h
kube-proxy-4tdgn 1/1 Running 8 (3h34m ago) 4d11h
kube-proxy-qmr25 1/1 Running 12 (3h34m ago) 4d11h
kube-scheduler-k8s-master 1/1 Running 23 (3h34m ago) 7d12h
[root@k8s-master calico]# kubectl run web --image timinglee/myapp:v1
pod/web created
[root@k8s-master calico]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
testpod 1/1 Running 2 (21m ago) 24m 10.244.2.71 k8s-node2 <none> <none>
timinglee-c7c6f676b-6v5l4 1/1 Running 2 (3h36m ago) 32h 10.244.2.69 k8s-node2 <none> <none>
timinglee-c7c6f676b-sjdz2 1/1 Running 2 (3h36m ago) 32h 10.244.1.199 k8s-node1 <none> <none>
web 1/1 Running 0 8s 10.244.169.129 k8s-node2 <none> <none>
[root@k8s-master calico]# curl 10.244.169.129
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
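To double-check that Calico has taken over pod networking, it is enough to confirm that one calico-node pod runs per node and that the default IP pool matches the CIDR configured above. A hedged sketch (the IPPool resource comes from the crd.projectcalico.org CRDs installed by calico.yaml):
[root@k8s-master calico]# kubectl -n kube-system get ds calico-node
[root@k8s-master calico]# kubectl get ippools.crd.projectcalico.org   #the default pool is created from CALICO_IPV4POOL_CIDR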
Kubernetes scheduling
[root@k8s-master scheduler]# kubectl run testpod --image timinglee/myapp:v1 --dry-run=client -o yaml > pod1.yml
[root@k8s-master scheduler]# vim pod1.yml
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
run: testpod
name: testpod
spec:
nodeName: k8s-node2
containers:
- image: timinglee/myapp:v1
name: testpod
[root@k8s-master scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
testpod 1/1 Running 2 (28m ago) 30m 10.244.2.71 k8s-node2 <none> <none>
nodeSelector (controlling node placement via labels)
[root@k8s-master scheduler]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
k8s-master Ready control-plane 7d13h v1.30.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node.kubernetes.io/exclude-from-external-load-balancers=
k8s-node1 Ready <none> 7d12h v1.30.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node1,kubernetes.io/os=linux
k8s-node2 Ready <none> 7d12h v1.30.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node2,kubernetes.io/os=linux
[root@k8s-master scheduler]# kubectl label nodes k8s-node1 lab=bwmis
node/k8s-node1 labeled
[root@k8s-master scheduler]# kubectl get nodes k8s-node1 --show-labels
NAME STATUS ROLES AGE VERSION LABELS
k8s-node1 Ready <none> 7d12h v1.30.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node1,kubernetes.io/os=linux,lab=bwmis
[root@k8s-master scheduler]# vim pod2.yml
apiVersion: v1
kind: Pod
metadata:
labels:
run: testpod
name: testpod
spec:
nodeSelector:
lab: bwmis
containers:
- image: timinglee/myapp:v1
name: testpod
[root@k8s-master scheduler]# kubectl apply -f pod2.yml
[root@k8s-master scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
testpod 1/1 Running 0 61s 10.244.36.65 k8s-node1 <none> <none>
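A node label added this way can be removed again with the key- syntax; if the label disappears while a pod spec still requires it, newly created pods with that nodeSelector stay Pending. A minimal sketch:
[root@k8s-master scheduler]# kubectl label nodes k8s-node1 lab-
[root@k8s-master scheduler]# kubectl get nodes k8s-node1 --show-labels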
affinity (scheduling affinity)
apiVersion: v1
kind: Pod
metadata:
name: node-affinity
spec:
containers:
- name: nginx
image: nginx
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: disk
operator: In # In and NotIn give opposite results
values:
- ssd
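requiredDuringSchedulingIgnoredDuringExecution is a hard constraint; nodeAffinity also supports a soft preference that only weights the scheduler's choice. A sketch of the preferred form, reusing the assumed disk=ssd label from the example above:
affinity:
  nodeAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 1
      preference:
        matchExpressions:
        - key: disk
          operator: In
          values:
          - ssd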
podAffinity example
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- nginx
topologyKey: "kubernetes.io/hostname"[root@k8s-master scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deployment-658496fff-5clwc 1/1 Running 0 14s 10.244.169.130 k8s-node2 <none> <none>
nginx-deployment-658496fff-9zdnl 1/1 Running 0 14s 10.244.169.131 k8s-node2 <none> <none>
nginx-deployment-658496fff-mfxxb 1/1 Running 0 14s 10.244.169.132 k8s-node2 <none> <none>
podAntiAffinity (pod anti-affinity)
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- nginx
topologyKey: "kubernetes.io/hostname"
[root@k8s-master scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deployment-5f5fc7b8b9-2zpql 1/1 Running 0 9s 10.244.169.133 k8s-node2 <none> <none>
nginx-deployment-5f5fc7b8b9-dqcr4 0/1 Pending 0 9s <none> <none> <none> <none>
nginx-deployment-5f5fc7b8b9-z8bch 1/1 Running 0 9s 10.244.36.66 k8s-node1 <none> <none>
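With required anti-affinity and only two schedulable nodes, the third replica has no node left that satisfies the rule, which is why one pod stays Pending above. A softer variant that merely prefers spreading (and would let all three replicas run) looks roughly like this:
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100
      podAffinityTerm:
        labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values:
            - nginx
        topologyKey: "kubernetes.io/hostname"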
Taints (taint mode: block scheduling onto a node)
$ kubectl taint nodes <nodename> key=string:effect #command syntax
$ kubectl taint nodes node1 key=value:NoSchedule #add a taint
$ kubectl describe nodes server1 | grep Taints #query taints
$ kubectl taint nodes node1 key- #remove a taint
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: web
name: web
spec:
replicas: 2
selector:
matchLabels:
app: web
template:
metadata:
labels:
app: web
spec:
containers:
- image: nginx
name: nginx
[root@k8s-master scheduler]# kubectl apply -f pod6.yml
deployment.apps/web created
[root@k8s-master scheduler]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
timinglee-c7c6f676b-6v5l4 1/1 Running 2 (3h58m ago) 32h 10.244.2.69 k8s-node2 <none> <none>
timinglee-c7c6f676b-sjdz2 1/1 Running 2 (3h58m ago) 32h 10.244.1.199 k8s-node1 <none> <none>
web-7c56dcdb9b-6d5z8 1/1 Running 0 12s 10.244.169.134 k8s-node2 <none> <none>
web-7c56dcdb9b-knj8d 1/1 Running 0 12s 10.244.36.67 k8s-node1 <none> <none>
[root@k8s-master scheduler]# kubectl taint node k8s-node1 name=lee:NoSchedule
node/k8s-node1 tainted
[root@k8s-master scheduler]# kubectl describe nodes k8s-node1 | grep Tain
[root@k8s-master scheduler]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
timinglee-c7c6f676b-6v5l4 1/1 Running 2 (4h ago) 33h 10.244.2.69 k8s-node2 <none> <none>
timinglee-c7c6f676b-sjdz2 1/1 Running 2 (4h ago) 33h 10.244.1.199 k8s-node1 <none> <none>
web-7c56dcdb9b-6d5z8 1/1 Running 0 2m26s 10.244.169.134 k8s-node2 <none> <none>
web-7c56dcdb9b-k5ng6 1/1 Running 0 19s 10.244.169.138 k8s-node2 <none> <none>
web-7c56dcdb9b-knj8d 1/1 Running 0 2m26s 10.244.36.67 k8s-node1 <none> <none>
web-7c56dcdb9b-nrd8v 1/1 Running 0 19s 10.244.169.137 k8s-node2 <none> <none>
web-7c56dcdb9b-nvr2c 1/1 Running 0 19s 10.244.169.135 k8s-node2 <none> <none>
web-7c56dcdb9b-p5bp2 1/1 Running 0 19s 10.244.169.136 k8s-node2 <none> <none>
[root@k8s-master scheduler]# kubectl taint node k8s-node1 name=lee:NoExecute
[root@k8s-master scheduler]# kubectl describe nodes k8s-node1 | grep Tain
Taints: name=lee:NoExecute
[root@k8s-master scheduler]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
timinglee-c7c6f676b-6v5l4 1/1 Running 2 (4h8m ago) 33h 10.244.2.69 k8s-node2 <none> <none>
timinglee-c7c6f676b-9zqwk 1/1 Running 0 2m41s 10.244.169.139 k8s-node2 <none> <none>
web-7c56dcdb9b-696js 1/1 Running 0 19s 10.244.169.152 k8s-node2 <none> <none>
web-7c56dcdb9b-6px2c 1/1 Running 0 19s 10.244.169.151 k8s-node2 <none> <none>
web-7c56dcdb9b-7njfn 1/1 Running 0 19s 10.244.169.149 k8s-node2 <none> <none>
web-7c56dcdb9b-9pmhz 1/1 Running 0 19s 10.244.169.153 k8s-node2 <none> <none>
web-7c56dcdb9b-qsgjj 1/1 Running 0 19s 10.244.169.150 k8s-node2 <none> <none>
web-7c56dcdb9b-sbjnn 0/1 ContainerCreating 0 19s <none> k8s-node2 <none> <none>
Toleration examples
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: web
name: web
spec:
replicas: 6
selector:
matchLabels:
app: web
template:
metadata:
labels:
app: web
spec:
containers:
- image: nginx
name: nginx
tolerations:
- operator: Exists
[root@k8s-master scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
timinglee-c7c6f676b-6v5l4 1/1 Running 2 (4h12m ago) 33h 10.244.2.69 k8s-node2 <none> <none>
timinglee-c7c6f676b-9zqwk 1/1 Running 0 6m36s 10.244.169.139 k8s-node2 <none> <none>
web-7f64586499-4jz6p 1/1 Running 0 49s 10.244.36.68 k8s-node1 <none> <none>
web-7f64586499-94wqh 1/1 Running 0 49s 10.244.36.70 k8s-node1 <none> <none>
web-7f64586499-dtkhd 1/1 Running 0 49s 10.244.169.155 k8s-node2 <none> <none>
web-7f64586499-fsdg8 1/1 Running 0 49s 10.244.169.156 k8s-node2 <none> <none>
web-7f64586499-jfx84 1/1 Running 0 49s 10.244.235.193 k8s-master <none> <none>
web-7f64586499-thvm6 1/1 Running 0 49s 10.244.36.69 k8s-node1 <none> <none>
tolerations: #tolerate taints with effect NoSchedule
- operator: Exists
effect: NoSchedule
[root@k8s-master scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
timinglee-c7c6f676b-6v5l4 1/1 Running 2 (4h15m ago) 33h 10.244.2.69 k8s-node2 <none> <none>
timinglee-c7c6f676b-9zqwk 1/1 Running 0 9m57s 10.244.169.139 k8s-node2 <none> <none>
web-55857fd445-9bmwd 1/1 Running 0 14s 10.244.169.162 k8s-node2 <none> <none>
web-55857fd445-9n4qz 1/1 Running 0 14s 10.244.235.199 k8s-master <none> <none>
web-55857fd445-bw7mk 1/1 Running 0 14s 10.244.169.161 k8s-node2 <none> <none>
web-55857fd445-hz9rj 1/1 Running 0 14s 10.244.169.160 k8s-node2 <none> <none>
web-55857fd445-jbl8w 1/1 Running 0 14s 10.244.235.197 k8s-master <none> <none>
web-55857fd445-zc8nn 1/1 Running 0 14s 10.244.235.198 k8s-master <none> <none>
tolerations: #tolerate a NoSchedule taint with a specific key/value
- key: nodetype
value: bad
effect: NoSchedule
[root@k8s-master scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
timinglee-c7c6f676b-6v5l4 1/1 Running 2 (4h18m ago) 33h 10.244.2.69 k8s-node2 <none> <none>
timinglee-c7c6f676b-9zqwk 1/1 Running 0 12m 10.244.169.139 k8s-node2 <none> <none>
web-d76674b-fvd2c 1/1 Running 0 24s 10.244.169.168 k8s-node2 <none> <none>
web-d76674b-g7xwv 1/1 Running 0 24s 10.244.169.167 k8s-node2 <none> <none>
web-d76674b-kv7mk 1/1 Running 0 24s 10.244.169.165 k8s-node2 <none> <none>
web-d76674b-tnm2h 1/1 Running 0 24s 10.244.169.164 k8s-node2 <none> <none>
web-d76674b-v7fmw 1/1 Running 0 24s 10.244.169.166 k8s-node2 <none> <none>
web-d76674b-zhb8q 1/1 Running 0 24s 10.244.169.163 k8s-node2 <none> <none>
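Besides operator: Exists, a toleration can match one exact taint with operator: Equal (also the default when operator is omitted). A sketch that tolerates only the name=lee:NoExecute taint set earlier; tolerationSeconds is optional and bounds how long the pod may stay after the taint appears:
tolerations:
- key: name
  operator: Equal
  value: lee
  effect: NoExecute
  tolerationSeconds: 300   #evict the pod 300s after the taint is added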
helm
Installing helm
[root@k8s-master helm]# tar zxf helm-v3.15.4-linux-amd64.tar.gz
[root@k8s-master helm]# ls
helm-v3.15.4-linux-amd64.tar.gz linux-amd64
[root@k8s-master helm]# cd linux-amd64/
[root@k8s-master linux-amd64]# ls
helm LICENSE README.md
[root@k8s-master linux-amd64]# cp -p helm /usr/local/bin/
Configure helm command completion
[root@k8s-master linux-amd64]# echo "source <(helm completion bash)" >> ~/.bashrc
[root@k8s-master linux-amd64]# source ~/.bashrc
[root@k8s-master linux-amd64]# helm version
version.BuildInfo{Version:"v3.15.4", GitCommit:"fa9efb07d9d8debbb4306d72af76a383895aa8c4", GitTreeState:"clean", GoVersion:"go1.22.6"}
Managing third-party repo sources
#Add the Aliyun repository
[root@k8s-master helm]# helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
"aliyun" has been added to your repositories#添加bitnami仓库
[root@k8s-master helm]# helm repo add bitnami https://charts.bitnami.com/bitnami
"bitnami" has been added to your repositories#查看仓库信息
[root@k8s-master helm]# helm repo list
NAME URL
aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
bitnami https://charts.bitnami.com/bitnami
#Search the charts stored in a repository
[root@k8s-master helm]# helm search repo aliyun
NAME CHART VERSION APP VERSION DESCRIPTION #chart name / chart version / application version / description
aliyun/acs-engine-autoscaler 2.1.3 2.1.1 Scales worker nodes within agent pools
aliyun/aerospike 0.1.7 v3.14.1.2 A Helm chart for Aerospike in Kubernetes
#Remove a third-party repository
[root@k8s-master helm]# helm repo list
NAME URL
aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
bitnami https://charts.bitnami.com/bitnami
[root@k8s-master helm]# helm repo remove aliyun
"aliyun" has been removed from your repositories
[root@k8s-master helm]# helm repo list
NAME URL
bitnami https://charts.bitnami.com/bitnami
Using helm
[root@k8s-master ~]# helm search repo nginx
NAME CHART VERSION APP VERSION DESCRIPTION
bitnami/nginx 18.1.11 1.27.1 NGINX Open Source is a web server that can be a...
bitnami/nginx-ingress-controller 11.4.1 1.11.2 NGINX Ingress Controller is an Ingress controll...
bitnami/nginx-intel 2.1.15 0.4.9 DEPRECATED NGINX Open Source for Intel is a lig...
Show chart information
[root@k8s-master ~]# helm show chart bitnami/nginx
annotations:
category: Infrastructure
images: |
- name: git
image: docker.io/bitnami/git:2.46.0-debian-12-r0
- name: nginx
image: docker.io/bitnami/nginx:1.27.1-debian-12-r2
- name: nginx-exporter
image: docker.io/bitnami/nginx-exporter:1.3.0-debian-12-r2
licenses: Apache-2.0
apiVersion: v2
appVersion: 1.27.1
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
tags:
- bitnami-common
version: 2.x.x
description: NGINX Open Source is a web server that can be also used as a reverse
proxy, load balancer, and HTTP cache. Recommended for high-demanding sites due to
its ability to provide faster content.
home: https://bitnami.com
icon: https://bitnami.com/assets/stacks/nginx/img/nginx-stack-220x234.png
keywords:
- nginx
- http
- web
- www
- reverse proxy
maintainers:
- name: Broadcom, Inc. All Rights Reserved.
url: https://github.com/bitnami/charts
name: nginx
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/nginx
version: 18.1.11
Installing a chart package
[root@k8s-master ~]# helm install bw bitnami/nginx
[root@k8s-master ~]# helm list
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
bw default 1 2024-09-11 20:01:56.854916295 +0800 CST deployed nginx-18.1.11 1.27.1
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
bw-nginx-677d4b9556-g4lkp 0/1 Init:0/1 0 77s
#Check the release status
[root@k8s-master ~]# helm status bw
NAME: bw
LAST DEPLOYED: Wed Sep 11 20:01:56 2024
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
CHART NAME: nginx
CHART VERSION: 18.1.11
APP VERSION: 1.27.1
** Please be patient while the chart is being deployed **
NGINX can be accessed through the following DNS name from within your cluster
#Uninstall the release
[root@k8s-master ~]# helm uninstall bw
release "bw" uninstalled
[root@k8s-master ~]# helm list
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
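Chart values can also be overridden on the command line instead of editing values.yaml. A hedged sketch with --set (replicaCount and service.type are value names from the bitnami/nginx chart; confirm them with helm show values before relying on them):
[root@k8s-master ~]# helm show values bitnami/nginx | less
[root@k8s-master ~]# helm install bw bitnami/nginx --set replicaCount=2 --set service.type=ClusterIP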
Predefining chart values before installing
[root@k8s-master helm]# ls
helm-v3.15.4-linux-amd64.tar.gz linux-amd64 nginx-18.1.11.tgz
[root@k8s-master helm]# tar zxf nginx-18.1.11.tgz
[root@k8s-master helm]# ls
helm-v3.15.4-linux-amd64.tar.gz linux-amd64 nginx nginx-18.1.11.tgz
[root@k8s-master helm]# cd nginx/
[root@k8s-master nginx]# ls
Chart.lock charts Chart.yaml README.md templates values.schema.json values.yaml
[root@k8s-master nginx]# ls templates/
deployment.yaml _helpers.tpl ingress.yaml pdb.yaml serviceaccount.yaml tls-secret.yaml
extra-list.yaml hpa.yaml networkpolicy.yaml prometheusrules.yaml servicemonitor.yaml
health-ingress.yaml ingress-tls-secret.yaml NOTES.txt server-block-configmap.yaml svc.yaml
[root@k8s-master nginx]# vim values.yaml
global:
imageRegistry: "bwmis.org"[root@k8s-master helm]# docker tag bitnami/nginx:1.27.1-debian-12-r2 bwmis.org/bitnami/nginx:1.27.1-debian-12-r2
[root@k8s-master helm]# docker push bwmis.org/bitnami/nginx:1.27.1-debian-12-r2
The push refers to repository [bwmis.org/bitnami/nginx]
30f5b1069b7f: Pushed
1.27.1-debian-12-r2: digest: sha256:6825a4d52b84873dd08c26d38dccce3d78d4d9f470b7555afdc4edfb4de7e595 size: 529
[root@k8s-master nginx]# helm install bwmis /root/helm/nginx
NAME: bwmis
LAST DEPLOYED: Wed Sep 11 20:16:05 2024
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
CHART NAME: nginx
CHART VERSION: 18.1.11
APP VERSION: 1.27.1
[root@k8s-master nginx]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
bwmis-nginx LoadBalancer 10.108.59.40 172.25.250.16 80:33966/TCP,443:37200/TCP 8s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 8d
nginx NodePort 10.105.123.31 <none> 80:35659/TCP 35h
nginx-svc ClusterIP None <none> 80/TCP 21h
[root@k8s-master nginx]# kubectl get pods
NAME READY STATUS RESTARTS AGE
bwmis-nginx-d96bdfd97-x4g8c 1/1 Running 0 18s
testpod 1/1 Running 1 (23m ago) 9h
timinglee-c7c6f676b-2wq4d 1/1 Running 1 (23m ago) 11h
timinglee-c7c6f676b-dzl4w 1/1 Running 1 (23m ago) 11h
#Upgrade the release
[root@k8s-master nginx]# vim values.yaml #edit the values file
624 type: ClusterIP
751 enabled: true
hostname: myapp.bwmis.org
ingressClassName: "nginx"
[root@k8s-master nginx]# helm upgrade bwmis .
Release "bwmis" has been upgraded. Happy Helming!
NAME: bwmis
LAST DEPLOYED: Wed Sep 11 20:19:39 2024
NAMESPACE: default
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
CHART NAME: nginx
CHART VERSION: 18.1.11
APP VERSION: 1.27.1
[root@k8s-master ~]# helm history bwmis
REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
1 Wed Sep 11 20:16:05 2024 superseded nginx-18.1.11 1.27.1 Install complete
2 Wed Sep 11 20:19:39 2024 deployed nginx-18.1.11 1.27.1 Upgrade complete
[root@k8s-master ~]# helm uninstall bwmis
release "bwmis" uninstalled
Building a chart package with helm
[root@k8s-master helm]# helm create bwmis
Creating bwmis
[root@k8s-master helm]# ls
bwmis helm-push_0.10.4_linux_amd64.tar.gz helm-v3.15.4-linux-amd64.tar.gz linux-amd64 nginx nginx-1.27.1-debian-12-r2.tar nginx-18.1.11.tgz
[root@k8s-master helm]# tree bwmis/
bwmis/
├── charts
├── Chart.yaml
├── templates
│ ├── deployment.yaml
│ ├── _helpers.tpl
│ ├── hpa.yaml
│ ├── ingress.yaml
│ ├── NOTES.txt
│ ├── serviceaccount.yaml
│ ├── service.yaml
│ └── tests
│ └── test-connection.yaml
└── values.yaml
apiVersion: v2
name: timinglee
description: A Helm chart for Kubernetes
type: application
version: 0.1.0 #chart version
appVersion: "v1" #application version
[root@k8s-master bwmis]# helm lint .
==> Linting .
[INFO] Chart.yaml: icon is recommended
1 chart(s) linted, 0 chart(s) failed
enabled: true
className: "nginx"
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: myapp.bwmis.org
paths:
- path: /
pathType: ImplementationSpecific
#Syntax check (lint)
[root@k8s-master bwmis]# helm lint .
==> Linting .
[INFO] Chart.yaml: icon is recommended
1 chart(s) linted, 0 chart(s) failed
#Package the chart
[root@k8s-master helm]# helm package bwmis/
Successfully packaged chart and saved it to: /root/helm/bwmis-0.1.0.tgz
[root@k8s-master helm]# ls
bwmis Chart.yaml helm-v3.15.4-linux-amd64.tar.gz nginx nginx-18.1.11.tgz
bwmis-0.1.0.tgz helm-push_0.10.4_linux_amd64.tar.gz linux-amd64
#The packaged chart can be shared with anyone through any channel and then installed directly
[root@k8s-master helm]# helm install bw bwmis-0.1.0.tgz
NAME: bw
LAST DEPLOYED: Wed Sep 11 20:33:07 2024
NAMESPACE: default
STATUS: deployed
REVISION: 1
NOTES:
1. Get the application URL by running these commands:
http://myapp.bwmis.org/
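Before installing a freshly packaged chart, it can be rendered locally to verify the generated manifests; none of this touches the cluster. A minimal sketch:
[root@k8s-master helm]# helm lint bwmis/                          #same syntax check as above, run from the parent directory
[root@k8s-master helm]# helm template bw bwmis-0.1.0.tgz | less   #render the manifests without installing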
Installing the helm push plugin
Online installation
[root@k8s-master helm]# dnf install git -y
[root@k8s-master helm]# helm plugin install https://github.com/chartmuseum/helm-push
Offline installation
[root@k8s-master helm]# mkdir ~/.local/share/helm/plugins/helm-push -p
[root@k8s-master helm]# tar zxf helm-push_0.10.4_linux_amd64.tar.gz -C ~/.local/share/helm/plugins/helm-push
[root@k8s-master helm]# ls ~/.local/share/helm/plugins/helm-push
acceptance_tests bin BUILDING.md cmd go.mod go.sum LICENSE Makefile pkg plugin.yaml README.md releases scripts testdata
[root@k8s-master helm]# helm cm-push --help
[root@k8s-master helm]# helm repo add bwm https://bwmis.org/chartrepo/bwmis
"bwm" has been added to your repositories
[root@k8s-master helm]# helm cm-push bwmis-0.1.0.tgz bwm -u admin -p 123
Pushing bwmis-0.1.0.tgz to bwm...
Done.
[root@k8s-master helm]# helm uninstall bwm
release "bwm" uninstalled
[root@k8s-master helm]# helm install bwm bwm/bwmis
NAME: bwm
LAST DEPLOYED: Wed Sep 11 21:02:33 2024
NAMESPACE: default
STATUS: deployed
REVISION: 1
NOTES:
1. Get the application URL by running these commands:
http://myapp.bwmis.org/
[root@k8s-master helm]# vim bwmis/Chart.yaml
version: 0.2.0
[root@k8s-master helm]# vim bwmis/values.yaml
tag: "v2"
[root@k8s-master helm]# helm package bwmis
Successfully packaged chart and saved it to: /root/helm/bwmis-0.2.0.tgz
[root@k8s-master helm]# helm cm-push bwmis-0.2.0.tgz bwm -u admin -p 123
Pushing bwmis-0.2.0.tgz to bwm...
Done.
[root@k8s-master helm]# helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "bwm" chart repository
[root@k8s-master helm]# helm repo update bwm
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "bwm" chart repository
Update Complete. ⎈Happy Helming!⎈
[root@k8s-master helm]# helm search repo
NAME CHART VERSION APP VERSION DESCRIPTION
bitnami/airflow 19.0.5 2.10.1 Apache Airflow is a tool to express and execute...
bitnami/apache 11.2.17 2.4.62 Apache HTTP Server is an open-source HTTP serve...
bitnami/apisix 3.3.11 3.10.0 Apache APISIX is high-performance, real-time AP...
bitnami/appsmith 4.0.6 1.39.0 Appsmith is an open source platform for buildin...
bitnami/argo-cd 7.0.10 2.12.3 Argo CD is a continuous delivery tool for Kuber...
bitnami/argo-workflows 9.1.14 3.5.10 Argo Workflows is meant to orchestrate Kubernet...
bitnami/aspnet-core 6.2.11 8.0.8 ASP.NET Core is an open-source framework for we...
bitnami/cassandra 12.0.1 5.0.0 Apache Cassandra is an open source distributed ...
bitnami/cert-manager 1.3.18 1.15.3 cert-manager is a Kubernetes add-on to automate...
bitnami/chainloop 1.0.7 0.96.6 C
[root@k8s-master helm]# helm search repo bwm -l
NAME CHART VERSION APP VERSION DESCRIPTION
bwm/bwmis 0.2.0 1.16.0 A Helm chart for Kubernetes
bwm/bwmis 0.1.0 1.16.0 A Helm chart for Kubernetes
[root@k8s-master helm]# helm upgrade bwm bwm/bwmis
Release "bwm" has been upgraded. Happy Helming!
NAME: bwm
LAST DEPLOYED: Wed Sep 11 21:12:46 2024
NAMESPACE: default
STATUS: deployed
REVISION: 2
NOTES:
1. Get the application URL by running these commands:
http://myapp.bwmis.org/
#Show the release's revision history
[root@k8s-master helm]# helm history bwm
REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
1 Wed Sep 11 21:02:33 2024 superseded bwmis-0.1.0 1.16.0 Install complete
2 Wed Sep 11 21:12:46 2024 deployed bwmis-0.2.0 1.16.0 Upgrade complete
#2. Roll back a release
[root@k8s-master helm]# helm rollback bwm
Rollback was a success! Happy Helming!
[root@k8s-master helm]# helm history bwm
REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
1 Wed Sep 11 21:02:33 2024 superseded bwmis-0.1.0 1.16.0 Install complete
2 Wed Sep 11 21:12:46 2024 superseded bwmis-0.2.0 1.16.0 Upgrade complete
3 Wed Sep 11 21:13:42 2024 deployed bwmis-0.1.0 1.16.0 Rollback to 1
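helm rollback without a revision number returns to the previous release; a specific revision can also be targeted explicitly, and each rollback is recorded as a new revision. A minimal sketch:
[root@k8s-master helm]# helm rollback bwm 2    #go back (or forward) to revision 2 explicitly
[root@k8s-master helm]# helm history bwm       #a new revision with description "Rollback to 2" is appended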
imageRegistry: "bwmis.org"
[root@k8s-master Prometheus]# docker tag quay.io/prometheus/prometheus:v2.54.1 bwmis.org/prometheus/prometheus:v2.54.1
[root@k8s-master Prometheus]# docker tag quay.io/thanos/thanos:v0.36.1 bwmis.org/thanos/thanos:v0.36.1
[root@k8s-master Prometheus]# docker tag quay.io/prometheus/alertmanager:v0.27.0 bwmis.org/prometheus/alertmanager:v0.27.0
[root@k8s-master Prometheus]# docker tag quay.io/prometheus-operator/admission-webhook:v0.76.1 bwmis.org/prometheus-operator/admission-webhook:v0.76.1
[root@k8s-master Prometheus]# docker tag quay.io/prometheus-operator/prometheus-operator:v0.76.1 bwmis.org/prometheus-operator/prometheus-operator:v0.76.1
[root@k8s-master Prometheus]# docker tag quay.io/prometheus-operator/prometheus-config-reloader:v0.76.1 bwmis.org/prometheus-operator/prometheus-config-reloader:v0.76.1
[root@k8s-master grafana]# vim values.yaml
[root@k8s-master grafana]# pwd
/root/Prometheus/kube-prometheus-stack/charts/grafana
imageRegistry: "bwmis.org"
#Install Prometheus with helm
[root@k8s-master kube-prometheus-stack]# kubectl create namespace kube-prometheus-stack
#Note: do not press Ctrl+C while the installation is running
[root@k8s-master kube-prometheus-stack]# helm -n kube-prometheus-stack install kube-prometheus-stack .
NAME: kube-prometheus-stack
LAST DEPLOYED: Tue Sep 10 20:56:53 2024
NAMESPACE: kube-prometheus-stack
STATUS: deployed
REVISION: 1
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
kubectl --namespace kube-prometheus-stack get pods -l "release=kube-prometheus-stack"
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
#Check that all pods are running
[root@k8s-master kube-prometheus-stack]# kubectl --namespace kube-prometheus-stack get pods
NAME READY STATUS RESTARTS AGE
alertmanager-kube-prometheus-stack-alertmanager-0 2/2 Running 0 103s
kube-prometheus-stack-grafana-74ff665878-psdvh 3/3 Running 0 107s
kube-prometheus-stack-kube-state-metrics-7974795889-8kj6j 1/1 Running 0 107s
kube-prometheus-stack-operator-7966d67576-fh6ld 1/1 Running 0 107s
kube-prometheus-stack-prometheus-node-exporter-stzn2 1/1 Running 0 106s
kube-prometheus-stack-prometheus-node-exporter-vrw2g 1/1 Running 0 107s
kube-prometheus-stack-prometheus-node-exporter-zxtdw 1/1 Running 0 106s
prometheus-kube-prometheus-stack-prometheus-0 2/2 Running 0 103s
[root@k8s-master kube-prometheus-stack]# kubectl -n kube-prometheus-stack get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
alertmanager-operated ClusterIP None <none> 9093/TCP,9094/TCP,9094/UDP 4m53s
kube-prometheus-stack-alertmanager ClusterIP 10.109.199.5 <none> 9093/TCP,8080/TCP 4m55s
kube-prometheus-stack-grafana LoadBalancer 10.104.188.201 172.25.250.16 80:33531/TCP 4m55s
kube-prometheus-stack-kube-state-metrics ClusterIP 10.100.45.79 <none> 8080/TCP 4m55s
kube-prometheus-stack-operator ClusterIP 10.110.211.165 <none> 443/TCP 4m55s
kube-prometheus-stack-prometheus ClusterIP 10.96.244.83 <none> 9090/TCP,8080/TCP 4m55s
kube-prometheus-stack-prometheus-node-exporter ClusterIP 10.102.252.55 <none> 9100/TCP 4m55s
prometheus-operated ClusterIP None <none> 9090/TCP 4m53s
[root@k8s-master helm]# kubectl -n kube-prometheus-stack get secrets kube-prometheus-stack-grafana -o yaml
apiVersion: v1
data:
admin-password: cHJvbS1vcGVyYXRvcg==
admin-user: YWRtaW4=
ldap-toml: ""
kind: Secret
metadata:
annotations:
meta.helm.sh/release-name: kube-prometheus-stack
meta.helm.sh/release-namespace: kube-prometheus-stack
creationTimestamp: "2024-09-10T12:57:03Z"
labels:
app.kubernetes.io/instance: kube-prometheus-stack
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.2.0
helm.sh/chart: grafana-8.5.1
name: kube-prometheus-stack-grafana
namespace: kube-prometheus-stack
resourceVersion: "332682"
uid: fc50f5e2-ebc7-48b0-b3b6-960b8e077d03
type: Opaque
#Decode the credentials
[root@k8s-master helm]# echo -n "cHJvbS1vcGVyYXRvcg==" | base64 -d
prom-operator #password
[root@k8s-master helm]# echo "YWRtaW4=" | base64 -d
admin #username
[root@k8s-master kube-prometheus-stack]# kubectl -n kube-prometheus-stack edit svc kube-prometheus-stack-prometheus
service/kube-prometheus-stack-prometheus edited
[root@k8s-master kube-prometheus-stack]# kubectl -n kube-prometheus-stack get svc kube-prometheus-stack-prometheus
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-prometheus-stack-prometheus LoadBalancer 10.96.244.83 172.25.250.17 9090:35539/TCP,8080:38305/TCP 7m35s
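Instead of switching the Service to LoadBalancer, the web UIs can also be reached through a temporary port-forward; the local port below (3000) is an arbitrary choice:
[root@k8s-master kube-prometheus-stack]# kubectl -n kube-prometheus-stack port-forward svc/kube-prometheus-stack-grafana 3000:80
#then browse to http://localhost:3000 and log in with the admin / prom-operator credentials decoded above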