一、环境准备
服务器要求: 最小硬件配置:2核CPU、4G内存、30G硬盘。 服务器可以访问外网。
软件环境:
操作系统:Anolis OS 7.9
Docker:19.03.9版本
Kubernetes:v1.18.0版本
内核版本:5.4.203-1.el7.elrepo.x86_64
服务器清单:
master:192.168.153.221
node1:192.168.153.222
node2:192.168.153.223
二、linux 内核升级版本
# Upgrade the Linux kernel to: 5.4.203-1.el7.elrepo.x86_64
# Download the required RPM packages (kernel, devel, headers) from the ELRepo archive mirror
wget http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-5.4.203-1.el7.elrepo.x86_64.rpm
wget http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-devel-5.4.203-1.el7.elrepo.x86_64.rpm
wget http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-headers-5.4.203-1.el7.elrepo.x86_64.rpm
# Install the kernel and devel RPM packages
rpm -ivh kernel-lt-5.4.203-1.el7.elrepo.x86_64.rpm kernel-lt-devel-5.4.203-1.el7.elrepo.x86_64.rpm
# List the installed kernel packages
rpm -qa | grep kernel
# Show the current GRUB boot menu entries
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
# Set the default boot entry to the newly installed kernel (menu index 0)
grub2-set-default 0
# Reboot so the new kernel takes effect
reboot
# Verify the running kernel version after reboot
uname -r
#[root@localhost ~]# uname -r
#5.4.203-1.el7.elrepo.x86_64
命令版
# Download kernel 5.4.203 RPMs (kernel, devel, headers) from the ELRepo archive mirror
wget http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-5.4.203-1.el7.elrepo.x86_64.rpm
wget http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-devel-5.4.203-1.el7.elrepo.x86_64.rpm
wget http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-headers-5.4.203-1.el7.elrepo.x86_64.rpm
# Install the kernel and devel packages
rpm -ivh kernel-lt-5.4.203-1.el7.elrepo.x86_64.rpm kernel-lt-devel-5.4.203-1.el7.elrepo.x86_64.rpm
# Confirm the kernel packages are installed
rpm -qa | grep kernel
# List GRUB menu entries, then make the new kernel (index 0) the default
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
grub2-set-default 0
# Reboot and verify the running kernel version
reboot
uname -r
三、环境配置
#(三台都要配置,除了主机名需要设置不同,其他同理)
1.关闭防火墙和selinux
#关闭防火墙
systemctl stop firewalld && systemctl disable firewalld && iptables -F
#关闭selinux
sed -i 's/enforcing/disabled/' /etc/selinux/config && setenforce 0
2. 关闭swap分区
#临时关闭
swapoff -a
#永久关闭swap
sed -ri 's/.*swap.*/#&/' /etc/fstab
3. 修改hosts文件,设置主机名
#master
hostnamectl set-hostname master
#node1
hostnamectl set-hostname node1
#node2
hostnamectl set-hostname node2
#修改本地hosts文件
sudo cat >> /etc/hosts << EOF
192.168.153.221 master
192.168.153.222 node1
192.168.153.223 node2
EOF
4. 修改内核参数
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
#重新加载并应用系统中所有的 sysctl 配置文件
sysctl --system
5. 加载ip_vs内核模块
#如果kube-proxy 模式为ip_vs则必须加载,本文采用iptables
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
#设置下次开机自动加载
cat > /etc/modules-load.d/ip_vs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
命令版 主机名需要设置不同 :
# Set a unique hostname on each machine
#master
hostnamectl set-hostname master
#node1
hostnamectl set-hostname node1
#node2
hostnamectl set-hostname node2
# Stop and disable the firewall, then flush iptables rules
systemctl stop firewalld && systemctl disable firewalld && iptables -F
# Disable SELinux permanently (config file) and immediately (setenforce)
sed -i 's/enforcing/disabled/' /etc/selinux/config && setenforce 0
# Turn swap off now, and comment out swap entries in fstab so it stays off after reboot
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Append the cluster hostnames to /etc/hosts
sudo cat >> /etc/hosts << EOF
192.168.153.221 master
192.168.153.222 node1
192.168.153.223 node2
EOF
# Reload and apply all sysctl configuration files
sysctl --system
# Load the IPVS kernel modules (required only if kube-proxy runs in ipvs mode)
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
# Auto-load the same modules on every boot
cat > /etc/modules-load.d/ip_vs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
四、安装docker
#移除旧的Docker版本
yum remove -y docker docker-client docker-client-latest docker-ce-cli docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine
#清理旧缓存并重新生成
yum clean all
yum makecache
# 安装必要的依赖包
yum install -y yum-utils device-mapper-persistent-data lvm2
#下载yum源
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
#安装docker
yum -y install docker-ce-19.03.9-3.el7 docker-ce-cli-19.03.9-3.el7
#编辑docker配置文件
mkdir /etc/docker/
cat > /etc/docker/daemon.json << EOF
{"registry-mirrors": ["https://gqs7xcfd.mirror.aliyuncs.com","https://hub-mirror.c.163.com","https://registry.cn-hangzhou.aliyuncs.com","https://mirror.tencent.com"],"exec-opts": ["native.cgroupdriver=systemd"],"log-driver": "json-file","log-opts": {"max-size": "100m"},"storage-driver": "overlay2"
}
EOF
#说明:
#配置 Docker 镜像加速源,以便更快地拉取镜像。
#设置 Docker 使用 systemd 驱动来管理 Cgroup。
#设置日志驱动为 json-file,并限制日志文件大小为 100MB。
#使用 overlay2 作为 Docker 的存储驱动。
#启动docker服务
systemctl daemon-reload && systemctl enable docker && systemctl start docker
#查看Docker 守护进程的详细状态和配置
docker info
命令版
# Remove any previously installed Docker packages
yum remove -y docker docker-client docker-client-latest docker-ce-cli docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine
# Clean and rebuild the yum cache
yum clean all
yum makecache
# Install prerequisites for the docker-ce repository
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the Aliyun docker-ce yum repository
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# Install the pinned Docker CE 19.03.9 packages
yum -y install docker-ce-19.03.9-3.el7 docker-ce-cli-19.03.9-3.el7
# Write the Docker daemon config: registry mirrors, systemd cgroup driver,
# json-file logging capped at 100m, overlay2 storage driver
mkdir /etc/docker/
cat > /etc/docker/daemon.json << EOF
{"registry-mirrors": ["https://gqs7xcfd.mirror.aliyuncs.com","https://hub-mirror.c.163.com","https://registry.cn-hangzhou.aliyuncs.com","https://mirror.tencent.com"],"exec-opts": ["native.cgroupdriver=systemd"],"log-driver": "json-file","log-opts": {"max-size": "100m"},"storage-driver": "overlay2"
}
EOF
# Enable Docker at boot and start it now
systemctl daemon-reload && systemctl enable docker && systemctl start docker
五、安装kubeadm,kubelet和kubectl
#配置yum源(这里使用阿里云的源)
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#安装指定版本的kubeadm,kubelet,kubectl
yum install -y kubelet-1.18.8 kubeadm-1.18.8 kubectl-1.18.8
#设置开机自启(现在还不能启动,需要初始化之后才行)
systemctl enable kubelet
#列出所有版本
yum list kubelet --showduplicates
#查看版本
kubeadm version
kubelet --version
kubectl version
命令版
# Configure the Kubernetes yum repository (Aliyun mirror)
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install pinned versions of kubelet, kubeadm and kubectl
yum install -y kubelet-1.18.8 kubeadm-1.18.8 kubectl-1.18.8
# Enable kubelet at boot (it only starts successfully after kubeadm init/join)
systemctl enable kubelet
六、搭建Kubernetes集群
#在master节点初始化
kubeadm init \
  --kubernetes-version 1.18.8 \
  --apiserver-advertise-address=192.168.153.221 \
  --service-cidr=10.96.0.0/16 \
  --pod-network-cidr=10.244.0.0/16 \
  --image-repository registry.aliyuncs.com/google_containers
#说明:
#Kubernetes 版本为 1.18.8。
#控制平面节点的 API 服务器使用 192.168.153.221 地址。
#服务的虚拟 IP 地址范围为 10.96.0.0/16。
#Pod 的 IP 地址范围为 10.244.0.0/16。
#使用阿里云的镜像仓库来拉取 Kubernetes 所需的容器镜像。
#出现以下内容表示初始成功
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.153.221:6443 --token u7ooq1.mruphah16xkur4hl \
    --discovery-token-ca-cert-hash sha256:f5558450483f985f10148b6462ad0f8430870f5229935e022ec28d3f36d4e01b
#拷贝k8s认证文件
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#加入集群(在node节点执行命令)把初始化的最后两行复制过来 kubeadm join
kubeadm join 192.168.153.221:6443 --token u7ooq1.mruphah16xkur4hl \
    --discovery-token-ca-cert-hash sha256:f5558450483f985f10148b6462ad0f8430870f5229935e022ec28d3f36d4e01b
#出现以下内容表示成功:
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
#查看节点信息
kubectl get nodes
#[root@localhost ~]# kubectl get nodes
#NAME STATUS ROLES AGE VERSION
#master NotReady master 7m51s v1.18.8
#node1 NotReady <none> 2m16s v1.18.8
#node2 NotReady <none> 2m11s v1.18.8
#可以看到 STATUS状态都是NotReady, 这是因为缺失网络插件导致的,等安装好网络插件就好了。
#重新生成 kubeadm join 所需的命令(在master节点)。
sudo kubeadm token create --print-join-command
#如果node节点报错处理
#设置 bridge-nf-call-iptables
sysctl -w net.bridge.bridge-nf-call-iptables=1
#设置 ip_forward
sysctl -w net.ipv4.ip_forward=1
#重新在node节点执行命令 kubeadm join
#重新初始化命令步骤
#1、重置当前 Kubernetes 配置
sudo kubeadm reset
#2、删除残留文件
sudo rm -rf /etc/kubernetes/
sudo rm -rf ~/.kube/
#3、清理 Docker
sudo docker rm -f $(sudo docker ps -aq)
sudo docker rmi -f $(sudo docker images -aq)
#4、重新初始化
#重新运行 kubeadm init 命令来初始化 Kubernetes 集群
七、安装网络插件
#拉取flannel网络
docker pull quay.io/coreos/flannel:v0.11.0-arm64
#查看镜像是否拉取下来
docker images
#创建目录给flannel做配置文件
mkdir -p /etc/cni/net.d
# 创建CNI(容器网络接口) 配置文件
cat > /etc/cni/net.d/10-flannel.conf << EOF
{"name": "cbr0","cniVersion": "0.3.1","plugins": [{"type": "flannel","delegate": {"hairpinMode": true,"isDefaultGateway": true}},{"type": "portmap","capabilities": {"portMappings": true}}]
}
EOF
#为 OCI 容器卸载工具创建目录,用于存放卸载脚本或配置文件。
mkdir /usr/share/oci-umount/oci-umount.d/ -p
#为 Flannel 网络插件创建目录,用于存放运行时的临时文件。
mkdir /run/flannel
# 进入CNI 插件的可执行文件目录
cd /opt/cni/bin/
#从官网下载yaml文件
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
#更改 "Network" 的网络配置和image: 设置为阿里镜像源。
vim kube-flannel.yml
#[root@localhost bin]# grep "Network" /opt/cni/bin/kube-flannel.yml
# "Network": "10.244.0.0/16",
# hostNetwork: true
#[root@master bin]# grep image: /opt/cni/bin/kube-flannel.yml
# image: registry.cn-hangzhou.aliyuncs.com/google_containers/flannel-cni-plugin:v1.6.0-flannel1
# image: registry.cn-hangzhou.aliyuncs.com/google_containers/flannel:v0.26.3
# image: docker.io/flannel/flannel:v0.26.3
#然后执行yml文件
kubectl apply -f kube-flannel.yml
#[root@localhost bin]# kubectl apply -f kube-flannel.yml
#namespace/kube-flannel created
#clusterrole.rbac.authorization.k8s.io/flannel created
#clusterrolebinding.rbac.authorization.k8s.io/flannel created
#serviceaccount/flannel created
#configmap/kube-flannel-cfg created
#daemonset.apps/kube-flannel-ds created
#检查 kube-flannel 网络插件在集群中的运行状态。
kubectl -n kube-flannel get pods
#查看flannel网络插件部署结果
kubectl -n kube-system get pods -o wide
#查看所有命名空间的 Pods
kubectl get pods --all-namespaces
#查看节点信息
kubectl get nodes
#检查 Kubernetes 节点的状态
kubectl describe node master
kubectl describe node node1
kubectl describe node node2
#如果仍然报错
#解决方法:
#Github 手动下载 cni plugin v0.8.6
#进入CNI 插件的可执行文件目录
cd /opt/cni/bin/
#下载 cni plugin v0.8.6 包
wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz
#解压
tar -zxvf cni-plugins-linux-amd64-v0.8.6.tgz
#重启以生效
systemctl daemon-reload
systemctl restart kubelet
systemctl restart docker
#重新查看节点信息
kubectl get nodes
八、部署Kubernetes 仪表板(Dashboard)
#从 GitHub 下载 Kubernetes Dashboard 项目版本 2.0.4 的部署 YAML
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.4/aio/deploy/recommended.yaml
#进行编辑
vim recommended.yaml
#在spec: 下面添加暴露给外部的服务类型 type: NodePort 在targetPort: 8443 下面添加外网访问的端口 nodePort: 30001
#查看第一个匹配到的 spec: 及其后的7行内容 grep -A 7 'spec:' recommended.yaml | head -n 8
#[root@master ~]# grep -A 7 'spec:' recommended.yaml | head -n 8
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard
#在 image: 后面添加阿里镜像仓库地址
#查看文件 recommended.yaml 中包含 image: 的行:grep 'image:' recommended.yaml
#[root@master ~]# grep 'image:' recommended.yaml
#          image: registry.cn-hangzhou.aliyuncs.com/google_containers/dashboard:v2.0.4
#          image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-scraper:v1.0.4
#然后在k8s的主节点当中执行:应用或更新配置的命令
kubectl apply -f recommended.yaml
#在浏览器访问
https://192.168.153.221:30001/
#切换键盘为英文
输入 thisisunsafe
#查看 kubernetes-dashboard 命名空间下资源状态
kubectl get pods,svc -n kubernetes-dashboard
#生成token
#1. 创建 Service Account
kubectl create serviceaccount dashboard-admin-sa -n kubernetes-dashboard
#2. 绑定角色
kubectl create clusterrolebinding dashboard-admin-sa --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin-sa
#3. 获取 Token
kubectl get secret $(kubectl get sa dashboard-admin-sa -n kubernetes-dashboard -o jsonpath="{.secrets[0].name}") -n kubernetes-dashboard -o go-template="{{.data.token | base64decode}}"
#4. 访问 Dashboard 查看 Dashboard 的 URL。
kubectl get svc -n kubernetes-dashboard
完成