一、实验步骤
1、部署框架前准备工作
服务器类型 | 部署组件 | ip地址 |
---|---|---|
DR1调度服务器 主(ha01) | Keepalived+LVS-DR | 192.168.86.13 |
DR2调度服务器 备 (ha02) | Keepalived+LVS-DR | 192.168.86.14 |
web1节点服务器 (slave01) | Nginx+Tomcat+MySQL 备+MHA manager+MHA node | 192.168.86.10 |
web2节点服务器 (slave02) | Nginx+Tomcat +MySQL 备+MHA node | 192.168.86.11 |
NFS存储服务器(master01) | MySQL 主+NFS+MHA node | 192.168.86.12 |
vip | 虚拟ip | 192.168.86.100 |
实验要求 通过keepalived的虚拟ip(vip)访问静态或者动态网页,mysql中的master节点能够实现故障自动切换
2、准备环境(关闭防火墙、修改主机名)
[root@localhost ~]# systemctl stop firewalld [root@localhost ~]# setenforce 0 [root@localhost ~]# hostnamectl set-hostname ha01 [root@localhost ~]# hostnamectl set-hostname ha02 [root@localhost ~]# hostnamectl set-hostname slave01 [root@localhost ~]# hostnamectl set-hostname slave02 [root@localhost ~]# hostnamectl set-hostname master01 [root@localhost ~]# su
3、部署LVS-DR
3.1 、配置负载调度器ha01与ha02同时配置(192.168.86.13、192.168.86.14)
[root@ha01 ~]# modprobe ip_vs [root@ha01 ~]# cat /proc/net/ip_vs ## 加载ip_vs模块,并查看版本信息 IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags-> RemoteAddress:Port Forward Weight ActiveConn InActConn [root@ha01 ~]# yum install -y ipvsadm
3.2、配置虚拟ip地址(VIP:192.168.86.100)
[root@ha01 ~]# cd /etc/sysconfig/network-scripts/ [root@ha01 network-scripts]# ls ifcfg-ens33 ifdown-ipv6 ifdown-TeamPort ifup-ippp ifup-routes network-functions ifcfg-lo ifdown-isdn ifdown-tunnel ifup-ipv6 ifup-sit network-functions-ipv6 ifdown ifdown-post ifup ifup-isdn ifup-Team ifdown-bnep ifdown-ppp ifup-aliases ifup-plip ifup-TeamPort ifdown-eth ifdown-routes ifup-bnep ifup-plusb ifup-tunnel ifdown-ib ifdown-sit ifup-eth ifup-post ifup-wireless ifdown-ippp ifdown-Team ifup-ib ifup-ppp init.ipv6-global [root@ha01 network-scripts]# cp ifcfg-ens33 ifcfg-ens33:0 [root@ha01 network-scripts]# vim ifcfg-ens33:0 #配置文件内容 DEVICE=ens33:0 ONBOOT=yes IPADDR=192.168.86.100 NETMASK=255.255.255.255
[root@ha01 network-scripts]# ifup ifcfg-ens33:0 [root@ha01 network-scripts]# ifconfig ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500inet 192.168.86.13 netmask 255.255.255.0 broadcast 20.0.0.255inet6 fe80::fa76:f2fe:47ea:cebf prefixlen 64 scopeid 0x20<link>ether 00:0c:29:1f:fb:2f txqueuelen 1000 (Ethernet)RX packets 21454 bytes 29257870 (27.9 MiB)RX errors 0 dropped 0 overruns 0 frame 0TX packets 9843 bytes 614500 (600.0 KiB)TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 ens33:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500inet 192.168.86.100 netmask 255.255.255.255 broadcast 20.0.0.10ether 00:0c:29:1f:fb:2f txqueuelen 1000 (Ethernet) lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536inet 127.0.0.1 netmask 255.0.0.0inet6 ::1 prefixlen 128 scopeid 0x10<host>loop txqueuelen 1 (Local Loopback)RX packets 386 bytes 34810 (33.9 KiB)RX errors 0 dropped 0 overruns 0 frame 0TX packets 386 bytes 34810 (33.9 KiB)TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 virbr0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500inet 192.168.122.1 netmask 255.255.255.0 broadcast 192.168.122.255ether 52:54:00:7b:ec:da txqueuelen 1000 (Ethernet)RX packets 0 bytes 0 (0.0 B)RX errors 0 dropped 0 overruns 0 frame 0TX packets 0 bytes 0 (0.0 B)TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@ha01 network-scripts]# route add -host 192.168.86.100 dev ens33:0 [root@ha01 network-scripts]# vim /etc/rc.local #配置文件添加内容 /usr/sbin/route add -host 192.168.86.100 dev ens33:0
3.3、配置ARP内核响应参数防止更新VIP中的MAC地址,避免发生冲突
[root@ha01 network-scripts]# vim /etc/sysctl.conf #配置文件内容 #proc响应关闭重定向功能 net.ipv4.ip_forward = 0 net.ipv4.conf.all.send_redirects = 0 net.ipv4.conf.default.send_redirects = 0 net.ipv4.conf.ens33.send_redirects = 0
[root@ha01 network-scripts]# sysctl -p ## 加载配置文件生效 net.ipv4.ip_forward = 0 net.ipv4.conf.all.send_redirects = 0 net.ipv4.conf.default.send_redirects = 0 net.ipv4.conf.ens33.send_redirects = 0
3.4、配置负载均衡分配策略
[root@ha01 network-scripts]# ipvsadm-save > /etc/sysconfig/ipvsadm## 保存策略 [root@ha01 network-scripts]# systemctl start ipvsadm.service ## 开启ipvsadm服务 [root@ha01 network-scripts]# ipvsadm -C ## 清空策略,添加虚拟ip地址,指定负载均衡算法给两台web节点服务器 [root@ha01 network-scripts]# ipvsadm -A -t 192.168.86.100:80 -s rr [root@ha01 network-scripts]# ipvsadm -a -t 192.168.86.100:80 -r 192.168.86.10:80 -g [root@ha01 network-scripts]# ipvsadm -a -t 192.168.86.100:80 -r 192.168.86.11:80 -g [root@ha01 network-scripts]# ipvsadm IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags-> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP ha01:http rr-> 192.168.86.10:http Route 1 0 0 -> 192.168.86.11:http Route 1 0 0
3.5、配置web节点服务器(两台slave同时部署 192.168.86.10 、192.168.86.11)
3.5.1、配置虚拟ip地址(VIP:192.168.86.100)
[root@slave01 ~]# cd /etc/sysconfig/network-scripts/ [root@slave01 network-scripts]# ls ifcfg-ens33 ifdown-ipv6 ifdown-TeamPort ifup-ippp ifup-routes network-functions ifcfg-lo ifdown-isdn ifdown-tunnel ifup-ipv6 ifup-sit network-functions-ipv6 ifdown ifdown-post ifup ifup-isdn ifup-Team ifdown-bnep ifdown-ppp ifup-aliases ifup-plip ifup-TeamPort ifdown-eth ifdown-routes ifup-bnep ifup-plusb ifup-tunnel ifdown-ib ifdown-sit ifup-eth ifup-post ifup-wireless ifdown-ippp ifdown-Team ifup-ib ifup-ppp init.ipv6-global [root@slave01 network-scripts]# cp ifcfg-ens33 ifcfg-lo:0 [root@slave01 network-scripts]# vim ifcfg-lo:0DEVICE=lo:0 ONBOOT=yes IPADDR=192.168.86.100 NETMASK=255.255.255.255
[root@slave01 network-scripts]# ifup ifcfg-lo:0 ## 开启虚拟网卡 [root@slave01 network-scripts]# ifconfig ## 查看网卡
[root@slave01 network-scripts]# route add -host 192.168.86.100 dev lo:0 [root@slave01 network-scripts]# vim /etc/rc.local ## 配置永久添加路由 route add -host 192.168.86.100 dev lo:0
3.5.3、配置ARP内核响应参数防止更新VIP中的MAC地址,避免发生冲突
[root@slave01 network-scripts]# vim /etc/sysctl.conf net.ipv4.conf.lo.arp_ignore = 1 net.ipv4.conf.lo.arp_announce = 2 net.ipv4.conf.all.arp_ignore = 1 net.ipv4.conf.all.arp_announce = 2
[root@slave01 network-scripts]# sysctl -p net.ipv4.conf.lo.arp_ignore = 1 net.ipv4.conf.lo.arp_announce = 2 net.ipv4.conf.all.arp_ignore = 1 net.ipv4.conf.all.arp_announce = 2
3.6、部署NFS存储服务器(NFS共享存储ip地址:192.168.86.12)
[root@master01 ~]# rpm -q rpcbind nfs-utils ## 检查是否有安装nfs rpcbind-0.2.0-42.el7.x86_64 nfs-utils-1.3.0-0.48.el7.x86_64[root@master01 ~]# systemctl start nfs [root@master01 ~]# systemctl start rpcbind ##开启服务 [root@master01 ~]# systemctl enable nfs ## 设置开机自启 Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service. [root@master01 ~]# systemctl enable rpcbind [root@master01 ~]# mkdir /opt/web1 /opt/web2 ## 创建web目录 [root@master01 ~]# echo '<h1>This is node web1</h1>' > /opt/web1/index.html [root@master01 ~]# echo '<h1>This is node web2</h1>' > /opt/web2/index.html ## 添加网页内容
[root@master01 ~]# vim /etc/exports /opt/web1 192.168.86.0/24(ro,sync) /opt/web2 192.168.86.0/24(ro,sync)
[root@master01 ~]# exportfs -rv ## 发布共享 exporting 192.168.86.0/24:/opt/web2 exporting 192.168.86.0/24:/opt/web1
3.7、节点服务器安装web服务(Nginx)并挂载共享目录
3.7.1、安装Nginx
将安装包拖入/opt/目录下
[root@slave01 network-scripts]# cd /opt/ [root@slave01 opt]# ls nginx-1.12.0.tar.gz rh [root@slave01 opt]# systemctl disable firewalld.service Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service. Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service. [root@slave01 opt]# systemctl status firewalld.service
3.7.2、安装依赖包
[root@slave01 opt]# yum -y install pcre-devel zlib-devel gcc gcc-c++ make
3.7.3、创建运行用户、组
[root@slave01 opt]# useradd -M -s /sbin/nologin nginx [root@slave01 opt]# id nginx
[root@slave01 opt]# ls nginx-1.12.0.tar.gz rh [root@slave01 opt]# tar zxvf nginx-1.12.0.tar.gz
3.7.5、安装相关模块
[root@slave01 opt]# ls nginx-1.12.0 nginx-1.12.0.tar.gz rh [root@slave01 opt]# cd nginx-1.12.0/ [root@slave01 nginx-1.12.0]# ls auto CHANGES CHANGES.ru conf configure contrib html LICENSE man README src [root@slave01 nginx-1.12.0]# ./configure \ --prefix=/usr/local/nginx \ --user=nginx \ --group=nginx \ --with-http_stub_status_module
3.7.6、编译安装Nginx
[root@slave01 nginx-1.12.0]# make && make install
3.7.7、将Nginx链接到/usr/local/sbin下
[root@slave01 nginx-1.12.0]# cd /usr/local/nginx/sbin/ [root@slave01 sbin]# ls nginx [root@slave01 sbin]# ln -s /usr/local/nginx/sbin/nginx /usr/local/sbin/
3.7.8、启动Nginx服务
[root@slave01 sbin]# nginx -t ## 检查配置文件是否正确 nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful [root@slave01 sbin]# netstat -natp | grep :80 [root@slave01 sbin]# nginx ## 启动nginx服务(源码编译安装未注册systemd服务,直接执行nginx命令启动) [root@slave01 sbin]# netstat -natp | grep :80 ## nginx 与httpd都是80端口不能同时开启服务,如有开启httpd服务需关闭 tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 43551/nginx: master
3.7.9、节点服务器挂载共享目录 (slave1 slave2分别挂载/opt/web1 /opt/web2)
[root@slave01 sbin]# cd [root@slave01 ~]# showmount -e 192.168.86.12 Export list for 192.168.86.12: /opt/web2 192.168.86.0/24 /opt/web1 192.168.86.0/24 [root@slave01 ~]# mount.nfs 192.168.86.12:/opt/web1 /usr/local/nginx/html/ [root@slave01 ~]# cd /usr/local/nginx/html/ [root@slave01 html]# ls index.html [root@slave01 html]# cat index.html <h1>This is node web1</h1>
4、部署Nginx+Tomcat的动静分离
4.1、安装Tomcat作为后端服务器
将tomcat和jdk安装包放入/opt/目录下
[root@slave01 html]# cd /opt/ [root@slave01 opt]# ls apache-tomcat-9.0.16.tar.gz jdk-8u201-linux-x64.rpm nginx-1.12.0 nginx-1.12.0.tar.gz rh [root@slave01 opt]# cd [root@slave01 ~]# vim tomcat.sh #!/bin/bash #安装部署tomcat systemctl stop firewalld systemctl disable firewalld setenforce 0 #安装JDK cd /opt rpm -ivh jdk-8u201-linux-x64.rpm &> /dev/null java -version #设置JDK环境变量(注意使用带引号的'EOF',防止$JAVA_HOME等变量在写入文件时被提前展开) cat > /etc/profile.d/java.sh <<'EOF' export JAVA_HOME=/usr/java/jdk1.8.0_201-amd64 export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar export PATH=$JAVA_HOME/bin:$PATH EOF source /etc/profile.d/java.sh if [ $? -eq 0 ];then echo -e "\033[34;1m JDK安装完成! \033[0m" fi java -version #安装启动Tomcat cd /opt tar zxvf apache-tomcat-9.0.16.tar.gz &> /dev/null mv apache-tomcat-9.0.16 /usr/local/tomcat ##启动tomcat /usr/local/tomcat/bin/startup.sh if [ $? -eq 0 ];then echo -e "\033[34;1m tomcat安装完成! \033[0m" fi
[root@slave01 ~]# chmod +x tomcat.sh [root@slave01 ~]# ./tomcat.sh
4.2 动静分离Tomcat server配置(192.168.86.10、192.168.86.11)
4.2 配置Tomcat的动态网页显示内容
slave01 192.168.86.10
[root@slave01 opt]# mkdir /usr/local/tomcat/webapps/test ## 创建目录 [root@slave01 opt]# cd /usr/local/tomcat/webapps/ [root@slave01 webapps]# ls docs examples host-manager manager ROOT test [root@slave01 webapps]# cd test/ [root@slave01 test]# vim /usr/local/tomcat/webapps/test/index.jsp <%@ page language="java" import="java.util.*" pageEncoding="UTF-8" %> <html> <head> <title>tomcat1</title> </head> <body> <% out.println("This is tomcat1 server");%> <div>动态页面1</div><br/> </body> </html> [root@slave01 test]# ls index.jsp
slave02 192.168.86.11
[root@slave02 opt]# mkdir /usr/local/tomcat/webapps/test ## 创建目录 [root@slave02 opt]# cd /usr/local/tomcat/webapps/ [root@slave02 webapps]# ls docs examples host-manager manager ROOT test [root@slave02 webapps]# cd test/ [root@slave02 test]# vim /usr/local/tomcat/webapps/test/index.jsp <%@ page language="java" import="java.util.*" pageEncoding="UTF-8" %> <html> <head> <title>tomcat2</title> </head> <body> <% out.println("This is tomcat2 server");%> <div>动态页面2</div><br/> </body> </html> [root@slave02 test]# ls index.jsp
4.2.2、Tomcat实例主配置删除前面的 Host配置,增添新的Host配置
两个web节点服务器配置相同
[root@slave01 test]# cd /usr/local/tomcat/conf/ [root@slave01 conf]# ls catalina.policy context.xml jaspic-providers.xsd server.xml tomcat-users.xsd catalina.properties jaspic-providers.xml logging.properties tomcat-users.xml web.xml [root@slave01 conf]# cp server.xml{,.bak} ## 备份配置文件 [root@slave01 conf]# vim server.xml**删除148-164行配置文件**添加以下字段<Host name="localhost" appBase="webapps"unpackWARs="true" autoDeploy="true" xmlValidation="false" xmlNamespaceAware="false"><Context docBase="/usr/local/tomcat/webapps/test" path="" reloadable="true" /></Host>
[root@slave01 conf]# /usr/local/tomcat/bin/shutdown.sh [root@slave01 conf]# /usr/local/tomcat/bin/startup.sh [root@slave01 conf]# netstat -natp | grep 8080 ## 查看端口是否打开 tcp6 0 0 :::8080 :::* LISTEN 45514/java
4.3 Nginx server 配置(192.168.86.10、192.168.86.11)两者配置相同
[root@slave01 conf]# cd /usr/local/nginx/conf/ [root@slave01 conf]# ls fastcgi.conf koi-utf nginx.conf uwsgi_params fastcgi.conf.default koi-win nginx.conf.default uwsgi_params.default fastcgi_params mime.types scgi_params win-utf fastcgi_params.default mime.types.default scgi_params.default [root@slave01 conf]# cp nginx.conf{,.bak} [root@slave01 conf]# vim nginx.conf ##复制备份修改配置文件upstream tomcat_server {server 192.168.86.10:8080 weight=1;server 192.168.86.11:8080 weight=1;}server_name www.web1.com;charset utf-8;location ~ .*.jsp$ {proxy_pass http://tomcat_server;proxy_set_header HOST $host;proxy_set_header X-Real-IP $remote_addr;proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;}location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|css)$ {root /usr/local/nginx/html;expires 10d;}
[root@slave02 conf]# cd /usr/local/nginx/sbin/ [root@slave01 sbin]# nginx -s reload
4.4、测试Nginx和Tomcat 动静分离 结果
5、 配置keeplived(主(ha01)、备(ha02)DR 服务器都需要配置)
主 DR服务器 ha01:192.168.86.13
[root@ha01 ~]# yum install ipvsadm keepalived -y [root@ha01 ~]# cd /etc/keepalived/ [root@ha01 keepalived]# ls keepalived.conf [root@ha01 keepalived]# cp keepalived.conf{,.bak} [root@ha01 keepalived]# vim keepalived.conf ## 按图修改配置文件
按行数yy+p复制段落
[root@ha01 keepalived]# systemctl start keepalived.service [root@ha01 keepalived]# ip addr show dev ens33
[root@ha01 keepalived]# scp keepalived.conf 192.168.86.14:/etc/keepalived/ ## 注意备ha02调度服务器要先下载 yum install ipvsadm keepalived -y ,在scp传输keepalived.conf文件
备 DR服务器 ha02:192.168.86.14
[root@ha02 ~]# yum install ipvsadm keepalived -y [root@ha02 network-scripts]# cd /etc/keepalived/ [root@ha02 keepalived]# systemctl start keepalived.service [root@ha02 keepalived]# vim keepalived.conf
[root@ha02 keepalived]# systemctl start keepalived.service
5.1、 调整内核 proc 响应参数,关闭linux内核的重定向参数响应
主DR在LVS-DR模式中已经调整了响应参数,现在只需要修改备DR服务器即可
[root@ha02 keepalived]# vim /etc/sysctl.conf net.ipv4.conf.all.send_redirects = 0 net.ipv4.conf.default.send_redirects = 0 net.ipv4.conf.ens33.send_redirects = 0
[root@ha02 keepalived]# sysctl -p
测试keepalived是否能主备切换成功
关闭主DR服务器ha01中的keepalived服务: 主DR服务器ha01
[root@ha01 keepalived]# systemctl stop keepalived.service [root@ha01 keepalived]# ip a
虚拟ip已关闭
备DR服务器ha02
在浏览器中测试动静分离页面
6.1、安装MySQL(slave01,slave02,master同时配置)
6.1.1、编译及安装MySQL
将安装包放入opt目录下
[root@master01 opt]# ls boost_1_59_0.tar.gz mysql-5.7.17.tar.gz rh web1 web2 [root@master01 opt]# yum -y install gcc gcc-c++ ncurses ncurses-devel bison cmake ## 安装环境依赖包 [root@master01 opt]# tar zxvf mysql-5.7.17.tar.gz [root@master01 opt]# tar zxvf boost_1_59_0.tar.gz [root@master01 opt]# mv boost_1_59_0 /usr/local/boost [root@master01 opt]# mkdir /usr/local/mysql [root@master01 opt]# cd mysql-5.7.17/ [root@master01 mysql-5.7.17]# ## 先输入cmake \ ,复制下面文字cmake \-DCMAKE_INSTALL_PREFIX=/usr/local/mysql \-DMYSQL_UNIX_ADDR=/usr/local/mysql/mysql.sock \-DSYSCONFDIR=/etc \-DSYSTEMD_PID_DIR=/usr/local/mysql \-DDEFAULT_CHARSET=utf8 \-DDEFAULT_COLLATION=utf8_general_ci \-DWITH_EXTRA_CHARSETS=all \-DWITH_INNOBASE_STORAGE_ENGINE=1 \-DWITH_ARCHIVE_STORAGE_ENGINE=1 \-DWITH_BLACKHOLE_STORAGE_ENGINE=1 \-DWITH_PERFSCHEMA_STORAGE_ENGINE=1 \-DMYSQL_DATADIR=/usr/local/mysql/data \-DWITH_BOOST=/usr/local/boost \ -DWITH_SYSTEMD=1
[root@master01 mysql-5.7.17]# make && make install
6.2、创建MySQL用户
[root@master01 mysql-5.7.17]# cd /usr/local/ [root@master01 local]# useradd -M -s /sbin/nologin mysql ## 创建mysql用户,不建立宿主目录,以及不允许登录
6.3、修改MySQL配置文件
[root@master01 local]# vim /etc/my.cnf ##将原有配置文件数据全部清空,复制下面所有配置信息到my.cnf中 50+dd全部清空 [client] port = 3306 default-character-set=utf8 socket=/usr/local/mysql/mysql.sock [mysql] port = 3306 default-character-set=utf8 socket = /usr/local/mysql/mysql.sock auto-rehash [mysqld] user = mysql basedir=/usr/local/mysql datadir=/usr/local/mysql/data port = 3306 character-set-server=utf8 pid-file = /usr/local/mysql/mysqld.pid socket=/usr/local/mysql/mysql.sock bind-address = 0.0.0.0 skip-name-resolve max_connections=2048 default-storage-engine=INNODB max_allowed_packet=16M server-id = 1 sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_AUTO_VALUE_ON_ZERO,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,PIPES_AS_CONCAT,ANSI_QUOTES
6.4、更改MySQL安装目录和配置文件的属主属组
[root@master01 local]# chown -R mysql:mysql /usr/local/mysql/ [root@master01 local]# chown mysql:mysql /etc/my.cnf
6.5、设置路径环境变量
[root@master01 local]# echo 'export PATH=/usr/local/mysql/bin:/usr/local/mysql/lib:$PATH' >> /etc/profile [root@master01 local]# source /etc/profile [root@master01 local]# echo $PATH /usr/local/mysql/bin:/usr/local/mysql/lib:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:/root/bin
6.6、初始化数据库
[root@master01 local]# cd /usr/local/mysql/bin/ [root@master01 bin]# ##复制下面代码写入 ./mysqld \--initialize-insecure \--user=mysql \--basedir=/usr/local/mysql \--datadir=/usr/local/mysql/data
6.7、添加MySQLD系统服务
[root@slave01 bin]# cp /usr/local/mysql/usr/lib/systemd/system/mysqld.service /usr/lib/systemd/system/ [root@slave01 bin]# systemctl daemon-reload [root@slave01 bin]# systemctl start mysqld.service [root@slave01 bin]# systemctl enable mysqld.service ## 重新加载并开启服务,设置开机自启动 Created symlink from /etc/systemd/system/multi-user.target.wants/mysqld.service to /usr/lib/systemd/system/mysqld.service. [root@slave01 bin]# netstat -natp |grep 3306 tcp 0 0 0.0.0.0:3306 0.0.0.0:* LISTEN 90475/mysqld
6.8、修改MySQL 的登录密码
[root@slave01 bin]# mysqladmin -u root -p password "123456" ## 按回车 Enter password: mysqladmin: [Warning] Using a password on the command line interface can be insecure. Warning: Since password will be sent to server in plain text, use ssl connection to ensure password safety.
6.9、授权远程登录
[root@slave01 bin]# mysql -u root -p123456 mysql: [Warning] Using a password on the command line interface can be insecure. Welcome to the MySQL monitor. Commands end with ; or \g. Your MySQL connection id is 6 Server version: 5.7.17 Source distributionCopyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.Oracle is a registered trademark of Oracle Corporation and/or its affiliates. Other names may be trademarks of their respective owners.Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.mysql> CREATE DATABASE mydatabase; Query OK, 1 row affected (0.01 sec)mysql> show databases; +--------------------+ | Database | +--------------------+ | information_schema | | mydatabase | | mysql | | performance_schema | | sys | +--------------------+ 5 rows in set (0.01 sec)mysql> grant all privileges on *.* to 'root'@'%' identified by '123456'; ## 允许root用户登录,并给权限 Query OK, 0 rows affected, 1 warning (0.00 sec)mysql> quit Bye
6.10、MySQL集群配置一主双从
在所有服务器上/etc/hosts配置文件中,添加IP与主机名的解析并进行ping测试
[root@slave01 ~]# vim /etc/hosts 192.168.86.10 slave01 192.168.86.11 slave02 192.168.86.12 master01
测试是否能ping通
修改 Master01、Slave01、Slave02 节点的 Mysql主配置文件/etc/my.cnf
Master01主数据库
[root@master01 bin]# vim /etc/my.cnf ## 添加 log_bin = master-bin log-slave-updates = true
[root@master01 bin]# systemctl restart mysqld.service ## 重启数据库
slave01备数据库
[root@slave01 ~]# vim /etc/my.cnf ## 修改添加 server-id = 2 log_bin = master-bin relay-log = relay-log-bin relay-log-index = slave-relay-bin.index
[root@slave01 ~]# systemctl restart mysqld.service ##重启
slave02备主数据库
[root@slave02 bin]# vim /etc/my.cnf ##同上修改添加 server-id = 3 log_bin = master-bin relay-log = relay-log-bin relay-log-index = slave-relay-bin.index
[root@slave02 bin]# systemctl restart mysqld.service ##重启
在 Master01、Slave01、Slave02 节点上都创建两个软链接
Master01主数据库
[root@master01 bin]# ln -s /usr/local/mysql/bin/{mysql,mysqlbinlog} /usr/sbin/ [root@master01 bin]# ll /usr/sbin/mysql* lrwxrwxrwx. 1 root root 26 1月 4 09:13 /usr/sbin/mysql -> /usr/local/mysql/bin/mysql lrwxrwxrwx. 1 root root 32 1月 4 09:13 /usr/sbin/mysqlbinlog -> /usr/local/mysql/bin/mysqlbinlog
slave01备数据库
[root@slave01 ~]# ln -s /usr/local/mysql/bin/{mysql,mysqlbinlog} /usr/sbin/ [root@slave01 ~]# ll /usr/sbin/mysql* lrwxrwxrwx. 1 root root 26 1月 4 09:14 /usr/sbin/mysql -> /usr/local/mysql/bin/mysql lrwxrwxrwx. 1 root root 32 1月 4 09:14 /usr/sbin/mysqlbinlog -> /usr/local/mysql/bin/mysqlbinlog
slave02备数据库
[root@slave02 bin]# ln -s /usr/local/mysql/bin/{mysql,mysqlbinlog} /usr/sbin/ [root@slave02 bin]# ll /usr/sbin/mysql* lrwxrwxrwx. 1 root root 26 1月 4 09:14 /usr/sbin/mysql -> /usr/local/mysql/bin/mysql lrwxrwxrwx. 1 root root 32 1月 4 09:14 /usr/sbin/mysqlbinlog -> /usr/local/mysql/bin/mysqlbinlog
所有数据库节点进行 mysql 授权(一主两从),三台都设置一样
[root@master01 bin]# mysql -uroot -p123456 mysql: [Warning] Using a password on the command line interface can be insecure. Welcome to the MySQL monitor. Commands end with ; or \g. Your MySQL connection id is 3 Server version: 5.7.17-log Source distributionCopyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.Oracle is a registered trademark of Oracle Corporation and/or its affiliates. Other names may be trademarks of their respective owners.Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.mysql> grant replication slave on *.* to 'myslave'@'192.168.86.%' identified by '123456'; ## 从数据库同步使用 Query OK, 0 rows affected, 1 warning (0.00 sec)mysql> grant all privileges on *.* to 'mha'@'192.168.86.%' identified by 'manager'; Query OK, 0 rows affected, 1 warning (0.00 sec)mysql> grant all privileges on *.* to 'mha'@'master01' identified by 'manager'; ## 防止从库通过主机名连接不上主库 Query OK, 0 rows affected, 2 warnings (0.00 sec)mysql> grant all privileges on *.* to 'mha'@'slave01' identified by 'manager'; Query OK, 0 rows affected, 2 warnings (0.00 sec)mysql> grant all privileges on *.* to 'mha'@'slave02' identified by 'manager'; Query OK, 0 rows affected, 2 warnings (0.00 sec)mysql> flush privileges; ## 刷新权限 Query OK, 0 rows affected (0.00 sec)
** Master01 主数据库查看二进制文件和同步点 **
mysql> show master status; +-------------------+----------+--------------+------------------+-------------------+ | File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set | +-------------------+----------+--------------+------------------+-------------------+ | master-bin.000001 | 1750 | | | | +-------------------+----------+--------------+------------------+-------------------+ 1 row in set (0.00 sec)mysql>
Slave01、Slave02 服务器执行同步操作,并查看数据同步结果,注意:上方的master状态的数量要与下方的master_log_pos 的写入的数要一致,才能同步
mysql> change master to master_host='192.168.86.12',master_user='myslave',master_password='123456',master_log_file='master-bin.000001',master_log_pos=1750; Query OK, 0 rows affected, 2 warnings (0.01 sec)mysql> start slave; Query OK, 0 rows affected (0.01 sec)mysql> show slave status\G; *************************** 1. row ***************************Slave_IO_State: Waiting for master to send eventMaster_Host: 192.168.86.12Master_User: myslaveMaster_Port: 3306Connect_Retry: 60Master_Log_File: master-bin.000001Read_Master_Log_Pos: 1750Relay_Log_File: relay-log-bin.000002Relay_Log_Pos: 321Relay_Master_Log_File: master-bin.000001Slave_IO_Running: YesSlave_SQL_Running: YesReplicate_Do_DB:
把slave01、slave02两个从库必须设置为只读模式
mysql> set global read_only=1; Query OK, 0 rows affected (0.00 sec)
6.11、 Master 主数据库插入数据,测试是否同步
mysql> create database student; Query OK, 1 row affected (0.01 sec)mysql> use student; Database changed mysql> create table student(id int,name char(5),age int); Query OK, 0 rows affected (0.01 sec)mysql> insert into student values(1,'lmn',22); Query OK, 1 row affected (0.00 sec)mysql> select * from student; +------+------+------+ | id | name | age | +------+------+------+ | 1 | lmn | 22 | +------+------+------+ 1 row in set (0.00 sec)mysql>
到slave01、slave02 查看数据是否同步
select * from student.student;
7、部署MHA
先把三台机器,master01、slave01、slave02中的
[root@slave01 mha4mysql-manager-0.57]# vim /etc/my.cnf ## 把utf-8的段落#注释掉
Master01、Slave01、Slave02 所有服务器上都安装 MHA 依赖的环境,首先安装 epel 源
[root@master01 bin]# yum install epel-release --nogpgcheck -y[root@master01 bin]# ## 复制下面代码下载 yum install -y perl-DBD-MySQL \ perl-Config-Tiny \ perl-Log-Dispatch \ perl-Parallel-ForkManager \ perl-ExtUtils-CBuilder \ perl-ExtUtils-MakeMaker \ perl-CPAN
Master01、Slave01、Slave02所有服务器上必须先安装 node 组件
[root@slave01 opt]# tar zxvf mha4mysql-node-0.57.tar.gz [root@slave01 opt]# cd mha4mysql-node-0.57 [root@slave01 mha4mysql-node-0.57]# perl Makefile.PL [root@slave01 mha4mysql-node-0.57]# make && make install
slave01 节点服务器上安装 manager 组件
[root@slave01 mha4mysql-node-0.57]# cd /opt/ [root@slave01 opt]# tar zxvf mha4mysql-manager-0.57.tar.gz [root@slave01 opt]# cd mha4mysql-manager-0.57 [root@slave01 mha4mysql-manager-0.57]# perl Makefile.PL [root@slave01 mha4mysql-manager-0.57]# make && make install
Master01、Slave01、Slave02所有服务器上配置无密码认证
按master01 上配置到数据库节点 slave01 和 slave02 的无密码认证,模板为例三台分别设置,注意manager节点(slave01节点)需要ssh三台SQL机器,其余slave02,master01只需要ssh其它两台机器
[root@master01 mha4mysql-node-0.57]# ssh-keygen -t rsa ##一直回车结束 [root@master01 mha4mysql-node-0.57]# ssh-copy-id 192.168.86.10 ## 输入登录密码即可 [root@master01 mha4mysql-node-0.57]# ssh-copy-id 192.168.86.11 ## 输入登录密码即可 [root@slave01 sbin]# ssh-copy-id 192.168.86.12 ## 输入登录密码即可
slave1:192.168.86.10 上配置 MHA
[root@slave01 mha4mysql-manager-0.57]# cp -rp /opt/mha4mysql-manager-0.57/samples/scripts/ /usr/local/bin/[root@slave01 mha4mysql-manager-0.57]# ll /usr/local/bin/
复制上述的自动切换时 VIP 管理的脚本到 /usr/local/bin 目录,使用master_ip_failover脚本来管理 VIP 和故障切换
[root@slave01 mha4mysql-manager-0.57]# vim /usr/local/bin/master_ip_failover#!/usr/bin/env perl use strict; use warnings FATAL => 'all';use Getopt::Long;my ( $command, $ssh_user, $orig_master_host, $orig_master_ip, $orig_master_port, $new_master_host, $new_master_ip, $new_master_port ); #############################添加内容部分######################################### my $vip = '192.168.86.100'; #指定vip的地址 my $brdc = '192.168.86.255'; #指定vip的广播地址 my $ifdev = 'ens33'; #指定vip绑定的网卡 my $key = '1'; #指定vip绑定的虚拟网卡序列号 my $ssh_start_vip = "/sbin/ifconfig ens33:$key $vip"; #代表此变量值为ifconfig ens33:1 192.168.86.100 my $ssh_stop_vip = "/sbin/ifconfig ens33:$key down"; #代表此变量值为ifconfig ens33:1 192.168.86.100 down my $exit_code = 0; #指定退出状态码为0 #my $ssh_start_vip = "/usr/sbin/ip addr add $vip/24 brd $brdc dev $ifdev label $ifdev:$key;/usr/sbin/arping -q -A -c 1 -I $ifdev $vip;iptables -F;"; #my $ssh_stop_vip = "/usr/sbin/ip addr del $vip/24 dev $ifdev label $ifdev:$key"; ################################################################################## GetOptions( 'command=s' => \$command, 'ssh_user=s' => \$ssh_user, 'orig_master_host=s' => \$orig_master_host, 'orig_master_ip=s' => \$orig_master_ip, 'orig_master_port=i' => \$orig_master_port, 'new_master_host=s' => \$new_master_host, 'new_master_ip=s' => \$new_master_ip, 'new_master_port=i' => \$new_master_port, );exit &main();sub main {print "\n\nIN SCRIPT TEST====$ssh_stop_vip==$ssh_start_vip===\n\n";if ( $command eq "stop" || $command eq "stopssh" ) {my $exit_code = 1; eval { print "Disabling the VIP on old master: $orig_master_host \n"; &stop_vip(); $exit_code = 0; }; if ($@) { warn "Got Error: $@\n"; exit $exit_code; } exit $exit_code; } elsif ( $command eq "start" ) {my $exit_code = 10; eval { print "Enabling the VIP - $vip on the new master - $new_master_host \n"; &start_vip(); $exit_code = 0; }; if ($@) { warn $@; exit $exit_code; } exit $exit_code; } elsif ( $command eq "status" ) { print "Checking the Status of the script.. 
OK \n"; exit 0; } else { &usage(); exit 1; } } sub start_vip() { `ssh $ssh_user\@$new_master_host \" $ssh_start_vip \"`; } ## A simple system call that disable the VIP on the old_master sub stop_vip() { `ssh $ssh_user\@$orig_master_host \" $ssh_stop_vip \"`; }sub usage { print "Usage: master_ip_failover --command=start|stop|stopssh|status --orig_master_host=host --orig_master_ip=ip --orig_master_port=port --new_master_host=host --new_master_ip=ip --new_master_port=port\n"; }
创建 MHA 软件目录并拷贝配置文件,使用app1.cnf配置文件来管理 mysql 节点服务器
[root@slave01 mha4mysql-manager-0.57]# mkdir /etc/masterha [root@slave01 mha4mysql-manager-0.57]# cp /opt/mha4mysql-manager-0.57/samples/conf/app1.cnf /etc/masterha[root@slave01 mha4mysql-manager-0.57]# vim /etc/masterha/app1.cnf #删除原有内容,直接复制并修改节点服务器的IP地址[server default] manager_log=/var/log/masterha/app1/manager.log manager_workdir=/var/log/masterha/app1 master_binlog_dir=/usr/local/mysql/data master_ip_failover_script=/usr/local/bin/master_ip_failover master_ip_online_change_script=/usr/local/bin/master_ip_online_change password=manager ping_interval=1 remote_workdir=/tmp repl_password=123456 repl_user=myslave secondary_check_script=/usr/local/bin/masterha_secondary_check -s 192.168.86.10 -s 192.168.86.11 shutdown_script="" ssh_user=root user=mha[server1] hostname=192.168.86.12 port=3306[server2] candidate_master=1 check_repl_delay=0 hostname=192.168.86.10 port=3306[server3] hostname=192.168.86.11 port=3306
首次配置需要在 Master 服务器上手动开启虚拟IP
[root@master01 mha4mysql-node-0.57]# /sbin/ifconfig ens33:1 192.168.86.100/24 [root@master01 mha4mysql-node-0.57]# ifconfig
manager 节点上测试 ssh 无密码认证,如果正常最后会输出 successfully
[root@slave01 mha4mysql-manager-0.57]# masterha_check_ssh -conf=/etc/masterha/app1.cnf
manager (slave01)节点上启动 MHA 并查看MHA状态以及日志
[root@slave01 mha4mysql-manager-0.57]# cd /usr/local/bin/ [root@slave01 bin]# ls apply_diff_relay_logs masterha_check_status masterha_master_switch purge_relay_logs filter_mysqlbinlog masterha_conf_host masterha_secondary_check save_binary_logs masterha_check_repl masterha_manager masterha_stop scripts masterha_check_ssh masterha_master_monitor master_ip_failover [root@slave01 bin]# chmod 777 master_ip_failover [root@slave01 bin]# ls apply_diff_relay_logs masterha_check_status masterha_master_switch purge_relay_logs filter_mysqlbinlog masterha_conf_host masterha_secondary_check save_binary_logs masterha_check_repl masterha_manager masterha_stop scripts masterha_check_ssh masterha_master_monitor master_ip_failover[root@slave01 bin]# masterha_check_repl -conf=/etc/masterha/app1.cnf
manager (slave01)节点上启动 MHA 并查看MHA状态以及日志
[root@slave01 bin]# nohup masterha_manager --conf=/etc/masterha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/masterha/app1/manager.log 2>&1 & [1] 105675 [root@slave01 bin]# masterha_check_status --conf=/etc/masterha/app1.cnf app1 (pid:105675) is running(0:PING_OK), master:192.168.86.12 [root@slave01 bin]# cat /var/log/masterha/app1/manager.log | grep "current master" Thu Jan 4 16:34:16 2024 - [info] Checking SSH publickey authentication settings on the current master.. 192.168.86.12(192.168.86.12:3306) (current master)
8、故障切换测试
Master01 服务器上停止mysql服务
[root@master01 mha4mysql-node-0.57]# systemctl stop mysqld.service [root@master01 mha4mysql-node-0.57]# ifconfig
manager 服务器(slave01)上监控观察日志记录,已切换成功
[root@slave01 ~]# cat /var/log/masterha/app1/manager.log
查看slave01服务器,此时vip漂移到新的master上,已启用备用数据库
[root@slave01 ~]# ifconfig
9、故障修复
修复MySQL
[root@master01 mha4mysql-node-0.57]# systemctl restart mysqld.service
修复主从 #在现主库服务器 slave01查看二进制文件和同步点
[root@slave01 bin]# mysql -uroot -p123456 mysql: [Warning] Using a password on the command line interface can be insecure. Welcome to the MySQL monitor. Commands end with ; or \g. Your MySQL connection id is 3 Server version: 5.7.17-log Source distributionCopyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.Oracle is a registered trademark of Oracle Corporation and/or its affiliates. Other names may be trademarks of their respective owners.Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.mysql> show master status; +-------------------+----------+--------------+------------------+-------------------+ | File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set | +-------------------+----------+--------------+------------------+-------------------+ | master-bin.000002 | 154 | | | | +-------------------+----------+--------------+------------------+-------------------+ 1 row in set (0.00 sec)
#在原主库服务器 mysql1 执行同步操作
[root@master01 mha4mysql-node-0.57]# mysql -uroot -p123456 mysql: [Warning] Using a password on the command line interface can be insecure. Welcome to the MySQL monitor. Commands end with ; or \g. Your MySQL connection id is 5 Server version: 5.7.17-log Source distributionCopyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.Oracle is a registered trademark of Oracle Corporation and/or its affiliates. Other names may be trademarks of their respective owners.Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.mysql> change master to master_host='192.168.86.10',master_user='myslave',master_password='123456',master_log_file='master-bin.000002',master_log_pos=154; Query OK, 0 rows affected, 2 warnings (0.01 sec)mysql> start slave; Query OK, 0 rows affected (0.00 sec)mysql> show slave status\G *************************** 1. row ***************************Slave_IO_State: Waiting for master to send eventMaster_Host: 192.168.86.10Master_User: myslaveMaster_Port: 3306Connect_Retry: 60Master_Log_File: master-bin.000002Read_Master_Log_Pos: 154Relay_Log_File: master01-relay-bin.000002Relay_Log_Pos: 321Relay_Master_Log_File: master-bin.000002Slave_IO_Running: YesSlave_SQL_Running: Yes
在 manager 节点上修改配置文件app1.cnf(再把这个记录添加进去,因为它检测掉失效时候会自动消失)
[root@slave01 bin]# vim /etc/masterha/app1.cnf [server default] manager_log=/var/log/masterha/app1/manager.log manager_workdir=/var/log/masterha/app1 master_binlog_dir=/usr/local/mysql/data master_ip_failover_script=/usr/local/bin/master_ip_failover master_ip_online_change_script=/usr/local/bin/master_ip_online_change password=manager ping_interval=1 remote_workdir=/tmp repl_password=123456 repl_user=myslave secondary_check_script=/usr/local/bin/masterha_secondary_check -s 192.168.86.10 -s 192.168.86.11 shutdown_script="" ssh_user=root user=mha[server1] hostname=192.168.86.12 port=3306[server2] candidate_master=1 check_repl_delay=0 hostname=192.168.86.10 port=3306[server3] hostname=192.168.86.11 port=3306
在 manager 节点上启动 MHA
[root@slave01 bin]# nohup masterha_manager --conf=/etc/masterha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/masterha/app1/manager.log 2>&1 & [2] 112914 [root@slave01 bin]# masterha_check_status --conf=/etc/masterha/app1.cnf app1 (pid:108469) is running(0:PING_OK), master:192.168.86.10 [root@slave01 bin]# cat /var/log/masterha/app1/manager.log | grep "current master" Thu Jan 4 17:48:15 2024 - [info] Checking SSH publickey authentication settings on the current master.. 192.168.86.10(192.168.86.10:3306) (current master) [root@slave01 bin]#
ps:目前文档在桌面二阶段考试实验