1. Environment planning
192.168.2.31 node01
192.168.2.32 node02
192.168.2.33 node03
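To let the nodes reach each other by hostname, the same mapping can go into /etc/hosts on every node; a minimal sketch assuming exactly the hosts listed above:
cat >> /etc/hosts <<'EOF'
192.168.2.31 node01
192.168.2.32 node02
192.168.2.33 node03
EOF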
2. Environment preparation
One master, two slaves, GTID-based replication
MySQL download links (8.0.24 / 5.7.34):
https://downloads.mysql.com/archives/get/p/23/file/mysql-8.0.24-1.el8.x86_64.rpm-bundle.tar
https://downloads.mysql.com/archives/get/p/23/file/mysql-8.0.24-1.el7.x86_64.rpm-bundle.tar
https://downloads.mysql.com/archives/get/p/23/file/mysql-8.0.24-linux-glibc2.12-x86_64.tar.xz
https://cdn.mysql.com/archives/mysql-8.0/mysql-boost-8.0.24.tar.gz
https://cdn.mysql.com//Downloads/MySQL-5.7/mysql-5.7.34-1.el7.x86_64.rpm-bundle.tar
https://cdn.mysql.com//Downloads/MySQL-5.7/mysql-5.7.34-linux-glibc2.12-x86_64.tar.gz
https://cdn.mysql.com//Downloads/MySQL-5.7/mysql-boost-5.7.34.tar.gz
Reference articles:
Binary installation of MySQL 5.7
https://blog.csdn.net/qq_37598011/article/details/93489404
https://blog.csdn.net/zuixinyan/article/details/49562931
MySQL 5.7 master-slave replication based on mysqldump
https://blog.csdn.net/zuixinyan/article/details/49648207
Adding a slave online with innobackupex (XtraBackup) - MHA
https://blog.csdn.net/zuixinyan/article/details/49847317
MySQL 5.7 MHA (GTID + ROW) deployment with hands-on failover and online_change drills
https://blog.csdn.net/weixin_30588827/article/details/94987911
Automatic MySQL failover with MHA (VIP mode, without keepalived)
https://blog.csdn.net/yabingshi_tech/article/details/47341705
MySQL 5.7 one-master two-slave MHA automatic switchover (VIP method)
https://blog.csdn.net/cuiruidu3106/article/details/100438928
MySQL 5.7 master-slave replication with MHA automatic failover
https://blog.csdn.net/congjiong3047/article/details/100447297
Deploy the MySQL database
Download and extract the software
wget https://cdn.mysql.com//Downloads/MySQL-5.7/mysql-5.7.34-linux-glibc2.12-x86_64.tar.gz
tar -zxvf mysql-5.7.34-linux-glibc2.12-x86_64.tar.gz
mv mysql-5.7.34-linux-glibc2.12-x86_64 /usr/local/mysql
Create the mysql group and user
groupadd mysql && useradd -r -g mysql mysql
Create the data directories and set ownership
mkdir -p /data/mysql/{data,logs}
chown mysql:mysql -R /data/mysql
Node01 configuration file
vim /etc/my.cnf
[mysqld]
bind-address=0.0.0.0
port=3306
user=mysql
socket=/tmp/mysql.sock
basedir=/usr/local/mysql
datadir=/data/mysql/data
log_error=/data/mysql/mysqld.err
pid_file=/data/mysql/mysqld.pid
character_set_server=utf8mb4
symbolic-links=0
explicit_defaults_for_timestamp=true
server_id=1
log_bin=mysql-bin
log_bin_index=mysql-bin.index
relay_log=relay-bin
relay_log_index=relay-bin.index
binlog_format=row
gtid_mode=ON
enforce_gtid_consistency=true
rpl_semi_sync_master_enabled=1
plugin_load=rpl_semi_sync_master=semisync_master.so
# Node02 configuration file (only the replication-related options are listed; the base [mysqld] settings match Node01)
vim /etc/my.cnf
server_id=2
log_bin=mysql-bin
log_bin_index=mysql-bin.index
relay_log=relay-bin
relay_log_index=relay-bin.index
binlog_format=row
log_slave_updates=ON
gtid_mode=ON
enforce_gtid_consistency=true
slave_parallel_type=LOGICAL_CLOCK
slave_parallel_workers=16
master_info_repository=TABLE
relay_log_info_repository=TABLE
relay_log_recovery=ON
rpl_semi_sync_slave_enabled=1
plugin_load=rpl_semi_sync_slave=semisync_slave.so
read_only=1
# Node03 configuration file (again, only the replication-related options are listed)
vim /etc/my.cnf
server_id=3
log_bin=mysql-bin
log_bin_index=mysql-bin.index
relay_log=relay-bin
relay_log_index=relay-bin.index
binlog_format=row
gtid_mode=ON
enforce_gtid_consistency=true
slave_parallel_type=LOGICAL_CLOCK
slave_parallel_workers=16
master_info_repository=TABLE
relay_log_info_repository=TABLE
relay_log_recovery=ON
rpl_semi_sync_slave_enabled=1
plugin_load=rpl_semi_sync_slave=semisync_slave.so
read_only=1
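Once the three instances are initialized and started (steps below), the replication-related settings can be sanity-checked from the mysql client; a quick sketch using standard MySQL 5.7 queries:
mysql> SHOW VARIABLES LIKE 'gtid_mode';
mysql> SHOW VARIABLES LIKE 'enforce_gtid_consistency';
mysql> SHOW VARIABLES LIKE 'rpl_semi_sync%enabled';
mysql> SHOW PLUGINS;  -- rpl_semi_sync_master / rpl_semi_sync_slave should be listed as ACTIVE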
Install the required dependency packages
yum -y install libaio libaio-devel numactl
Initialize the database
cd /usr/local/mysql/bin/
./mysqld --defaults-file=/etc/my.cnf --initialize --user=mysql --basedir=/usr/local/mysql --datadir=/data/mysql/data
(note: --defaults-file must be the first option on the mysqld command line)
or (mysql_install_db is deprecated in MySQL 5.7; mysqld --initialize is preferred)
mysql_install_db --user=mysql --datadir=/data/mysql/data
Check the temporary root password in the error log
cat /data/mysql/mysqld.err
Copy mysql.server to /etc/init.d/mysqld
cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysqld
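The copied file is a SysV init script, so systemd has to pick it up before systemctl can manage it; a small sketch (the chkconfig line is optional and only enables start at boot):
chmod +x /etc/init.d/mysqld
systemctl daemon-reload
chkconfig --add mysqld   # optional: start MySQL automatically at boot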
Start MySQL and change the root password
systemctl start mysqld
ps -elf | grep -v grep | grep mysql
./mysql -h127.0.0.1 -u root -p
SET PASSWORD=PASSWORD('123456');
ALTER USER root@'localhost' PASSWORD EXPIRE NEVER;
flush privileges;
Allow remote connections for root
update mysql.user set host='%' where user='root';
FLUSH PRIVILEGES;
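A quick way to confirm the remote grant works is to log in over TCP from one of the other nodes (the 123456 password set above is assumed; the full client path is used in case the symlink has not been created yet):
/usr/local/mysql/bin/mysql -h192.168.2.31 -P3306 -uroot -p123456 -e "select @@hostname, @@version;"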
Create a symlink for the mysql command
ln -s /usr/local/mysql/bin/mysql /usr/bin
Create the replication account
grant replication client,replication slave on *.* to 'repl'@'192.168.2.%' identified by 'mysql';
Back up the data
mysqldump -hlocalhost -uroot -p -S /tmp/mysql.sock --single-transaction --master-data=2 -A > full.sql
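Because GTID mode is on, mysqldump writes the source's executed GTID set into the dump as SET @@GLOBAL.GTID_PURGED=..., which is what later allows master_auto_position=1 on the slaves; it can be spot-checked before copying the file:
grep -i GTID_PURGED full.sql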
Copy the dump file to Node02 and Node03 (create /data/mysql/backup on them first)
scp full.sql node02:/data/mysql/backup
scp full.sql node03:/data/mysql/backup
On Node02 and Node03, import the backup and start replication
mysql> source /data/mysql/backup/full.sql
mysql> change master to master_host='192.168.2.31',master_port=3306,master_user='repl',master_password='mysql',master_auto_position=1;
mysql> start slave;
mysql> show slave status\G
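In the \G output on each slave, the fields worth confirming look roughly like this for a healthy GTID slave of 192.168.2.31:
Master_Host: 192.168.2.31
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
Seconds_Behind_Master: 0
Auto_Position: 1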
3. Create symlinks for key programs (all nodes)
ln -s /usr/local/mysql/bin/mysqlbinlog /usr/bin/mysqlbinlog
ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql
4. Configure passwordless SSH between all nodes (key pairs)
Node01:
ssh-keygen -t rsa -P ''
ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.2.32
ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.2.33
Repeat the key generation and ssh-copy-id steps on Node02 and Node03 toward the other two nodes (a loop sketch follows the verification commands below), since MHA needs passwordless SSH in every direction.
Verify:
Node01:
ssh 192.168.2.32 date
ssh 192.168.2.33 date
Node02:
ssh 192.168.2.31 date
ssh 192.168.2.33 date
Node03:
ssh 192.168.2.31 date
ssh 192.168.2.32 date
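Since the same key setup is needed in every direction, it can also be scripted and run once on each of the three nodes; a small sketch (each ssh-copy-id prompts for root's password the first time):
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
for ip in 192.168.2.31 192.168.2.32 192.168.2.33; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$ip   # copying the key to the local node too is harmless
done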
5. Install the MHA software
Download links for the Node and Manager packages:
https://github.com/yoshinorim/mha4mysql-manager/releases/tag/v0.58
https://github.com/yoshinorim/mha4mysql-node/releases/tag/v0.58
Install the Node package dependencies on all nodes
yum -y install perl-DBD-MySQL
Install the Node package
rpm -ivh mha4mysql-node-0.58-0.el7.centos.noarch.rpm
Create the user MHA needs on the master
grant all privileges on *.* to mha@'192.168.2.%' identified by 'mysql';
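Before moving on, it is worth confirming that the manager host can actually log in with this account over the network, since MHA uses it for health checks; a quick test from Node03:
mysql -h192.168.2.31 -P3306 -umha -pmysql -e "select version();"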
Install the Manager package dependencies on Node03 (epel-release first, since several of the Perl modules come from EPEL)
yum -y install epel-release && yum -y install perl-Config-Tiny perl-Log-Dispatch perl-Parallel-ForkManager perl-Time-HiRes
Install the Manager package
rpm -ivh mha4mysql-manager-0.58-0.el7.centos.noarch.rpm
6. Prepare the Manager configuration file
Create the configuration directory
mkdir -p /etc/mha
Create the log directory
mkdir -p /var/log/mha/app1
Edit the MHA configuration file
[root@node03 ~]# vim /etc/mha/app1.cnf
[server default]
manager_log=/var/log/mha/app1/manager
manager_workdir=/var/log/mha/app1
master_binlog_dir=/data/mysql/data
user=mha
password=mysql
ping_interval=2
repl_password=mysql
repl_user=repl
ssh_user=root
[server1]
hostname=192.168.2.31
port=3306
[server2]
hostname=192.168.2.32
port=3306
[server3]
hostname=192.168.2.33
port=3306
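The replication check in the next section will warn that master_ip_failover_script is not defined; if a VIP should follow the master (as in the VIP-based articles referenced earlier), that script can be declared in [server default], and a preferred failover target can be marked with candidate_master. A sketch of the extra lines, where the script path is only an illustrative placeholder and not part of this deployment:
# under [server default]
master_ip_failover_script=/usr/local/bin/master_ip_failover
# under [server2], to prefer 192.168.2.32 as the new master
candidate_master=1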
7. Status checks
Check SSH connectivity between nodes:
[root@node03 ~]$ masterha_check_ssh --conf=/etc/mha/app1.cnf
Sat Jul 11 20:02:04 2020 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping.
Sat Jul 11 20:02:04 2020 - [info] Reading application default configuration from /etc/mha/app1.cnf..
Sat Jul 11 20:02:04 2020 - [info] Reading server configuration from /etc/mha/app1.cnf..
Sat Jul 11 20:02:04 2020 - [info] Starting SSH connection tests..
Sat Jul 11 20:02:05 2020 - [debug]
Sat Jul 11 20:02:04 2020 - [debug] Connecting via SSH from root@192.168.2.31(192.168.2.31:22) to root@192.168.2.32(192.168.2.32:22)..
Sat Jul 11 20:02:05 2020 - [debug] ok.
Sat Jul 11 20:02:05 2020 - [debug] Connecting via SSH from root@192.168.2.31(192.168.2.31:22) to root@192.168.2.33(192.168.2.33:22)..
Sat Jul 11 20:02:05 2020 - [debug] ok.
Sat Jul 11 20:02:06 2020 - [debug]
Sat Jul 11 20:02:05 2020 - [debug] Connecting via SSH from root@192.168.2.32(192.168.2.32:22) to root@192.168.2.31(192.168.2.31:22)..
Sat Jul 11 20:02:05 2020 - [debug] ok.
Sat Jul 11 20:02:05 2020 - [debug] Connecting via SSH from root@192.168.2.32(192.168.2.32:22) to root@192.168.2.33(192.168.2.33:22)..
Sat Jul 11 20:02:06 2020 - [debug] ok.
Sat Jul 11 20:02:07 2020 - [debug]
Sat Jul 11 20:02:05 2020 - [debug] Connecting via SSH from root@192.168.2.33(192.168.2.33:22) to root@192.168.2.31(192.168.2.31:22)..
Sat Jul 11 20:02:06 2020 - [debug] ok.
Sat Jul 11 20:02:06 2020 - [debug] Connecting via SSH from root@192.168.2.33(192.168.2.33:22) to root@192.168.2.32(192.168.2.32:22)..
Sat Jul 11 20:02:06 2020 - [debug] ok.
Sat Jul 11 20:02:07 2020 - [info] All SSH connection tests passed successfully.
Check replication status:
[root@node03 ~]$ masterha_check_repl --conf=/etc/mha/app1.cnf
Sat Jul 11 20:03:29 2020 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping.
Sat Jul 11 20:03:29 2020 - [info] Reading application default configuration from /etc/mha/app1.cnf..
Sat Jul 11 20:03:29 2020 - [info] Reading server configuration from /etc/mha/app1.cnf..
Sat Jul 11 20:03:29 2020 - [info] MHA::MasterMonitor version 0.58.
Sat Jul 11 20:03:30 2020 - [info] GTID failover mode = 1
Sat Jul 11 20:03:30 2020 - [info] Dead Servers:
Sat Jul 11 20:03:30 2020 - [info] Alive Servers:
Sat Jul 11 20:03:30 2020 - [info] 192.168.2.31(192.168.2.31:3306)
Sat Jul 11 20:03:30 2020 - [info] 192.168.2.32(192.168.2.32:3306)
Sat Jul 11 20:03:30 2020 - [info] 192.168.2.33(192.168.2.33:3306)
Sat Jul 11 20:03:30 2020 - [info] Alive Slaves:
Sat Jul 11 20:03:30 2020 - [info] 192.168.2.32(192.168.2.32:3306) Version=5.7.29-log (oldest major version between slaves) log-bin:enabled
Sat Jul 11 20:03:30 2020 - [info] GTID ON
Sat Jul 11 20:03:30 2020 - [info] Replicating from 192.168.2.31(192.168.2.31:3306)
Sat Jul 11 20:03:30 2020 - [info] 192.168.2.33(192.168.2.33:3306) Version=5.7.29-log (oldest major version between slaves) log-bin:enabled
Sat Jul 11 20:03:30 2020 - [info] GTID ON
Sat Jul 11 20:03:30 2020 - [info] Replicating from 192.168.2.31(192.168.2.31:3306)
Sat Jul 11 20:03:30 2020 - [info] Current Alive Master: 192.168.2.31(192.168.2.31:3306)
Sat Jul 11 20:03:30 2020 - [info] Checking slave configurations..
Sat Jul 11 20:03:30 2020 - [info] read_only=1 is not set on slave 192.168.2.32(192.168.2.32:3306).
Sat Jul 11 20:03:30 2020 - [info] read_only=1 is not set on slave 192.168.2.33(192.168.2.33:3306).
Sat Jul 11 20:03:30 2020 - [info] Checking replication filtering settings..
Sat Jul 11 20:03:30 2020 - [info] binlog_do_db= , binlog_ignore_db=
Sat Jul 11 20:03:30 2020 - [info] Replication filtering check ok.
Sat Jul 11 20:03:30 2020 - [info] GTID (with auto-pos) is supported. Skipping all SSH and Node package checking.
Sat Jul 11 20:03:30 2020 - [info] Checking SSH publickey authentication settings on the current master..
Sat Jul 11 20:03:31 2020 - [info] HealthCheck: SSH to 192.168.2.31 is reachable.
Sat Jul 11 20:03:31 2020 - [info]
192.168.2.31(192.168.2.31:3306) (current master)
+--192.168.2.32(192.168.2.32:3306)
+--192.168.2.33(192.168.2.33:3306)
Sat Jul 11 20:03:31 2020 - [info] Checking replication health on 192.168.2.32..
Sat Jul 11 20:03:31 2020 - [info] ok.
Sat Jul 11 20:03:31 2020 - [info] Checking replication health on 192.168.2.33..
Sat Jul 11 20:03:31 2020 - [info] ok.
Sat Jul 11 20:03:31 2020 - [warning] master_ip_failover_script is not defined.
Sat Jul 11 20:03:31 2020 - [warning] shutdown_script is not defined.
Sat Jul 11 20:03:31 2020 - [info] Got exit code 0 (Not master dead).
MySQL Replication Health is OK.
8. Start the MHA Manager
[root@node03 ~]# nohup masterha_manager --conf=/etc/mha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/mha/app1/manager.log 2>&1 &
9. Check MHA status
[root@node03 ~]# masterha_check_status --conf=/etc/mha/app1.cnf
app1 (pid:1827) is running(0:PING_OK), master:192.168.2.31
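When the manager has to be taken down, for example before maintenance or to restart it after a failover, mha4mysql-manager provides a dedicated stop command:
masterha_stop --conf=/etc/mha/app1.cnf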
Reprinted from:
https://www.cnblogs.com/orcl-2018/p/13285227.html
https://blog.csdn.net/justaiiit/article/details/108783784
https://blog.csdn.net/zuixinyan/article/details/49702769