  • Assignment 1

    Requirements: draw the lab topology diagram and clearly label each node's IP/hostname/role
    (1) Build a 1-master, 2-slave GTID replication environment
    (2) Build an MHA high-availability environment (30 points)
    (3) Simulate a failed master and repair it (10 points)
    (4) Make failover transparent to applications (VIP) (20 points)
    (5) Implement external data compensation (binlog server) (10 points)
    (6) Implement Atlas read/write splitting on top of the MHA environment (20 points)
    (7) Add and remove nodes online and manage Atlas (10 points)

    Lab steps

    1. Set up the GTID replication environment

    1.1 Write the configuration files

    1. Write the master's config file (172.16.1.51)
    [root@db01 ~]# cat /etc/my.cnf
    [mysqld]
    basedir=/application/mysql
    datadir=/application/mysql/data
    socket=/application/mysql/tmp/mysql.sock
    log-error=/var/log/mysql.log
    log_bin=/data/binlog/mysql-bin
    binlog_format=row
    skip-name-resolve
    server-id=51
    gtid-mode=on
    enforce-gtid-consistency=true
    log-slave-updates=1
    [client]
    socket=/application/mysql/tmp/mysql.sock
    [root@db01 ~]# 
    2. Write slave1's config file (172.16.1.52)
    [root@db02 ~]# cat /etc/my.cnf
    [mysqld]
    basedir=/application/mysql
    datadir=/application/mysql/data
    server_id=52
    socket=/application/mysql/tmp/mysql.sock
    log-error=/var/log/mysql.log
    log_bin=/data/binlog/mysql-bin
    binlog_format=row
    skip-name-resolve
    gtid-mode=on
    enforce-gtid-consistency=true
    log-slave-updates=1
    [client]
    socket=/application/mysql/tmp/mysql.sock
    3. Write slave2's config file (172.16.1.53)
    [root@db03 ~]# vim /etc/my.cnf
    [mysqld]
    basedir=/application/mysql
    datadir=/application/mysql/data
    server_id=53
    socket=/application/mysql/tmp/mysql.sock
    log-error=/var/log/mysql.log
    log_bin=/data/binlog/mysql-bin
    binlog_format=row
    skip-name-resolve
    gtid-mode=on
    enforce-gtid-consistency=true
    log-slave-updates=1
    [client]
    socket=/application/mysql/tmp/mysql.sock
    4. Restart MySQL on all three nodes
    /etc/init.d/mysqld restart
    

    1.2 Configure replication

    1. On the master (51), create the replication user
    grant replication slave  on *.* to repl@'172.16.1.%' identified by '123';
    2. On the slaves (52 and 53), point them at the master
    change master to master_host='172.16.1.51',master_user='repl',master_password='123' ,MASTER_AUTO_POSITION=1;
    3. Start replication
    start slave;
    4. Check slave status
    show slave status\G
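    To confirm replication is healthy, both threads should show Yes and the GTID sets should advance; a quick filter like the one below can help (a sketch, assuming the client socket configured above):
    mysql -e "show slave status\G" | egrep "Slave_IO_Running|Slave_SQL_Running|Retrieved_Gtid_Set|Executed_Gtid_Set"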
    

    1.3 Test the GTID environment

    1. Create a database on the master
    create database oldboy;
    2. Check on a slave that it replicated
    mysql> show databases;
    +--------------------+
    | Database           |
    +--------------------+
    | information_schema |
    | mysql              |
    | oldboy             |
    | performance_schema |
    | test               |
    +--------------------+
    5 rows in set (0.01 sec)
    

    2. Build the MHA environment

    2.1 Add the following to /etc/my.cnf (on every node)

    relay_log_purge=0 # keep relay logs so MHA can apply them during failover
    Every node also needs hostname resolution in /etc/hosts:
    172.16.1.51 db01
    172.16.1.52 db02
    172.16.1.53 db03
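    One way to push these entries onto every node (a sketch; skip entries that already exist):
    echo "172.16.1.51 db01" >> /etc/hosts
    echo "172.16.1.52 db02" >> /etc/hosts
    echo "172.16.1.53 db03" >> /etc/hosts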
    

    2.2 Install mha4mysql-node on every node

    MHA download: https://github.com/yoshinorim/mha4mysql-manager/releases
    Every node needs the mha4mysql-node package installed:
    yum -y install mha4mysql-node-0.58-0.el7.centos.noarch.rpm
    

    2.3 Create the MHA management user on the master

    grant all privileges on *.* to mha@'172.16.1.%' identified by 'mha';
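    Because the grant is written to the binlog it replicates to both slaves; a quick check on any node (a sketch):
    mysql -e "select user,host from mysql.user where user='mha';"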
    

    2.4 Create symlinks for the MySQL client tools

    ln -s /application/mysql/bin/mysqlbinlog /usr/bin/mysqlbinlog
    ln -s /application/mysql/bin/mysql /usr/bin/mysql
    

    2.5 Deploy the manager node (in production this is usually a separate server; here we use db03)

    yum -y install mha4mysql-manager-0.58-0.el7.centos.noarch.rpm
    

    2.6 Create the manager directories and configuration file

    mkdir -p /etc/mha
    mkdir -p /var/log/mha/app1    # one working/log directory per application, so a single manager can handle multiple replication sets
    vim /etc/mha/app1.cnf 
    [server default]
    manager_log=/var/log/mha/app1/manager
    manager_workdir=/var/log/mha/app1
    master_binlog_dir=/data/binlog
    user=mha
    password=mha
    ping_interval=2
    repl_password=123
    repl_user=repl
    ssh_user=root
    [server1]
    hostname=172.16.1.51
    port=3306
    [server2]
    hostname=172.16.1.52
    port=3306
    [server3]
    hostname=172.16.1.53
    port=3306
    

    2.7 Set up SSH trust between all nodes (run the commands below on each of db01, db02, and db03)

    ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa >/dev/null 2>&1
    ssh-copy-id -i /root/.ssh/id_dsa.pub root@172.16.1.51
    ssh-copy-id -i /root/.ssh/id_dsa.pub root@172.16.1.52
    ssh-copy-id -i /root/.ssh/id_dsa.pub root@172.16.1.53
    

    2.8 Check SSH connectivity

    [root@db03 tools]# masterha_check_ssh --conf=/etc/mha/app1.cnf
    Tue Apr 30 20:04:52 2019 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping.
    Tue Apr 30 20:04:52 2019 - [info] Reading application default configuration from /etc/mha/app1.cnf..
    Tue Apr 30 20:04:52 2019 - [info] Reading server configuration from /etc/mha/app1.cnf..
    Tue Apr 30 20:04:52 2019 - [info] Starting SSH connection tests..
    Tue Apr 30 20:04:53 2019 - [debug] 
    Tue Apr 30 20:04:52 2019 - [debug]  Connecting via SSH from root@172.16.1.51(172.16.1.51:22) to root@172.16.1.52(172.16.1.52:22)..
    Tue Apr 30 20:04:52 2019 - [debug]   ok.
    Tue Apr 30 20:04:52 2019 - [debug]  Connecting via SSH from root@172.16.1.51(172.16.1.51:22) to root@172.16.1.53(172.16.1.53:22)..
    Tue Apr 30 20:04:53 2019 - [debug]   ok.
    Tue Apr 30 20:04:53 2019 - [debug] 
    Tue Apr 30 20:04:52 2019 - [debug]  Connecting via SSH from root@172.16.1.52(172.16.1.52:22) to root@172.16.1.51(172.16.1.51:22)..
    Tue Apr 30 20:04:53 2019 - [debug]   ok.
    Tue Apr 30 20:04:53 2019 - [debug]  Connecting via SSH from root@172.16.1.52(172.16.1.52:22) to root@172.16.1.53(172.16.1.53:22)..
    Tue Apr 30 20:04:53 2019 - [debug]   ok.
    Tue Apr 30 20:04:53 2019 - [error][/usr/share/perl5/vendor_perl/MHA/SSHCheck.pm, ln63] 
    Tue Apr 30 20:04:53 2019 - [debug]  Connecting via SSH from root@172.16.1.53(172.16.1.53:22) to root@172.16.1.51(172.16.1.51:22)..
    Warning: Permanently added '172.16.1.53' (ECDSA) to the list of known hosts.
    Permission denied (publickey,password).
    Tue Apr 30 20:04:53 2019 - [error][/usr/share/perl5/vendor_perl/MHA/SSHCheck.pm, ln111] SSH connection from root@172.16.1.53(172.16.1.53:22) to root@172.16.1.51(172.16.1.51:22) failed!
    SSH Configuration Check Failed!
     at /usr/bin/masterha_check_ssh line 44.
    [root@db03 tools]# 
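    The check above fails because SSH from root@172.16.1.53 to root@172.16.1.51 was denied; most likely the key distribution in 2.7 was not completed from db03 (an assumption based on this log). A minimal fix is to redo it from db03 and re-run the check:
    # on db03
    ssh-copy-id -i /root/.ssh/id_dsa.pub root@172.16.1.51
    masterha_check_ssh --conf=/etc/mha/app1.cnf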
    

    2.9 Check replication

    [root@db03 tools]# masterha_check_repl --conf=/etc/mha/app1.cnf
    Tue Apr 30 20:05:38 2019 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping.
    Tue Apr 30 20:05:38 2019 - [info] Reading application default configuration from /etc/mha/app1.cnf..
    Tue Apr 30 20:05:38 2019 - [info] Reading server configuration from /etc/mha/app1.cnf..
    Tue Apr 30 20:05:38 2019 - [info] MHA::MasterMonitor version 0.58.
    Tue Apr 30 20:05:40 2019 - [info] GTID failover mode = 1
    Tue Apr 30 20:05:40 2019 - [info] Dead Servers:
    Tue Apr 30 20:05:40 2019 - [info] Alive Servers:
    Tue Apr 30 20:05:40 2019 - [info]   172.16.1.51(172.16.1.51:3306)
    Tue Apr 30 20:05:40 2019 - [info]   172.16.1.52(172.16.1.52:3306)
    Tue Apr 30 20:05:40 2019 - [info]   172.16.1.53(172.16.1.53:3306)
    Tue Apr 30 20:05:40 2019 - [info] Alive Slaves:
    Tue Apr 30 20:05:40 2019 - [info]   172.16.1.52(172.16.1.52:3306)  Version=5.6.43-log (oldest major version between slaves) log-bin:enabled
    Tue Apr 30 20:05:40 2019 - [info]     GTID ON
    Tue Apr 30 20:05:40 2019 - [info]     Replicating from 172.16.1.51(172.16.1.51:3306)
    Tue Apr 30 20:05:40 2019 - [info]   172.16.1.53(172.16.1.53:3306)  Version=5.6.43-log (oldest major version between slaves) log-bin:enabled
    Tue Apr 30 20:05:40 2019 - [info]     GTID ON
    Tue Apr 30 20:05:40 2019 - [info]     Replicating from 172.16.1.51(172.16.1.51:3306)
    Tue Apr 30 20:05:40 2019 - [info] Current Alive Master: 172.16.1.51(172.16.1.51:3306)
    Tue Apr 30 20:05:40 2019 - [info] Checking slave configurations..
    Tue Apr 30 20:05:40 2019 - [info]  read_only=1 is not set on slave 172.16.1.52(172.16.1.52:3306).
    Tue Apr 30 20:05:40 2019 - [info]  read_only=1 is not set on slave 172.16.1.53(172.16.1.53:3306).
    Tue Apr 30 20:05:40 2019 - [info] Checking replication filtering settings..
    Tue Apr 30 20:05:40 2019 - [info]  binlog_do_db= , binlog_ignore_db= 
    Tue Apr 30 20:05:40 2019 - [info]  Replication filtering check ok.
    Tue Apr 30 20:05:40 2019 - [info] GTID (with auto-pos) is supported. Skipping all SSH and Node package checking.
    Tue Apr 30 20:05:40 2019 - [info] Checking SSH publickey authentication settings on the current master..
    Tue Apr 30 20:05:40 2019 - [info] HealthCheck: SSH to 172.16.1.51 is reachable.
    Tue Apr 30 20:05:40 2019 - [info] 
    172.16.1.51(172.16.1.51:3306) (current master)
     +--172.16.1.52(172.16.1.52:3306)
     +--172.16.1.53(172.16.1.53:3306)
    
    Tue Apr 30 20:05:40 2019 - [info] Checking replication health on 172.16.1.52..
    Tue Apr 30 20:05:40 2019 - [info]  ok.
    Tue Apr 30 20:05:40 2019 - [info] Checking replication health on 172.16.1.53..
    Tue Apr 30 20:05:40 2019 - [info]  ok.
    Tue Apr 30 20:05:40 2019 - [warning] master_ip_failover_script is not defined.
    Tue Apr 30 20:05:40 2019 - [warning] shutdown_script is not defined.
    Tue Apr 30 20:05:40 2019 - [info] Got exit code 0 (Not master dead).
    
    MySQL Replication Health is OK.
    [root@db03 tools]# 
    

    2.10 Start MHA

    nohup masterha_manager --conf=/etc/mha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/mha/app1/manager.log 2>&1 &
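    To verify the manager came up and which node it currently treats as the master, MHA's status command can be used:
    masterha_check_status --conf=/etc/mha/app1.cnf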
    

    3. Failure drill

    3.1 Shut down the master (db01)

    [root@db01 tools]# /etc/init.d/mysqld stop
    Shutting down MySQL...... SUCCESS! 
    

    3.2 Check the MHA log

    [root@db03 ~]# cat  /var/log/mha/app1/manager
    ......
     CHANGE MASTER TO MASTER_HOST='172.16.1.52', MASTER_PORT=3306, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='xxx'; # the CHANGE MASTER command to use later when reattaching the failed host
    ......
    ----- Failover Report -----
    
    app1: MySQL Master failover 172.16.1.51(172.16.1.51:3306) to 172.16.1.52(172.16.1.52:3306) succeeded
    
    Master 172.16.1.51(172.16.1.51:3306) is down!
    
    Check MHA Manager logs at db03:/var/log/mha/app1/manager for details.
    
    Started automated(non-interactive) failover.
    Selected 172.16.1.52(172.16.1.52:3306) as a new master.
    172.16.1.52(172.16.1.52:3306): OK: Applying all logs succeeded.
    172.16.1.53(172.16.1.53:3306): OK: Slave started, replicating from 172.16.1.52(172.16.1.52:3306)
    172.16.1.52(172.16.1.52:3306): Resetting slave info succeeded.
    Master failover to 172.16.1.52(172.16.1.52:3306) completed successfully. # the failover to 172.16.1.52 (db02) succeeded
    

    3.3 Check db03

    mysql> show slave status\G
    *************************** 1. row ***************************
                   Slave_IO_State: Waiting for master to send event
                      Master_Host: 172.16.1.52 # now replicating from db02, the new master
                      Master_User: repl
                      Master_Port: 3306
                    Connect_Retry: 60
                  Master_Log_File: mysql-bin.000001
              Read_Master_Log_Pos: 805
                   Relay_Log_File: db03-relay-bin.000002
                    Relay_Log_Pos: 408
            Relay_Master_Log_File: mysql-bin.000001
                 Slave_IO_Running: Yes
                Slave_SQL_Running: Yes
                  Replicate_Do_DB: 
              Replicate_Ignore_DB: 
               Replicate_Do_Table: 
           Replicate_Ignore_Table: 
          Replicate_Wild_Do_Table: 
      Replicate_Wild_Ignore_Table: 
                       Last_Errno: 0
                       Last_Error: 
                     Skip_Counter: 0
              Exec_Master_Log_Pos: 805
                  Relay_Log_Space: 611
                  Until_Condition: None
                   Until_Log_File: 
                    Until_Log_Pos: 0
               Master_SSL_Allowed: No
               Master_SSL_CA_File: 
               Master_SSL_CA_Path: 
                  Master_SSL_Cert: 
                Master_SSL_Cipher: 
                   Master_SSL_Key: 
            Seconds_Behind_Master: 0
    Master_SSL_Verify_Server_Cert: No
                    Last_IO_Errno: 0
                    Last_IO_Error: 
                   Last_SQL_Errno: 0
                   Last_SQL_Error: 
      Replicate_Ignore_Server_Ids: 
                 Master_Server_Id: 52
                      Master_UUID: 901b2d37-6af9-11e9-9c6c-000c29481d4a
                 Master_Info_File: /application/mysql/data/master.info
                        SQL_Delay: 0
              SQL_Remaining_Delay: NULL
          Slave_SQL_Running_State: Slave has read all relay log; waiting for the slave I/O thread to update it
               Master_Retry_Count: 86400
                      Master_Bind: 
          Last_IO_Error_Timestamp: 
         Last_SQL_Error_Timestamp: 
                   Master_SSL_Crl: 
               Master_SSL_Crlpath: 
               Retrieved_Gtid_Set: 
                Executed_Gtid_Set: fac6353b-6a35-11e9-9770-000c29c0e349:1-3
                    Auto_Position: 1
    1 row in set (0.00 sec)
    

    3.4 Restore db01

    1. On db01, start MySQL and rejoin it to replication as a slave of the new master (172.16.1.52)
    [root@db01 tools]# /etc/init.d/mysqld start
    CHANGE MASTER TO MASTER_HOST='172.16.1.52', MASTER_PORT=3306, MASTER_AUTO_POSITION=1, MASTER_USER='repl',MASTER_PASSWORD='123';
    start slave;
    2. Add db01 back into the MHA configuration file
    [root@db03 tools]# vim /etc/mha/app1.cnf 
    [server default]
    ......
    [server1]
    hostname=172.16.1.51
    port=3306
    ......
    3. Re-check and restart the MHA manager
    masterha_check_ssh  --conf=/etc/mha/app1.cnf 
    masterha_check_repl --conf=/etc/mha/app1.cnf 
    nohup masterha_manager --conf=/etc/mha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/mha/app1/manager.log 2>&1 &
    

    4. Use MHA's bundled script to implement IP failover (VIP drift, application transparency)

    4.1 Upload the script

    [root@db03 bin]# cat master_ip_failover 
    #!/usr/bin/env perl
    
    use strict;
    use warnings FATAL => 'all';
    
    use Getopt::Long;
    
    my (
        $command,          $ssh_user,        $orig_master_host, $orig_master_ip,
        $orig_master_port, $new_master_host, $new_master_ip,    $new_master_port
    );
    
    my $vip = '10.0.0.55/24';
    my $key = '1';
    my $ssh_start_vip = "/sbin/ifconfig eth1:$key $vip";
    my $ssh_stop_vip = "/sbin/ifconfig eth1:$key down";
    
    GetOptions(
        'command=s'          => \$command,
        'ssh_user=s'         => \$ssh_user,
        'orig_master_host=s' => \$orig_master_host,
        'orig_master_ip=s'   => \$orig_master_ip,
        'orig_master_port=i' => \$orig_master_port,
        'new_master_host=s'  => \$new_master_host,
        'new_master_ip=s'    => \$new_master_ip,
        'new_master_port=i'  => \$new_master_port,
    );
    
    exit &main();
    
    sub main {
    
        print "\n\nIN SCRIPT TEST====$ssh_stop_vip==$ssh_start_vip===\n\n";
    
        if ( $command eq "stop" || $command eq "stopssh" ) {
    
            my $exit_code = 1;
            eval {
                print "Disabling the VIP on old master: $orig_master_host \n";
                &stop_vip();
                $exit_code = 0;
            };
            if ($@) {
                warn "Got Error: $@\n";
                exit $exit_code;
            }
            exit $exit_code;
        }
        elsif ( $command eq "start" ) {
    
            my $exit_code = 10;
            eval {
                print "Enabling the VIP - $vip on the new master - $new_master_host \n";
                &start_vip();
                $exit_code = 0;
            };
            if ($@) {
                warn $@;
                exit $exit_code;
            }
            exit $exit_code;
        }
        elsif ( $command eq "status" ) {
            print "Checking the Status of the script.. OK \n";
            exit 0;
        }
        else {
            &usage();
            exit 1;
        }
    }
    
    # bring the VIP up on the new master over SSH
    sub start_vip() {
        `ssh $ssh_user\@$new_master_host \" $ssh_start_vip \"`;
    }
    # drop the VIP on the old master over SSH
    sub stop_vip() {
         return 0  unless  ($ssh_user);
        `ssh $ssh_user\@$orig_master_host \" $ssh_stop_vip \"`;
    }
    
    sub usage {
        print
        "Usage: master_ip_failover --command=start|stop|stopssh|status --orig_master_host=host --orig_master_ip=ip --orig_master_port=port --new_master_host=host --new_master_ip=ip --new_master_port=port\n";
    }
    [root@db03 bin]# dos2unix /usr/local/bin/master_ip_failover
    [root@db03 bin]# chmod +x master_ip_failover
    

    4.2 Add to the MHA configuration file

    [root@db03 bin]# vim /etc/mha/app1.cnf 
    [server default]
    ......
    master_ip_failover_script=/usr/local/bin/master_ip_failover
    ......
    

    4.3 Modify the master_ip_failover script for this environment

    my $vip = '172.16.1.100/24';
    my $key = '1';
    my $ssh_start_vip = "/sbin/ifconfig eth1:$key $vip";
    my $ssh_stop_vip = "/sbin/ifconfig eth1:$key down";
    

    4.4 Restart MHA

    masterha_stop --conf=/etc/mha/app1.cnf
    nohup masterha_manager --conf=/etc/mha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/mha/app1/manager.log 2>&1 &
    

    4.5 Manually bind the VIP on the current master. The interface must match the ethN used in the script; here it is eth1:1 (1 is the value set for $key).

    ifconfig eth1:1 172.16.1.100/24 # if a minimal install has no ifconfig command, install it with yum -y install net-tools (an iproute2 alternative is sketched after this output)
    [root@db02 tools]# ip a 
    3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 00:0c:29:48:1d:54 brd ff:ff:ff:ff:ff:ff
        inet 172.16.1.52/24 brd 172.16.1.255 scope global eth1
           valid_lft forever preferred_lft forever
        inet 172.16.1.100/24 brd 172.16.1.255 scope global secondary eth1:1
           valid_lft forever preferred_lft forever
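    If you would rather not install net-tools, the same secondary address can be added with iproute2 (a sketch; the label keeps the alias visible as eth1:1):
    ip addr add 172.16.1.100/24 dev eth1 label eth1:1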
    

    4.6 Switchover test

    Stop the current master (db02)
    [root@db02 tools]# /etc/init.d/mysqld stop
    Shutting down MySQL...... SUCCESS! 
    The VIP has now moved to db01
    [root@db01 tools]# ip a 
    3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 00:0c:29:c0:e3:53 brd ff:ff:ff:ff:ff:ff
        inet 172.16.1.51/24 brd 172.16.1.255 scope global eth1
           valid_lft forever preferred_lft forever
        inet 172.16.1.100/24 brd 172.16.1.255 scope global secondary eth1:1
           valid_lft forever preferred_lft forever
    

    4.7 Restore db02's replication

    mysql> CHANGE MASTER TO MASTER_HOST='172.16.1.51', MASTER_PORT=3306, MASTER_AUTO_POSITION=1, MASTER_USER='repl', MASTER_PASSWORD='123';
    mysql> start slave;
    Add db02 back into the MHA configuration file
    [server2]
    hostname=172.16.1.52
    port=3306
    Restart the MHA manager (same nohup masterha_manager command as in 2.10)
    

    5. Implement external data compensation (binlog server)

    In production this is usually a separate machine running MySQL 5.6 or later with GTID enabled; here we simply reuse db03.

    5.1 Add to the MHA configuration file

    [root@db03 bin]# vim /etc/mha/app1.cnf 
    
    [server default]
    manager_log=/var/log/mha/app1/manager
    manager_workdir=/var/log/mha/app1
    master_binlog_dir=/data/binlog
    master_ip_failover_script=/usr/local/bin/master_ip_failover
    password=mha
    ping_interval=2
    repl_password=123
    repl_user=repl
    ssh_user=root
    user=mha
    
    [server1]
    hostname=172.16.1.51
    port=3306
    
    [server2]
    hostname=172.16.1.52
    port=3306
    
    [binlog1]
    no_master=1
    hostname=172.16.1.53
    master_binlog_dir=/data/mysql/binlog # create this directory in advance; it must not be the same directory as the existing binlogs
    mkdir -p /data/mysql/binlog
    chown -R mysql.mysql /data/mysql/*
    

    5.2 Pull binlog files from the master

    cd /data/mysql/binlog  # the pull must be started from inside the directory created above
    mysqlbinlog  -R --host=172.16.1.51 --user=mha --password=mha --raw  --stop-never mysql-bin.000001 &
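    To confirm the background pull is still running and writing files (a sketch):
    ps -ef | grep [m]ysqlbinlog
    ls -l /data/mysql/binlog/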
    

    5.3 Restart MHA

    masterha_stop --conf=/etc/mha/app1.cnf
    
    nohup masterha_manager --conf=/etc/mha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/mha/app1/manager.log 2>&1 &
    

    5.4 Test

    Rotate the binary log on the master
    mysql> flush logs;
    Check the binlog server directory
    [root@db03 binlog]# ls
    mysql-bin.000001  mysql-bin.000002  mysql-bin.000003  mysql-bin.000004
    

    6. Implement Atlas read/write splitting on the MHA environment

    6.1 Install Atlas

    Download: https://github.com/Qihoo360/Atlas/releases
    rpm -ivh Atlas-2.2.1.el6.x86_64.rpm 
    

    6.2 Modify the configuration file

    /usr/local/mysql-proxy/bin/encrypt  123  # generate the encrypted password placed in pwds below
    cd /usr/local/mysql-proxy/
    vim /usr/local/mysql-proxy/conf/test.cnf
    [mysql-proxy]
    admin-username = user
    admin-password = pwd
    proxy-backend-addresses = 172.16.1.100:3306 # the MHA VIP; writes are sent here
    proxy-read-only-backend-addresses = 172.16.1.52:3306,172.16.1.53:3306
    pwds = repl:3yb5jEku5h4=,mha:O2jBXONX098=
    daemon = true
    keepalive = true
    event-threads = 8
    log-level = message
    log-path = /usr/local/mysql-proxy/log
    sql-log=ON
    proxy-address = 0.0.0.0:33060
    admin-address = 0.0.0.0:2345
    charset=utf8
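    The pwds entries are user:encrypted-password pairs produced by the encrypt tool above, here presumably for the repl ('123') and mha ('mha') passwords; if you use different passwords, regenerate them the same way (a sketch):
    /usr/local/mysql-proxy/bin/encrypt 123   # value for repl in pwds
    /usr/local/mysql-proxy/bin/encrypt mha   # value for mha in pwds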
    

    6.3 Start Atlas

    /usr/local/mysql-proxy/bin/mysql-proxyd test start
    

    6.4 Test

    Test read/write splitting:
    Read test (a loop to observe the read load balancing is sketched after the query)
    mysql -umha -pmha -h172.16.1.53 -P33060 
    show variables like 'server_id';
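    Running the read several times through the proxy should show server_id alternating between the two read backends (52 and 53); a sketch:
    for i in 1 2 3 4; do mysql -umha -pmha -h172.16.1.53 -P33060 -e "show variables like 'server_id';"; done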
    
    Write test (a check against the master is sketched after this block):
    Set both slaves to read-only
    set global read_only=1;
    Connect through the proxy and test a write
    mysql -umha -pmha -h172.16.1.53 -P33060
    create database db1;
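    If routing works, the write lands on the current master behind the VIP even though both slaves are read-only; a quick check (a sketch):
    mysql -umha -pmha -h172.16.1.100 -e "show databases like 'db1';"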
    

    6.5 Manage Atlas

    Connect to the admin interface:
    mysql -uuser -ppwd -h127.0.0.1 -P2345
    Print the help:
    mysql> select * from help;
    Dynamically remove a node:
    REMOVE BACKEND 3;
    Add a node:
    ADD SLAVE 172.16.1.53:3306;
    Save the changes to the configuration file:
    SAVE CONFIG;
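    Before and after changing nodes, the backend list and each node's state can be inspected from the same admin session (SELECT * FROM backends is part of the Atlas admin interface; shown as a sketch):
    mysql> SELECT * FROM backends;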
    