  • 1. lvs+keepalived high-availability cluster

    I. Introduction to the keepalived tool

    1. A health-check tool designed specifically for LVS and HA scenarios

    2. Supports automatic failover

    3. Supports node health-state checking

     

    II. How keepalived works

       keepalived uses the VRRP hot-standby protocol to provide multi-machine hot standby for Linux servers.

       VRRP (Virtual Router Redundancy Protocol) is a backup solution designed for routers. Multiple routers form a hot-standby group that serves clients through a shared virtual IP address. At any moment only one master router in the group handles traffic, while the others stay in a redundant (backup) state. If the current master fails, another router takes over the virtual IP address according to the configured priorities and continues to provide service.
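    Once keepalived is running (as set up in part III below), the master's periodic VRRP advertisements can be observed on the wire; a quick sanity check, assuming the interface is eth0 as in this article:

    # VRRP is IP protocol 112, multicast to 224.0.0.18; only the
    # current master should be sending these advertisements.
    [root@localhost ~]# tcpdump -i eth0 -nn ip proto 112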

     

    III. Building an lvs+keepalived+DR high-availability load-balancing cluster

    Environment: CentOS 6.5

    web1 server:  192.168.69.6

    web2 server:  192.168.69.7

    Master load balancer: 192.168.69.6 (here it shares a machine with web1; a dedicated server is preferable)

    Backup load balancer: 192.168.69.7 (here it shares a machine with web2; a dedicated server is preferable)

    Virtual IP (VIP): 192.168.69.8

     

    1. Set up the web1 and web2 servers

    See this earlier post: http://www.cnblogs.com/lzcys8868/p/7856469.html

    After setup, verify the Apache service:

    web1:

    [root@localhost ~]# cd /var/www/html/

    [root@localhost html]# cat index.html
    69.6 page OK

    web2:

    [root@localhost ~]# cd /var/www/html/

    [root@localhost html]# cat index.html
    69.7 page OK

    Access web1 and web2 in a browser: http://<server IP>:<port>/index.html

    If the pages above appear, the Apache service is working.
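    The same check works from the command line with curl; a minimal sketch, assuming Apache listens on port 8000 (the port used in the keepalived configuration below):

    [root@localhost ~]# curl http://192.168.69.6:8000/index.html
    69.6 page OK
    [root@localhost ~]# curl http://192.168.69.7:8000/index.html
    69.7 page OK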

    2. Configure the virtual IP address (VIP) on web1 and web2

    On web1:

    [root@localhost ~]# cd scripts/

    [root@localhost scripts]# ls
    lvs-dr

    [root@localhost scripts]# pwd
    /root/scripts
    [root@localhost scripts]# cat lvs-dr
    #!/bin/bash
    # lvs-dr: bind the VIP on each real server's loopback so it will
    # accept packets addressed to the VIP in DR mode.

    VIP="192.168.69.8"
    /sbin/ifconfig lo:0 $VIP broadcast $VIP netmask 255.255.255.255
    /sbin/route add -host $VIP dev lo:0
    # Keep the real servers from answering ARP for the VIP; on the LAN,
    # only the director may advertise it.
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce

     [root@localhost scripts]# chmod +x /root/scripts/lvs-dr

    [root@localhost scripts]# ll
    total 4
    -rwxr-xr-x 1 root root 336 Nov 20 11:08 lvs-dr

    [root@localhost scripts]# sh lvs-dr

    [root@localhost scripts]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
        inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
        link/ether 30:e1:71:6a:df:6c brd ff:ff:ff:ff:ff:ff
        inet 192.168.69.6/24 brd 192.168.69.255 scope global eth0
        inet 192.168.69.8/32 scope global eth0
        inet6 fe80::32e1:71ff:fe6a:df6c/64 scope link
           valid_lft forever preferred_lft forever
    3: eth1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 1000
        link/ether 30:e1:71:6a:df:6d brd ff:ff:ff:ff:ff:ff
    [root@localhost scripts]# echo "/root/scripts/lvs-dr" >> /etc/rc.local

    The steps on web2 are identical to web1: copy the lvs-dr script over, run it, and add it to /etc/rc.local as well.

    Check web2:

    [root@localhost ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
        inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
        link/ether 30:e1:71:70:dd:c4 brd ff:ff:ff:ff:ff:ff
        inet 192.168.69.7/24 brd 192.168.69.255 scope global eth0
        inet6 fe80::32e1:71ff:fe70:ddc4/64 scope link
           valid_lft forever preferred_lft forever
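
    On each web server it is also worth confirming that the ARP kernel parameters actually took effect; both values should come back as set by the script:

    [root@localhost ~]# sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce
    net.ipv4.conf.all.arp_ignore = 1
    net.ipv4.conf.all.arp_announce = 2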

    3. Set up the master load balancer, on 192.168.69.6

    [root@www ~]# modprobe ip_vs
    [root@www ~]# cat /proc/net/ip_vs
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port Forward Weight ActiveConn InActConn

    [root@www ~]# rpm -q ipvsadm keepalived
    package ipvsadm is not installed
    package keepalived is not installed

    [root@www ~]# yum -y install ipvsadm keepalived

    [root@localhost ~]# cd /etc/keepalived/
    [root@localhost keepalived]# cp keepalived.conf keepalived.conf.origin

    [root@localhost keepalived]# vim keepalived.conf

     1 ! Configuration File for keepalived
     2 
     3 global_defs {
     4   #   notification_email {
     5   #   acassen@firewall.loc
     6   #   failover@firewall.loc
     7   #   sysadmin@firewall.loc
     8   # }
     9   # notification_email_from Alexandre.Cassen@firewall.loc
    10   # smtp_server 192.168.200.1
    11    smtp_connect_timeout 30
    12    router_id LVS_DEVEL_BLM
    13 }
    14 
    15 vrrp_instance VI_1 {
    16     state MASTER
    17     interface eth0
    18     virtual_router_id 60
    19     priority 100
    20     advert_int 2
    21     authentication {
    22         auth_type PASS
    23         auth_pass 1111
    24     }
    25     virtual_ipaddress {
    26         192.168.69.8
    27     }
    28 }
    29 
    30 virtual_server 192.168.69.8 8000 {    # the VIP port must match the real_server port
    31     delay_loop 2
    32     lb_algo rr
    33     lb_kind DR    # LVS in DR mode
    34 !    nat_mask 255.255.255.0
    35 !    persistence_timeout 300
    36     protocol TCP
    37 
    38     real_server 192.168.69.6 8000 {
    39         weight 1
    40         TCP_CHECK {
    41             connect_timeout 10
    42             nb_get_retry 3
    43             delay_before_retry 3
    44             connect_port 8000
    45         }
    46     }
    47 
    48     real_server 192.168.69.7 8000 {
    49         weight 1
    50         TCP_CHECK {
    51             connect_timeout 10
    52             nb_get_retry 3
    53             delay_before_retry 3
    54             connect_port 8000
    55         }
    56     }
    57 }

    Note: on lines 40 and 50 there must be a space between TCP_CHECK and the opening brace; otherwise, after keepalived starts, only one of the two real servers will appear instead of both.

    [root@localhost keepalived]# /etc/init.d/keepalived start

    [root@localhost keepalived]# ipvsadm -ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  192.168.69.8:8000 rr
      -> 192.168.69.6:8000            Local   1      0          0        
      -> 192.168.69.7:8000            Route   1      0          7
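
    Note that web1 (192.168.69.6) shows Forward = Local because the director and web1 are the same machine, while web2 is reached via Route (DR). To watch the rules and connection counters update while testing, the standard watch utility can be used:

    [root@localhost keepalived]# watch -n 1 ipvsadm -ln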

    [root@localhost keepalived]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
        inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
        link/ether 30:e1:71:6a:df:6c brd ff:ff:ff:ff:ff:ff
        inet 192.168.69.6/24 brd 192.168.69.255 scope global eth0
        inet 192.168.69.8/32 scope global eth0
        inet6 fe80::32e1:71ff:fe6a:df6c/64 scope link
           valid_lft forever preferred_lft forever

    4. Configure the backup load balancer

    [root@www ~]# modprobe ip_vs

    [root@www ~]# cat /proc/net/ip_vs

    IP Virtual Server version 1.2.1 (size=4096)

    Prot LocalAddress:Port Scheduler Flags  

    -> RemoteAddress:Port Forward Weight ActiveConn InActConn

    [root@www ~]# rpm -q ipvsadm keepalived

    package ipvsadm is not installed
    package keepalived is not installed

    [root@www ~]# yum -y install ipvsadm keepalived

    [root@localhost ~]# cat /etc/keepalived/keepalived.conf

     1 ! Configuration File for keepalived
     2 
     3 global_defs {
     4   #   notification_email {
     5   #   acassen@firewall.loc
     6   #   failover@firewall.loc
     7   #   sysadmin@firewall.loc
     8   # }
     9   # notification_email_from Alexandre.Cassen@firewall.loc
    10   # smtp_server 192.168.200.1
    11    smtp_connect_timeout 30
    12    router_id LVS_DEVEL_BLM
    13 }
    14 
    15 vrrp_instance VI_1 {
    16     state BACKUP
    17     interface eth0
    18     virtual_router_id 60
    19     priority 99
    20     advert_int 2
    21     authentication {
    22         auth_type PASS
    23         auth_pass 1111
    24     }
    25     virtual_ipaddress {
    26         192.168.69.8
    27     }
    28 }
    29 
    30 virtual_server 192.168.69.8 8000 {
    31     delay_loop 2
    32     lb_algo rr
    33     lb_kind DR
    34 !    nat_mask 255.255.255.0
    35 !    persistence_timeout 50
    36     protocol TCP
    37 
    38     real_server 192.168.69.6 8000 {
    39         weight 1
    40         TCP_CHECK {    
    41             connect_timeout 10
    42             nb_get_retry 3
    43             delay_before_retry 3
    44             connect_port 8000
    45         }
    46     }
    47 
    48     real_server 192.168.69.7 8000 {
    49         weight 1
    50         TCP_CHECK {    
    51             connect_timeout 10
    52             nb_get_retry 3
    53             delay_before_retry 3
    54             connect_port 8000
    55         }
    56     }
    57 }

    The backup load balancer's configuration differs from the master's in only two places:

    1) Line 16: state BACKUP

    2) Line 19: priority 99
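
    A quick way to audit this is to keep a copy of the master's file on the backup host (keepalived.conf.master is a hypothetical name for that copy) and diff the two:

    [root@localhost keepalived]# diff keepalived.conf.master keepalived.conf

    Only the state and priority lines should matter; the commented-out persistence_timeout values also differ in the listings above, but lines starting with ! are ignored by keepalived.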

    [root@localhost keepalived]# /etc/init.d/keepalived start

    [root@localhost ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
        inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
        link/ether 30:e1:71:70:dd:c4 brd ff:ff:ff:ff:ff:ff
        inet 192.168.69.7/24 brd 192.168.69.255 scope global eth0
        inet6 fe80::32e1:71ff:fe70:ddc4/64 scope link
           valid_lft forever preferred_lft forever

     [root@localhost ~]# ipvsadm -ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  192.168.69.8:8000 rr
      -> 192.168.69.6:8000            Route   1      0          7        
      -> 192.168.69.7:8000            Local   1      0          0

     

    5. Test VIP distribution from a browser. Enter http://192.168.69.8:8000/index.html and refresh; the two pages should appear in a 1:1 ratio, because both weights are set to 1.
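
    The same round-robin behaviour can be verified from the command line; a small sketch, run from a client machine that is not one of the real servers (the client prompt is illustrative):

    # With rr scheduling and equal weights, successive connections
    # should alternate between the two pages.
    [root@client ~]# for i in 1 2 3 4; do curl -s http://192.168.69.8:8000/index.html; done
    69.6 page OK
    69.7 page OK
    69.6 page OK
    69.7 page OK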

    6. Test keepalived's high-availability failover

    Stop keepalived on web1; web2 should take over the VIP and continue distributing requests.

    On web1:

    [root@localhost ~]# /etc/init.d/keepalived stop

    Check the VIP on web2:
    [root@localhost ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
        inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
        link/ether 30:e1:71:70:dd:c4 brd ff:ff:ff:ff:ff:ff
        inet 192.168.69.7/24 brd 192.168.69.255 scope global eth0
        inet 192.168.69.8/32 scope global eth0
        inet6 fe80::32e1:71ff:fe70:ddc4/64 scope link
           valid_lft forever preferred_lft forever

    Test distribution from the browser again:

    Distribution still works.

    After keepalived is started on web1 again, web1 should automatically reclaim the VIP (it has the higher priority, and keepalived preempts by default). Test:

    [root@localhost ~]# /etc/init.d/keepalived start

    [root@localhost ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
        inet 192.168.69.8/32 brd 192.168.69.8 scope global lo:0
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
        link/ether 30:e1:71:6a:df:6c brd ff:ff:ff:ff:ff:ff
        inet 192.168.69.6/24 brd 192.168.69.255 scope global eth0
        inet 192.168.69.8/32 scope global eth0
        inet6 fe80::32e1:71ff:fe6a:df6c/64 scope link
           valid_lft forever preferred_lft forever

    At this point the lvs+keepalived high-availability cluster is complete.

    Note: when the two servers act as both real servers and master/backup load balancers, accessing the VIP in a production test can be very slow because a broadcast storm forms. A workaround is to stop the backup load balancer, and once spare servers are available, migrate both the master and backup load balancers onto dedicated machines.

    Extra:

    The difference between NAT mode and DR mode:

    Both modes are ways for LVS to do load balancing. In NAT mode, the director rewrites the destination address of each incoming packet (DNAT) to a chosen real server, and reply packets must travel back out through the same director they came in on; with many real servers, all reply traffic funnels through that single egress point, which becomes a bottleneck. In DR mode, the director hands each incoming packet to a real server by rewriting only its destination MAC address, and the real server replies directly to the client without passing back through the director; this removes the reply-path bottleneck and lets the cluster absorb a much larger volume of user traffic.
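
    The distinction maps directly onto how real servers are added with ipvsadm; a minimal manual sketch for illustration (in this article keepalived creates the rules instead):

    # Create the virtual service with round-robin scheduling.
    ipvsadm -A -t 192.168.69.8:8000 -s rr
    # NAT mode: -m (masquerading); replies return through the director.
    ipvsadm -a -t 192.168.69.8:8000 -r 192.168.69.6:8000 -m
    # DR mode: -g (gatewaying, the default); replies go straight to the client.
    ipvsadm -a -t 192.168.69.8:8000 -r 192.168.69.7:8000 -g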
