Implementing a High-Availability Cluster with Keepalived+Nginx
Source: cnblogs  Author: 别来无恙-  Date: 2019/4/11 8:52:57

Keepalived+Nginx High-Availability Cluster (Master/Backup Mode)

Cluster architecture diagram: (image omitted in this text version)

Note: the Keepalived machines also act as the Nginx load balancers.

1) Prepare the lab environment (all nodes run CentOS 7)

  # cat /etc/redhat-release
  CentOS Linux release 7.4.1708 (Core)

Run the following on all nodes:

  # systemctl stop firewalld                                          // stop the firewall
  # sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux   // disable SELinux (takes effect after a reboot)
  # setenforce 0                                                      // disable SELinux for the current session
  # ntpdate 0.centos.pool.ntp.org                                     // synchronize the clock
  # yum install nginx -y                                              // install nginx
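
If you would rather not run with the firewall off, note two things the steps above leave open: stopping firewalld is not persistent across reboots, and VRRP traffic (IP protocol 112, multicast to 224.0.0.18) must be allowed between the LB nodes if the firewall stays up. A minimal sketch, added here and not part of the original walkthrough (the rich-rule syntax assumes a firewalld version that understands the vrrp protocol keyword from /etc/protocols):

  // Option A: make the firewall shutdown persistent across reboots
  # systemctl disable firewalld
  // Option B: keep firewalld running and allow VRRP between the LB nodes instead
  # firewall-cmd --permanent --add-rich-rule='rule protocol value="vrrp" accept'
  # firewall-cmd --reload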

2) Configure the backend web servers (same on both)

  # echo "`hostname` `ifconfig ens33 |sed -n 's#.*inet \(.*\)netmask.*#\1#p'`" > /usr/share/nginx/html/index.html    // create a test page; this writes the hostname and IP into index.html
  # vim /etc/nginx/nginx.conf    // edit the config file
  user nginx;
  worker_processes auto;
  error_log /var/log/nginx/error.log;
  pid /run/nginx.pid;
  include /usr/share/nginx/modules/*.conf;
  events {
      worker_connections 1024;
  }
  http {
      log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
      access_log /var/log/nginx/access.log main;
      sendfile on;
      tcp_nopush on;
      tcp_nodelay on;
      keepalive_timeout 65;
      types_hash_max_size 2048;
      include /etc/nginx/mime.types;
      default_type application/octet-stream;
      include /etc/nginx/conf.d/*.conf;
      server {
          listen 80;
          server_name www.mtian.org;
          location / {
              root /usr/share/nginx/html;
          }
          access_log /var/log/nginx/access.log main;
      }
  }
  # systemctl start nginx     // start nginx
  # systemctl enable nginx    // enable it at boot
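
Before moving on to the load balancers, it is worth confirming that each web server answers locally. A quick sanity check, added here (the web01/192.168.1.33 response shown is the expected page content, taken from the test output further below):

  # curl -H 'Host: www.mtian.org' http://127.0.0.1/
  web01 192.168.1.33    // expected: the hostname and IP written into index.html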

3) Configure the LB servers (same on both)

  # vim /etc/nginx/nginx.conf
  user nginx;
  worker_processes auto;
  error_log /var/log/nginx/error.log;
  pid /run/nginx.pid;
  include /usr/share/nginx/modules/*.conf;
  events {
      worker_connections 1024;
  }
  http {
      log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
      access_log /var/log/nginx/access.log main;
      sendfile on;
      tcp_nopush on;
      tcp_nodelay on;
      keepalive_timeout 65;
      types_hash_max_size 2048;
      include /etc/nginx/mime.types;
      default_type application/octet-stream;
      include /etc/nginx/conf.d/*.conf;
      upstream backend {
          server 192.168.1.33:80 weight=1 max_fails=3 fail_timeout=20s;
          server 192.168.1.34:80 weight=1 max_fails=3 fail_timeout=20s;
      }
      server {
          listen 80;
          server_name www.mtian.org;
          location / {
              proxy_pass http://backend;
              proxy_set_header Host $host:$proxy_port;
              proxy_set_header X-Forwarded-For $remote_addr;
          }
      }
  }
  # systemctl start nginx     // start nginx
  # systemctl enable nginx    // enable it at boot
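
A typo in the upstream block will silently break the proxy, so it is a good habit (not a step in the original article) to validate the configuration before starting nginx:

  # nginx -t    // check the configuration syntax
  nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
  nginx: configuration file /etc/nginx/nginx.conf test is successful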

4) On the test machine (192.168.1.35), add hosts entries and check that the LB cluster works. (Any test machine will do, as long as it can reach the LB nodes.)

  [root@node01 ~]# cat /etc/hosts
  127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
  ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
  192.168.1.32 www.mtian.org
  192.168.1.31 www.mtian.org
  // While testing, shut down the lb1 and lb2 nodes in turn. If the site stays reachable and the round-robin effect is still visible, the nginx LB cluster works. (With both LB addresses listed under one name, the resolver returns both, and curl falls back to the other address when one refuses connections.)
  [root@node01 ~]# curl www.mtian.org
  web01 192.168.1.33
  [root@node01 ~]# curl www.mtian.org
  web02 192.168.1.34
  [root@node01 ~]# curl www.mtian.org
  web01 192.168.1.33
  [root@node01 ~]# curl www.mtian.org
  web02 192.168.1.34
  [root@node01 ~]# curl www.mtian.org
  web01 192.168.1.33
  [root@node01 ~]# curl www.mtian.org
  web02 192.168.1.34

5) Once the steps above work, set up keepalived: install it on both LB nodes (you could also build from source; here we simply use yum).

  # yum install keepalived -y
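
A quick check of what yum pulled in never hurts (the exact version will vary with your repositories; the CentOS 7.4 base repo ships the 1.3.x series):

  # keepalived --version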

6) Configure the LB-01 node

  [root@LB-01 ~]# vim /etc/keepalived/keepalived.conf
  ! Configuration File for keepalived
  global_defs {
      notification_email {
          381347268@qq.com
      }
      smtp_server 192.168.200.1
      smtp_connect_timeout 30
      router_id LVS_DEVEL
  }
  vrrp_instance VI_1 {
      state MASTER
      interface ens33
      virtual_router_id 51
      priority 150
      advert_int 1
      authentication {
          auth_type PASS
          auth_pass 1111
      }
      virtual_ipaddress {
          192.168.1.110/24 dev ens33 label ens33:1
      }
  }
  [root@LB-01 ~]# systemctl start keepalived     // start keepalived
  [root@LB-01 ~]# systemctl enable keepalived    // enable it at boot
  [root@LB-01 ~]# ip a    // check the IPs; the VIP 192.168.1.110 has appeared
  ......
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
  link/ether 00:0c:29:94:17:44 brd ff:ff:ff:ff:ff:ff
  inet 192.168.1.31/24 brd 192.168.1.255 scope global ens33
  valid_lft forever preferred_lft forever
  inet 192.168.1.110/24 scope global secondary ens33:1
  valid_lft forever preferred_lft forever
  inet6 fe80::20c:29ff:fe94:1744/64 scope link
  valid_lft forever preferred_lft forever
  ......
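
You can also watch the master multicast its VRRP advertisements, one per second here as set by advert_int. This verification step is an addition to the original article; since VRRP is IP protocol 112 sent to 224.0.0.18, a plain tcpdump filter is enough:

  [root@LB-02 ~]# tcpdump -i ens33 -nn 'ip proto 112'
  ... IP 192.168.1.31 > 224.0.0.18: VRRPv2, Advertisement, vrid 51, prio 150, ...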

7) Configure the LB-02 node

  [root@LB-02 ~]# vim /etc/keepalived/keepalived.conf
  ! Configuration File for keepalived
  global_defs {
      notification_email {
          381347268@qq.com
      }
      smtp_server 192.168.200.1
      smtp_connect_timeout 30
      router_id LVS_DEVEL
  }
  vrrp_instance VI_1 {
      state BACKUP
      interface ens33
      virtual_router_id 51
      priority 100
      advert_int 1
      authentication {
          auth_type PASS
          auth_pass 1111
      }
      virtual_ipaddress {
          192.168.1.110/24 dev ens33 label ens33:1
      }
  }
  [root@LB-02 ~]# systemctl start keepalived     // start keepalived
  [root@LB-02 ~]# systemctl enable keepalived    // enable it at boot
  [root@LB-02 ~]# ifconfig    // check the IPs; the backup node holds no VIP (the VIP only floats here when the master fails)
  ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
  inet 192.168.1.32 netmask 255.255.255.0 broadcast 192.168.1.255
  inet6 fe80::20c:29ff:feab:6532 prefixlen 64 scopeid 0x20<link>
  ether 00:0c:29:ab:65:32 txqueuelen 1000 (Ethernet)
  RX packets 43752 bytes 17739987 (16.9 MiB)
  RX errors 0 dropped 0 overruns 0 frame 0
  TX packets 4177 bytes 415805 (406.0 KiB)
  TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
  ......
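
Keepalived also logs every VRRP state transition, which helps when you are unsure which node currently owns the VIP. A sketch of the check (the exact log wording varies slightly between keepalived versions):

  [root@LB-02 ~]# journalctl -u keepalived | grep -i state
  ... Keepalived_vrrp[...]: VRRP_Instance(VI_1) Entering BACKUP STATE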

8) From the test machine, access the VIP 192.168.1.110 configured in Keepalived

  [root@node01 ~]# curl 192.168.1.110
  web01 192.168.1.33
  [root@node01 ~]# curl 192.168.1.110
  web02 192.168.1.34
  [root@node01 ~]# curl 192.168.1.110
  web01 192.168.1.33
  [root@node01 ~]# curl 192.168.1.110
  web02 192.168.1.34
  // Stop keepalived on the LB-01 master node, then access the VIP again
  [root@LB-01 ~]# systemctl stop keepalived
  [root@node01 ~]#
  [root@node01 ~]# curl 192.168.1.110
  web01 192.168.1.33
  [root@node01 ~]# curl 192.168.1.110
  web02 192.168.1.34
  [root@node01 ~]# curl 192.168.1.110
  web01 192.168.1.33
  [root@node01 ~]# curl 192.168.1.110
  web02 192.168.1.34
  // Check the IPs on the LB-01 master: the VIP is gone
  [root@LB-01 ~]# ifconfig
  ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
  inet 192.168.1.31 netmask 255.255.255.0 broadcast 192.168.1.255
  inet6 fe80::20c:29ff:fe94:1744 prefixlen 64 scopeid 0x20<link>
  ether 00:0c:29:94:17:44 txqueuelen 1000 (Ethernet)
  RX packets 46813 bytes 18033403 (17.1 MiB)
  RX errors 0 dropped 0 overruns 0 frame 0
  TX packets 9350 bytes 1040882 (1016.4 KiB)
  TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
  ...
  // Check the IPs on the LB-02 backup: the VIP has floated over
  [root@LB-02 ~]# ifconfig
  ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
  inet 192.168.1.32 netmask 255.255.255.0 broadcast 192.168.1.255
  inet6 fe80::20c:29ff:feab:6532 prefixlen 64 scopeid 0x20<link>
  ether 00:0c:29:ab:65:32 txqueuelen 1000 (Ethernet)
  RX packets 44023 bytes 17760070 (16.9 MiB)
  RX errors 0 dropped 0 overruns 0 frame 0
  TX packets 4333 bytes 430037 (419.9 KiB)
  TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
  ens33:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
  inet 192.168.1.110 netmask 255.255.255.0 broadcast 0.0.0.0
  ether 00:0c:29:ab:65:32 txqueuelen 1000 (Ethernet)
  ...

With that, the Keepalived+Nginx high-availability cluster (master/backup mode) is complete.
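
One caveat on the setup above: as configured, failover happens only when keepalived itself (or the whole node) goes down. If nginx crashes while keepalived keeps running, the VIP stays on a node that can no longer serve traffic. A commonly used remedy, sketched here as an addition to the original article (it assumes the psmisc killall is installed; weight -60 is chosen so the master's priority 150 drops below the backup's 100 while the check fails):

  vrrp_script check_nginx {
      script "killall -0 nginx"    // exits non-zero when no nginx process exists
      interval 2                   // run the check every 2 seconds
      weight -60                   // subtract 60 from priority while the check fails
  }
  vrrp_instance VI_1 {
      ...                          // existing settings unchanged
      track_script {
          check_nginx
      }
  }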

Keepalived+Nginx High-Availability Cluster (Dual-Master Mode)

Turning keepalived into a dual-master setup is simple: add a second vrrp_instance block. On the current master, add an instance in which that node is the backup; on the current backup, add an instance in which that node is the master.

Cluster architecture diagram: (image omitted in this text version)

Note: we continue with the environment above and only change the keepalived configuration on the LB nodes. LB-01 is now both a Keepalived master and a backup, and so is LB-02. LB-01 is the default master for VIP 192.168.1.110, and LB-02 is the default master for VIP 192.168.1.210.
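
For dual-master mode to actually spread load, clients have to use both VIPs. In practice that usually means publishing two DNS A records for the site name, one per VIP, so DNS round-robin splits clients across the two masters; in this lab the test host simply curls each VIP directly. A sketch of the zone entries, assuming a BIND-style zone file:

  www.mtian.org.    IN  A  192.168.1.110
  www.mtian.org.    IN  A  192.168.1.210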

1) Configure the LB-01 node

  [root@LB-01 ~]# vim /etc/keepalived/keepalived.conf    // edit the config file and add a second vrrp_instance block
  ! Configuration File for keepalived
  global_defs {
      notification_email {
          381347268@qq.com
      }
      smtp_server 192.168.200.1
      smtp_connect_timeout 30
      router_id LVS_DEVEL
  }
  vrrp_instance VI_1 {
      state MASTER
      interface ens33
      virtual_router_id 51
      priority 150
      advert_int 1
      authentication {
          auth_type PASS
          auth_pass 1111
      }
      virtual_ipaddress {
          192.168.1.110/24 dev ens33 label ens33:1
      }
  }
  vrrp_instance VI_2 {
      state BACKUP
      interface ens33
      virtual_router_id 52
      priority 100
      advert_int 1
      authentication {
          auth_type PASS
          auth_pass 2222
      }
      virtual_ipaddress {
          192.168.1.210/24 dev ens33 label ens33:2
      }
  }
  [root@LB-01 ~]# systemctl restart keepalived    // restart keepalived
  // Check the LB-01 node's IPs: the VIP 192.168.1.110 still sits on this node by default
  [root@LB-01 ~]# ip a
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
  link/ether 00:0c:29:94:17:44 brd ff:ff:ff:ff:ff:ff
  inet 192.168.1.31/24 brd 192.168.1.255 scope global ens33
  valid_lft forever preferred_lft forever
  inet 192.168.1.110/24 scope global secondary ens33:1
  valid_lft forever preferred_lft forever
  inet6 fe80::20c:29ff:fe94:1744/64 scope link
  valid_lft forever preferred_lft forever

2) Configure the LB-02 node

  [root@LB-02 ~]# vim /etc/keepalived/keepalived.conf    // edit the config file and add a second vrrp_instance block
  ! Configuration File for keepalived
  global_defs {
      notification_email {
          381347268@qq.com
      }
      smtp_server 192.168.200.1
      smtp_connect_timeout 30
      router_id LVS_DEVEL
  }
  vrrp_instance VI_1 {
      state BACKUP
      interface ens33
      virtual_router_id 51
      priority 100
      advert_int 1
      authentication {
          auth_type PASS
          auth_pass 1111
      }
      virtual_ipaddress {
          192.168.1.110/24 dev ens33 label ens33:1
      }
  }
  vrrp_instance VI_2 {
      state MASTER
      interface ens33
      virtual_router_id 52
      priority 150
      advert_int 1
      authentication {
          auth_type PASS
          auth_pass 2222
      }
      virtual_ipaddress {
          192.168.1.210/24 dev ens33 label ens33:2
      }
  }
  [root@LB-02 ~]# systemctl restart keepalived    // restart keepalived
  // Check the LB-02 node's IPs: a VIP (192.168.1.210) has appeared here too, so this node is now also a master
  [root@LB-02 ~]# ip a
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
  link/ether 00:0c:29:ab:65:32 brd ff:ff:ff:ff:ff:ff
  inet 192.168.1.32/24 brd 192.168.1.255 scope global ens33
  valid_lft forever preferred_lft forever
  inet 192.168.1.210/24 scope global secondary ens33:2
  valid_lft forever preferred_lft forever
  inet6 fe80::20c:29ff:feab:6532/64 scope link
  valid_lft forever preferred_lft forever

3) Test

  [root@node01 ~]# curl 192.168.1.110
  web01 192.168.1.33
  [root@node01 ~]# curl 192.168.1.110
  web02 192.168.1.34
  [root@node01 ~]# curl 192.168.1.210
  web01 192.168.1.33
  [root@node01 ~]# curl 192.168.1.210
  web02 192.168.1.34
  // Stop keepalived on the LB-01 node and test again
  [root@LB-01 ~]# systemctl stop keepalived
  [root@node01 ~]# curl 192.168.1.110
  web01 192.168.1.33
  [root@node01 ~]# curl 192.168.1.110
  web02 192.168.1.34
  [root@node01 ~]# curl 192.168.1.210
  web01 192.168.1.33
  [root@node01 ~]# curl 192.168.1.210
  web02 192.168.1.34
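
With LB-01 stopped, both VIPs should now be sitting on LB-02; you can confirm this on that node (a verification command added here for completeness):

  [root@LB-02 ~]# ip a show dev ens33 | grep 'inet '    // expect 192.168.1.32 plus both VIPs 192.168.1.110 and 192.168.1.210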

The tests show that both VIPs configured in keepalived schedule requests normally, and that access continues when either keepalived node is stopped. With that, the Keepalived+Nginx high-availability cluster (dual-master mode) is complete.

 

Original article: http://www.cnblogs.com/yanjieli/p/10682064.html
