Nginx +keepalived

?

配置高可用的Nginx

准备环境:

节点node17,node18

?

lftp 172.16.0.1:/pub/Sources/6.x86_64/nginx

?

[[email protected] ~]# rpm -ivh nginx-1.6.2-1.el6.ngx.x86_64.rpm

?

[[email protected] ~]# scp -rp nginx-1.6.2-1.el6.ngx.x86_64.rpm node18:/root

?

[[email protected] ~]# rpm -ivh nginx-1.6.2-1.el6.ngx.x86_64.rpm

?

为两个节点都配置默认的网页,以示区别

?

[[email protected] html]# vim index.html

<h1>node15.stu21.com</h1>

?

[[email protected] html]# vim index.html

<h1>node2.stu21.com </h1>

?

接下来启动两个节点上的nginx服务:

?

[[email protected] html]# service nginx start;ssh node18 'service nginx start';

?

测试:

?

?

?

?

?

?

?

?

?


问题:我们一般进行主从切换测试时都是关闭keepalived或关闭网卡接口,有没有一种方法能在不关闭keepalived或网卡接口的情况下实现维护呢?以及监测nginx的状态?
方法肯定是有的,在keepalived新版本中,支持脚本vrrp_script,具体如何使用大家可以man keepalived.conf查看。下面我们来演示一下具体怎么实现。

?

vrrp_script chk_schedown {

    script "[ -e /etc/keepalived/down ] && exit 1 || exit 0"

    interval 1 #监控间隔

    weight -5 #减小优先级

    fall 2 #监控失败次数

    rise 1 #监控成功次数

}

vrrp_script chk_nginx {

    script "killall -0 nginx"

    interval 1

    weight -5

    fall 2

    rise 1

}

?

(2).执行脚本

track_script {

    chk_schedown #执行chk_schedown脚本

    chk_nginx #执行chk_nginx脚本

}

?

?

node17:我就继续沿用apache+lvs+keepalived的节点node17上keepalived相关配置,稍作修改

[[email protected] ~]# cat /etc/keepalived/keepalived.conf

! Configuration File for keepalived

?

global_defs {

notification_email {

[email protected]

[email protected] #配置管理员邮箱

}

notification_email_from [email protected] #配置发件人

smtp_server 127.0.0.1 #配置邮件服务器

smtp_connect_timeout 30

router_id LVS_DEVEL

}

vrrp_script chk_schedown { #定义vrrp执行脚本

script "[ -e /etc/keepalived/down ] && exit 1 || exit 0" #查看是否有down文件,有就进入维护模式

interval 1 #监控间隔时间

weight -5 #降低优先级

fall 2 #失败次数

rise 1 #成功数次

}

vrrp_script chk_nginx {

    script "killall -0 nginx"

    interval 1

    weight -5

    fall 2

    rise 1

}

?

vrrp_instance VI_1 {

state MASTER #配置模式

#state BACKUP #测试配置模式

interface eth0

virtual_router_id 51

priority 100 #配置优先级

advert_int 1

authentication {

auth_type PASS

auth_pass 1111

}

virtual_ipaddress {

172.16.21.100 #配置虚拟IP地址

}

track_script { #执行脚本

chk_schedown

chk_nginx #执行chk_nginx脚本

}

#增加以下三行

notify_master "/etc/keepalived/notify.sh -n master -a 172.16.21.100"

notify_backup "/etc/keepalived/notify.sh -n backup -a 172.16.21.100"

notify_fault "/etc/keepalived/notify.sh -n fault -a 172.16.21.100"

}

?

vrrp_instance VI_2 {

state BACKUP #配置模式 #修改为BACKUP

#state MASTER #测试配置模式

interface eth0

virtual_router_id 52

?

priority 99 #配置优先级 #修改优先级

advert_int 1

authentication {

auth_type PASS

auth_pass 1111

}

virtual_ipaddress {

172.16.21.101 #配置虚拟IP地址

}

track_script {

chk_schedown

chk_nginx #执行chk_nginx脚本

}

#增加以下三行

notify_master "/etc/keepalived/notify.sh -n master -a 172.16.21.101"

notify_backup "/etc/keepalived/notify.sh -n backup -a 172.16.21.101"

notify_fault "/etc/keepalived/notify.sh -n fault -a 172.16.21.101"

}

?

?

#virtual_server 172.16.21.100 80 {

# delay_loop 6

# lb_algo rr

# lb_kind DR

# nat_mask 255.255.255.0

# #persistence_timeout 50

# protocol TCP

#

# real_server 172.16.21.15 80 { #配置realserver

# weight 1

# HTTP_GET {#监控配置

# url {

# path /

# #digest ff20ad2481f97b1754ef3e12ecd3a9cc

# status_code 200

# }

#

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 1

# }

# }

# real_server 172.16.21.16 80 { #配置realserver

# weight 1

# HTTP_GET {#监控配置

# url {

# path /

# #digest ff20ad2481f97b1754ef3e12ecd3a9cc

# status_code 200

# }

#

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 1

# }

# }

# sorry_server 127.0.0.1 80 #增加一行sorry_server

#}

#

#

#virtual_server 172.16.21.101 80 {

# delay_loop 6

# lb_algo rr

# lb_kind DR

# nat_mask 255.255.255.0

# #persistence_timeout 50

# protocol TCP

#

# real_server 172.16.21.15 80 { #配置realserver

# weight 1

# HTTP_GET {#监控配置

# url {

# path /

# #digest ff20ad2481f97b1754ef3e12ecd3a9cc

# status_code 200

# }

#

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 1

# }

# }

# real_server 172.16.21.16 80 { #配置realserver

# weight 1

# HTTP_GET {#监控配置

# url {

# path /

# #digest ff20ad2481f97b1754ef3e12ecd3a9cc

# status_code 200

# }

#

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 1

# }

# }

# sorry_server 127.0.0.1 80 #增加一行sorry_server

#}

#

?

#virtual_server 10.10.10.2 1358 {

# delay_loop 6

# lb_algo rr

# lb_kind NAT

# persistence_timeout 50

# protocol TCP

?

# sorry_server 192.168.200.200 1358

?

# real_server 192.168.200.2 1358 {

# weight 1

# HTTP_GET {

# url {

# path /testurl/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl2/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl3/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 3

# }

# }

?

# real_server 192.168.200.3 1358 {

# weight 1

# HTTP_GET {

# url {

# path /testurl/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334c

# }

# url {

# path /testurl2/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334c

# }

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 3

# }

# }

#}

?

#virtual_server 10.10.10.3 1358 {

# delay_loop 3

# lb_algo rr

# lb_kind NAT

# nat_mask 255.255.255.0

# persistence_timeout 50

# protocol TCP

?

# real_server 192.168.200.4 1358 {

# weight 1

# HTTP_GET {

# url {

# path /testurl/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl2/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl3/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 3

# }

# }

?

# real_server 192.168.200.5 1358 {

# weight 1

# HTTP_GET {

# url {

# path /testurl/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl2/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl3/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 3

# }

# }

#}

?

node18:同样沿用apache+lvs+keepalived实验中节点node17上的keepalived相关配置,稍作修改;着色标注的部分为需要加上的内容

?

[[email protected] keepalived]# cat keepalived.conf

! Configuration File for keepalived

?

global_defs {

notification_email {

[email protected]

[email protected] #配置管理员邮箱

}

notification_email_from [email protected] #配置发件人

smtp_server 127.0.0.1 #配置邮件服务器

smtp_connect_timeout 30

router_id LVS_DEVEL

}

vrrp_script chk_schedown {

script "[ -e /etc/keepalived/down ] && exit 1 || exit 0"

interval 1

weight -5

fall 2

rise 1

}

vrrp_script chk_nginx {

    script "killall -0 nginx"

    interval 1

    weight -5

    fall 2

    rise 1

}

?

vrrp_instance VI_1 {

state BACKUP #配置模式 #修改为BACKUP

# state MASTER #测试配置模式

interface eth0

virtual_router_id 51

priority 99 #配置优先级 #修改优先级

advert_int 1

authentication {

auth_type PASS

auth_pass 1111

}

virtual_ipaddress {

172.16.21.100 #配置虚拟IP地址

}

track_script {

chk_schedown

chk_nginx #执行chk_nginx脚本

}

#增加以下三行

notify_master "/etc/keepalived/notify.sh -n master -a 172.16.21.100"

notify_backup "/etc/keepalived/notify.sh -n backup -a 172.16.21.100"

notify_fault "/etc/keepalived/notify.sh -n fault -a 172.16.21.100"

}

?

vrrp_instance VI_2 {

#state BACKUP #配置模式 #修改为BACKUP

state MASTER #测试配置模式

interface eth0

virtual_router_id 52

?

priority 101 #配置优先级 #修改优先级

advert_int 1

authentication {

auth_type PASS

auth_pass 1111

}

virtual_ipaddress {

172.16.21.101 #配置虚拟IP地址

}

track_script {

chk_schedown

chk_nginx #执行chk_nginx脚本

}

#增加以下三行

notify_master "/etc/keepalived/notify.sh -n master -a 172.16.21.101"

notify_backup "/etc/keepalived/notify.sh -n backup -a 172.16.21.101"

notify_fault "/etc/keepalived/notify.sh -n fault -a 172.16.21.101"

}

?

?

#virtual_server 172.16.21.100 80 {

# delay_loop 6

# lb_algo rr

# lb_kind DR

# nat_mask 255.255.255.0

# #persistence_timeout 50

# protocol TCP

#

# real_server 172.16.21.15 80 { #配置realserver

# weight 1

# HTTP_GET {#监控配置

# url {

# path /

# #digest ff20ad2481f97b1754ef3e12ecd3a9cc

# status_code 200

# }

#

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 1

# }

# }

# real_server 172.16.21.16 80 { #配置realserver

# weight 1

# HTTP_GET {#监控配置

# url {

# path /

# #digest ff20ad2481f97b1754ef3e12ecd3a9cc

# status_code 200

# }

#

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 1

# }

# }

# sorry_server 127.0.0.1 80 #增加一行sorry_server

#}

#

#virtual_server 172.16.21.101 80 {

# delay_loop 6

# lb_algo rr

# lb_kind DR

# nat_mask 255.255.255.0

# #persistence_timeout 50

# protocol TCP

#

# real_server 172.16.21.15 80 { #配置realserver

# weight 1

# HTTP_GET {#监控配置

# url {

# path /

# #digest ff20ad2481f97b1754ef3e12ecd3a9cc

# status_code 200

# }

#

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 1

# }

# }

# real_server 172.16.21.16 80 { #配置realserver

# weight 1

# HTTP_GET {#监控配置

# url {

# path /

# #digest ff20ad2481f97b1754ef3e12ecd3a9cc

# status_code 200

# }

#

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 1

# }

# }

# sorry_server 127.0.0.1 80 #增加一行sorry_server

#}

#

?

#virtual_server 10.10.10.2 1358 {

# delay_loop 6

# lb_algo rr

# lb_kind NAT

# persistence_timeout 50

# protocol TCP

?

# sorry_server 192.168.200.200 1358

?

# real_server 192.168.200.2 1358 {

# weight 1

# HTTP_GET {

# url {

# path /testurl/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl2/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl3/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 3

# }

# }

?

# real_server 192.168.200.3 1358 {

# weight 1

# HTTP_GET {

# url {

# path /testurl/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334c

# }

# url {

# path /testurl2/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334c

# }

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 3

# }

# }

#}

?

#virtual_server 10.10.10.3 1358 {

# delay_loop 3

# lb_algo rr

# lb_kind NAT

# nat_mask 255.255.255.0

# persistence_timeout 50

# protocol TCP

?

# real_server 192.168.200.4 1358 {

# weight 1

# HTTP_GET {

# url {

# path /testurl/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl2/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl3/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 3

# }

# }

?

# real_server 192.168.200.5 1358 {

# weight 1

# HTTP_GET {

# url {

# path /testurl/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl2/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# url {

# path /testurl3/test.jsp

# digest 640205b7b0fc66c1ea91c463fac6334d

# }

# connect_timeout 3

# nb_get_retry 3

# delay_before_retry 3

# }

# }

#}

?

?

?

时间: 2024-11-03 01:30:09

Nginx +keepalived的相关文章

nginx+keepalived的高可用负载均衡集群构建

实验架构图: 实验环境 Nginx和Keepalived原理介绍 参考博客:http://467754239.blog.51cto.com/4878013/1541421 1.nginx Nginx进程基于于Master+Slave(worker)多进程模型,自身具有非常稳定的子进程管理功能.在Master进程分配模式下,Master进程永远不进行业务处理,只是进行任务分发, 从而达到Master进程的存活高可靠性,Slave(worker)进程所有的业务信号都 由主进程发出,Slave(wor

Nginx+Keepalived 实现反代 负载均衡 高可用(HA)配置

Nginx+Keepalived实现反代负载均衡高可用(HA)配置 Nginx+Keepalived实现反代负载均衡高可用配置 OS IP 子网掩码 路由网关 Centos6.6 nginx Keepalived Eth0:192.168.26.210 255.255.252.0 192.168.25.3 VIP:192.168.27.210 Centos6.6 Nginx Keepalived Eth0:192.168.26.211 255.255.252.0 192.168.25.3 VIP

nginx+keepalived高可用

nginx+keepalived高可用 1.环境如下 lb-01:192.168.75.136/24  nginx+keepalived-master lb-02:192.168.75.137/24  nginx+keepalived-backup VIP:192.168.75.135/24   rs-01:192.168.75.133/24 apache rs-02:192.168.75.134/24 apache lb操作系统centos7.rs操作系统ubuntu14.04 2.lb-01/

nginx+keepalived双主高可用负载均衡

实验环境及软件版本:CentOS版本: 6.6(2.6.32.-504.el6.x86_64)nginx版本: nginx-1.6.3keepalived版本:keepalived-1.2.7 主LB1:LB-110-05 主LB2:LB-111-06 一.安装准备及依赖(用SecureCRT的交互窗口同时对两台LB操作,只贴出LB1的操作过程在此) [[email protected] ~]# mkdir tools [[email protected] ~]# mkdir /applicat

【Linux运维-集群技术进阶】Nginx+Keepalived+Tomcat搭建高可用/负载均衡/动静分离的Webserver集群

额.博客名字有点长.. . 前言 最终到这篇文章了,心情是有点激动的. 由于这篇文章会集中曾经博客讲到的全部Nginx功能点.包含主要的负载均衡,还有动静分离技术再加上这篇文章的重点.通过Keepalived实现的HA(High Available).为什么要实现高可用呢?曾经在搭建的时候仅仅用了一台Nginxserver,这种话假设Nginxserver宕机了,那么整个站点就会挂掉.所以要实现Nginx的高可用,一台挂掉还会有还有一台顶上去.从而保证站点能够持续的提供服务. 关于负载均衡和动静

Centos7+Nginx+Keepalived实现Apache服务的高可用&负载均衡

Centos7+Nginx+Keepalived实现Apache服务的高可用&负载均衡 今天是2017年的第一天,昨天也就是2016年的最后一天,我尝试部署了Centos7+Nginx+Keepalived实现WEB服务的高可用负载均衡服务,终于在2017年的第一天前完成了,所以在此分享给有需要的朋友:说到负载均衡,其实在linux下有很多服务可以实现,比如nginx.haproxy.lvs等服务,当前我们在前面的文章有介绍过了,但是对于高可用服务,我们在linux下最常见也是应用最多的是Kee

CentOS Linux 负载均衡高可用WEB集群之Nginx+Keepalived配置

Nginx+Keepalived实现负载均衡高可用的WEB服务集群,nginx作为负载均衡器,keepalived作为高可用,当其中的一台负载均衡器(nginx)发生故障时可以迅速切换到备用的负载均衡器(nginx),保持业务的连续性. 1.服务器的环境配置及IP分配 操作系统:CentOS release 6.7 (Final) nginx版本:nginx/1.8.0 keepalived版本:Keepalived v1.2.13 Nginx + keepalived服务器的IP分配表 服务器

Nginx+Keepalived主备切换(包含nginx服务停止)

原文地址:http://blog.sina.com.cn/s/blog_79ac6aa80101bmed.html Nginx+Keepalived主备切换(包含nginx服务停止) 环境: VM中4台CentOS 本机PC充当测试机win8 步骤: 1.搭建 nginx+keepalived主备切换(不考虑主nginx停掉现象)文章中的环境 2.修改两台部署的keepalived的配置文件 3.编写观察nginx进程情况的脚本内容 #!/bin/bash A=`ps -C nginx --no

centos6中三台物理机配置nginx+keepalived+lvs

以下只是简单的安装配置,并没有测试这套负载,各种参数大家可以自己测试 vip:10.0.50.170 lvs server:10.0.50.183 real server:10.0.50.184/185 183/184/185同步时间,并且安装nginx # ntpdate time.nist.gov # yum install nginx # /etc/init.d/nginx start 在184/185上编写测试页面/usr/share/nginx/html/index.html 183上

实战:ansible自动化部署nginx+keepalived+mysql负载均衡集群

一.目的 使用ansible自动化部署nginx+keepalived+mysql负载均衡集群. 二.拓扑规划 三.详细步骤 1.环境的搭建 (1).安装ansible,同时配置私钥免密码进行通信 [[email protected] ~]# ssh-keygen  -t rsa #-t表示使用的加密类型,其中rsa1表示version1版本,rsa.dsa.ecdsa的加密对于的是version2版本 Generating public/private rsa key pair. #这里询问你