LVS+NGINX+TOMCAT Cluster Implementation Notes

LVS

IP:

eth0: 192.168.100.115

eth1: 192.168.100.215

vi /etc/init.d/lvs

#!/bin/sh

#

# lvs          Start lvs

#

# chkconfig: 2345 08 92

# description:  Starts, stops and saves lvs

#

SNS_VIP=192.168.100.215

SNS_RIP1=192.168.100.114

SNS_RIP2=192.168.100.113

. /etc/rc.d/init.d/functions

#logger $0 called with $1

retval=0

start()

{

#set squid vip

/sbin/ipvsadm --set 30 5 60

#/sbin/ifconfig eth0:0 $SNS_VIP broadcast $SNS_VIP netmask 255.255.255.255 broadcast $SNS_VIP up

#/sbin/route add -host $SNS_VIP dev eth0:0

/sbin/ipvsadm -A -t $SNS_VIP:80 -s rr

/sbin/ipvsadm -a -t $SNS_VIP:80 -r $SNS_RIP1 -g

/sbin/ipvsadm -a -t $SNS_VIP:80 -r $SNS_RIP2 -g

touch /var/lock/subsys/ipvsadm > /dev/null 2>&1

echo "ipvsadm started"

}

stop()

{

/sbin/ipvsadm -C

/sbin/ipvsadm -Z

#ifconfig eth0:0 down

#route del $SNS_VIP

rm -f /var/lock/subsys/ipvsadm > /dev/null 2>&1

echo "ipvsadm stopped"

}

status()

{

if [ ! -e /var/lock/subsys/ipvsadm ];then

echo "ipvsadm stoped"

exit 1

else

echo "ipvsadm OK"

fi

}

case "$1" in

start)

start

;;

stop)

stop

;;

status)

status

;;

restart)

stop

start

;;

*)

echo $"Usage: $0 {start|stop|status}"

retval=1

esac

exit $retval
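After saving the script it can be registered as a service and the IPVS table verified. A minimal sketch, assuming the script was saved as /etc/init.d/lvs as above:

chmod 755 /etc/init.d/lvs

chkconfig --add lvs

chkconfig lvs on

service lvs start

# The virtual service on $SNS_VIP:80 and the two real servers should be listed
/sbin/ipvsadm -Ln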

keepalived

tar xf keepalived-1.2.7.tar.gz

cd keepalived-1.2.7

./configure --prefix=/ --mandir=/usr/local/share/man/ --with-kernel-dir=/usr/src/kernels/2.6.32-279.el6.x86_64

make

make install

cd /etc/keepalived/

mv keepalived.conf keepalived.conf.default

chkconfig --add keepalived

chkconfig keepalived on

vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {

notification_email {

[email protected]

}

notification_email_from [email protected]

smtp_server 127.0.0.1

smtp_connect_timeout 30

router_id Director1

}

# VRRP (Virtual Router Redundancy Protocol) instance configuration

vrrp_instance VI_1 {

state MASTER

interface eth0

virtual_router_id 51

priority 150

advert_int 1

authentication {

auth_type PASS

auth_pass 1111

}

virtual_ipaddress {

192.168.122.254/24 eth1

}

}

# LVS virtual server configuration

virtual_server 192.168.100.215 80 {

delay_loop 3

lb_algo rr

lb_kind DR

nat_mask 255.255.255.0

#
persistence_timeout 50

protocol TCP

real_server 192.168.100.113 80 {

weight 1

TCP_CHECK {

connect_port 80

connect_timeout 10

nb_get_retry 3

delay_before_retry 3

}

}

real_server 192.168.100.114 80 {

weight 1

TCP_CHECK {

connect_port 80

connect_timeout 10

nb_get_retry 3

delay_before_retry 3

}

}

}

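Once keepalived is started it should program the same virtual service that the init script above creates. A minimal check, run on the MASTER director:

service keepalived start

# The VIP from virtual_ipaddress should appear on the configured interface
ip addr show

# The virtual server and both real servers should be listed by IPVS
/sbin/ipvsadm -Ln

# VRRP transitions and TCP_CHECK health-check results are logged via syslog
tail /var/log/messages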

LVS-realserver

IP: 192.168.100.113

IP: 192.168.100.114

vi /etc/init.d/rsup

#!/bin/bash

VIP=192.168.100.215

ifconfig lo:0 $VIP broadcast $VIP netmask 255.255.255.255 up

#route add -host $VIP dev lo:0

echo "1"
>/proc/sys/net/ipv4/conf/lo/arp_ignore

echo "2"
>/proc/sys/net/ipv4/conf/lo/arp_announce

echo "1"
>/proc/sys/net/ipv4/conf/all/arp_ignore

echo "2"
>/proc/sys/net/ipv4/conf/all/arp_announce

#sysctl -p
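The rsup script must be executable and run at every boot on each real server; the VIP binding and ARP suppression can then be confirmed. A minimal sketch, assuming rc.local is executed at startup:

chmod 755 /etc/init.d/rsup

# Run once now, and register for subsequent boots
/etc/init.d/rsup

echo "/etc/init.d/rsup" >> /etc/rc.local

# The VIP should be bound to lo:0 and the ARP kernel parameters set to 1/2
ifconfig lo:0

sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce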

Nginx

IP: 192.168.100.122 (nfs)

IP: 192.168.100.113

IP: 192.168.100.114

# Install zlib

tar xzvf zlib-1.2.3.tar.gz

cd zlib-1.2.3

./configure

make && make install

# Install pcre

tar zxvf pcre-7.9.tar.gz

cd pcre-7.9

./configure --prefix=/usr/local/pcre

make && make install

wget http://h264.code-shop.com/download/nginx_mod_h264_streaming-2.2.7.tar.gz

tar -zxvf nginx_mod_h264_streaming-2.2.7.tar.gz

unzip nginx_upstream_check_module-master.zip

mv ./nginx_upstream_check_module-master /root/health

tar -xvf nginx-1.4.1.tar.gz  -C /usr/src/

useradd nginx

cd /usr/src/nginx-1.4.1

patch -p1 < /root/health/check_1.2.6+.patch

./configure --user=nginx --group=nginx --with-http_stub_status_module --with-http_ssl_module --with-http_gzip_static_module --with-http_flv_module --add-module=../nginx_mod_h264_streaming-2.2.7 --with-pcre=/software/pcre-7.9 --with-zlib=/software/zlib-1.2.3 --prefix=/usr/local/nginx --add-module=/root/health

make && make install

/usr/local/nginx/sbin/nginx
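Before relying on the new binary, the build and configuration can be sanity-checked; nginx -V prints the compiled-in modules and configure arguments:

# Confirm the h264 streaming and upstream check modules were compiled in
/usr/local/nginx/sbin/nginx -V

# Test the configuration file syntax
/usr/local/nginx/sbin/nginx -t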

Start automatically at boot

vi /etc/init.d/nginx

#!/bin/bash

#

# nginx - this script starts and stops the nginx daemon

#

# chkconfig:   - 85 15

# description:  Nginx is an HTTP(S) server, HTTP(S) reverse \
#               proxy and IMAP/POP3 proxy server

# processname: nginx

# config:      /usr/local/nginx/conf/nginx.conf

# pidfile:     /usr/local/nginx/logs/nginx.pid

# Source function library.

. /etc/rc.d/init.d/functions

# Source networking configuration.

. /etc/sysconfig/network

# Check that networking is up.

[ "$NETWORKING" = "no" ] && exit 0

nginx="/usr/local/nginx/sbin/nginx"

prog=$(basename $nginx)

NGINX_CONF_FILE="/usr/local/nginx/conf/nginx.conf"

lockfile=/var/lock/subsys/nginx

start() {

[ -x $nginx ] || exit 5

[ -f $NGINX_CONF_FILE ] || exit 6

echo -n $"Starting $prog: "

daemon $nginx -c $NGINX_CONF_FILE

retval=$?

echo

[ $retval -eq 0 ] && touch $lockfile

return $retval

}

stop() {

echo -n $"Stopping $prog: "

killproc $prog -QUIT

retval=$?

echo

[ $retval -eq 0 ] && rm -f $lockfile

return $retval

}

restart() {

configtest || return $?

stop

start

}

reload() {

configtest || return $?

echo -n $"Reloading $prog: "

killproc $nginx -HUP

RETVAL=$?

echo

}

force_reload() {

restart

}

configtest() {

$nginx -t -c $NGINX_CONF_FILE

}

rh_status() {

status $prog

}

rh_status_q() {

rh_status >/dev/null 2>&1

}

case "$1" in

start)

rh_status_q && exit 0

$1

;;

stop)

rh_status_q || exit 0

$1

;;

restart|configtest)

$1

;;

reload)

rh_status_q || exit 7

$1

;;

force-reload)

force_reload

;;

status)

rh_status

;;

condrestart|try-restart)

rh_status_q || exit 0

;;

*)

echo $"Usage: $0
{start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"

exit 2

esac

##############################################################################

Set permissions

chmod 755 /etc/init.d/nginx

chkconfig nginx on

chkconfig --list nginx

nginx           0:off   1:off   2:on    3:on    4:on    5:on    6:off

service nginx start

service nginx stop

service nginx restart

service nginx reload

/etc/init.d/nginx start

/etc/init.d/nginx stop

/etc/init.d/nginx restart

/etc/init.d/nginx reload

Nginx configuration file

vi /usr/local/nginx/conf/nginx.conf

#user  nginx nginx;

worker_processes 12;

error_log  /usr/local/nginx/logs/error.log  crit;

pid        /usr/local/nginx/logs/nginx.pid;

worker_rlimit_nofile 65535;

events

{

use epoll;

worker_connections 204800;

}

http

{

include       mime.types;

default_type  application/octet-stream;

charset  utf-8;

server_names_hash_bucket_size 128;

client_header_buffer_size 2k;

large_client_header_buffers 4 4k;

client_max_body_size 8m;

sendfile on;

tcp_nopush     on;

keepalive_timeout 60;

fastcgi_cache_path /usr/local/nginx/fastcgi_temp levels=1:2 keys_zone=TEST:10m inactive=5m;

fastcgi_connect_timeout 300;

fastcgi_send_timeout 300;

fastcgi_read_timeout 300;

fastcgi_buffer_size 1638;

fastcgi_buffers 16 16k;

fastcgi_busy_buffers_size 16k;

fastcgi_temp_file_write_size 16k;

fastcgi_cache TEST;

fastcgi_cache_valid 200 302 1h;

fastcgi_cache_valid 301 1d;

fastcgi_cache_valid any 1m;

fastcgi_cache_min_uses 1;

fastcgi_cache_use_stale error timeout invalid_header http_500;

open_file_cache max=204800 inactive=20s;

open_file_cache_min_uses 1;

open_file_cache_valid 30s;

tcp_nodelay on;

gzip on;

gzip_min_length  1k;

gzip_buffers     4 16k;

gzip_http_version 1.0;

gzip_comp_level 2;

gzip_types       text/plain application/x-javascript text/css application/xml;

gzip_vary on;

upstream nginx_server {

ip_hash;

server 192.168.100.122:80;

}

upstream web_server {

ip_hash;

server 192.168.100.131:9001;

server 192.168.100.132:9001;

server 192.168.100.133:9001;

server 192.168.100.134:9001;

check interval=3000 rise=2 fall=5 timeout=1000;

}

upstream napi_server {

ip_hash;

server 192.168.100.131:9002;

server 192.168.100.132:9002;

server 192.168.100.133:9002;

server 192.168.100.134:9002;

server 192.168.100.131:9003;

server 192.168.100.132:9003;

server 192.168.100.133:9003;

server 192.168.100.134:9003;

server 192.168.100.131:9004;

server 192.168.100.132:9004;

server 192.168.100.133:9004;

server 192.168.100.134:9004;

check interval=3000 rise=2 fall=5 timeout=1000;

}

upstream oapi_server {

ip_hash;

server 192.168.100.131:9005;

server 192.168.100.132:9005;

server 192.168.100.133:9005;

server 192.168.100.134:9005;

server 192.168.100.131:9006;

server 192.168.100.132:9006;

server 192.168.100.133:9006;

server 192.168.100.134:9006;

check interval=3000 rise=2 fall=5 timeout=1000;

}

server {

listen       80;

server_name  localhost;

location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|html|mp3|wma|wmv)$

{

root /usr/wodm/;

proxy_pass http://nginx_server;

}

location ~ .*\.(js|css)?$

{

expires      1h;

}

location /NginxStatus

{

stub_status                 on;

access_log                  on;

auth_basic                  "NginxStatus";

#auth_basic_user_file       conf/htpasswd;

}

location /nstatus {

check_status;

access_log off;

}

error_page   500 502 503 504  /50x.html;

location = /50x.html {

root   html;

}

}

log_format  access  '$remote_addr - $remote_user [$time_local] "$request" '

'$status $body_bytes_sent "$http_referer" '

'"$http_user_agent" $http_x_forwarded_for';

access_log  /usr/local/nginx/logs/access.log access;

include /usr/local/nginx/conf/vhost/*.conf;

}
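With this configuration loaded, the status location declared above can be used to confirm the upstream health checks are working. A minimal check from the proxy host itself, assuming curl is installed:

/usr/local/nginx/sbin/nginx -t && service nginx reload

# Health status page provided by nginx_upstream_check_module (location /nstatus)
curl http://127.0.0.1/nstatus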

Nginx (nfs node)

nginx.conf

#user  nginx nginx;

worker_processes 16;

#worker_cpu_affinity 00000001 00000010 00000100 00001000 00010000 00100000 01000000 10000000;

error_log  /usr/local/nginx/logs/error.log  crit;

pid        /usr/local/nginx/logs/nginx.pid;

worker_rlimit_nofile 204800;

events

{

use epoll;

worker_connections 204800;

}

http

{

include       mime.types;

default_type  application/octet-stream;

charset  utf-8;

server_names_hash_bucket_size 128;

client_header_buffer_size 2k;

large_client_header_buffers 4 4k;

client_max_body_size 8m;

sendfile on;

tcp_nopush     on;

keepalive_timeout 60;

fastcgi_cache_path /usr/local/nginx/fastcgi_cache levels=1:2 keys_zone=TEST:10m inactive=5m;

fastcgi_connect_timeout 300;

fastcgi_send_timeout 300;

fastcgi_read_timeout 300;

fastcgi_buffer_size 1638;

fastcgi_buffers 16 16k;

fastcgi_busy_buffers_size 16k;

fastcgi_temp_file_write_size 16k;

fastcgi_cache TEST;

fastcgi_cache_valid 200 302 1h;

fastcgi_cache_valid 301 1d;

fastcgi_cache_valid any 1m;

fastcgi_cache_min_uses 1;

fastcgi_cache_use_stale error timeout invalid_header http_500;

open_file_cache max=204800 inactive=20s;

open_file_cache_min_uses 1;

open_file_cache_valid 30s;

tcp_nodelay on;

gzip on;

gzip_min_length  1k;

gzip_buffers     4 16k;

gzip_http_version 1.0;

gzip_comp_level 2;

gzip_types       text/plain application/x-javascript text/css application/xml;

gzip_vary on;

server

{

listen       80;

server_name  localhost;

index index.php index.htm;

root  /usr/wodm/;

location /status

{

stub_status on;

}

location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|html|mp3|wma|wmv)$

{

expires      30d;

}

}

log_format  access  '$remote_addr - $remote_user [$time_local] "$request" '

'$status $body_bytes_sent "$http_referer" '

'"$http_user_agent" $http_x_forwarded_for';

access_log  /usr/local/nginx/logs/access.log access;

Standard character set

vi /etc/sysconfig/i18n

LANG="zh_CN.UTF-8"
SYSFONT="latarcyrheb-sun16"
SUPPORTED="zh_CN.UTF-8:zh_CN:zh"

LANG="zh_CN.GB18030"

SUPPORTED="zh_CN.GB18030:zh_CN:zh:en_US.UTF-8:en_US:en"

Open file descriptor limit

vi /etc/security/limits.conf

* soft nofile 65535

* hard nofile 65535

vi /etc/pam.d/login

session required /lib/security/pam_limits.so
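The new limit applies only to sessions opened after the change; it can be confirmed after logging in again:

# Should print 65535 in a fresh login session
ulimit -n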

Rotate nginx logs daily

vi  nginx_log.sh

#!/bin/bash

logs_path="/usr/local/nginx/logs/"

pid_path="/usr/local/nginx/logs/nginx.pid"

mv ${logs_path}access.log ${logs_path}access_$(date -d "yesterday" +"%Y%m%d").log

kill -USR1 `cat ${pid_path}`

chmod 755 nginx_log.sh

crontab -e

0 0 * * *  bash /usr/local/nginx/nginx_log.sh
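The rotation script can be exercised once by hand before relying on cron, assuming it was saved under /usr/local/nginx/ as referenced in the crontab entry:

bash /usr/local/nginx/nginx_log.sh

# A dated access_YYYYMMDD.log should appear and a fresh access.log be reopened
ls -l /usr/local/nginx/logs/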

Tomcat  memcache

tar xf libevent-1.4.11-stable.tar.gz

cd libevent-1.4.11-stable

./configure

make

make install

cd ../

tar xf memcached-1.4.5.tar.gz

cd memcached-1.4.5

./configure

make

make install

/usr/local/bin/memcached -d -m 10 -u root -l 192.168.1.113 -p 11211 -c 1024 -P /tmp/memcached.pid
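A quick way to confirm the memcached instance is reachable, assuming nc (netcat) is available on the host:

# Query runtime statistics from the running instance
echo stats | nc 192.168.1.113 11211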

Memcached + Tomcat session sharing

Planned as a follow-up; not yet configured.
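When this step is taken up, a common approach is the memcached-session-manager project: its jars go into Tomcat's lib directory and a Manager element in context.xml points at the memcached node started above. A rough sketch only, with placeholder version variables and paths, not a tested configuration:

# Placeholder versions; substitute the versions actually downloaded
cp memcached-session-manager-${MSM_VERSION}.jar memcached-session-manager-tc7-${MSM_VERSION}.jar spymemcached-${SPY_VERSION}.jar /usr/local/tomcat/lib/

# Then add a <Manager> element in conf/context.xml referencing
# memcachedNodes="n1:192.168.1.113:11211" and restart Tomcat.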
