超详细saltstack安装部署及应用

1.环境准备

准备两台虚拟机


主机名


ip


role


linux-node1


10.0.0.7


master


linux-node2


10.0.0.8


minion

在节点1上安装 master 和 minion

[[email protected] ~]yum install salt-master salt-minion -y

在节点2上安装 minion

[[email protected] ~]yum install  salt-minion -y

分别设置开机自启动

[[email protected] ~]chkconfig  salt-master on

[[email protected] ~]chkconfig  --add salt-master

[[email protected] ~]chkconfig  salt-minion on

[[email protected] ~]chkconfig  --add salt-minion

[[email protected] ~]chkconfig  salt-minion on

[[email protected] ~]chkconfig  --add salt-minion

指定master

vim /etc/salt/minion

master: 10.0.0.7

授权节点1和节点2

salt-key -a linux*

2.测试

测试 ping 节点1 和节点2

salt ‘*‘ test.ping

执行 cmd.run  执行bash查看负载命令

salt ‘*‘ cmd.run ‘uptime‘

设置sls文件的路径

[[email protected] ~]mkdir -p /srv/salt/base

[[email protected] ~]mkdir -p /srv/salt/test

[[email protected] ~]mkdir -p /srv/salt/prod

vim /etc/salt/master

file_roots:

base:

- /srv/salt/base

test:

- /srv/salt/test

prod:

- /srv/salt/prod

重启master

/etc/init.d/salt-master restart

编写YAML安装Apache 并设置启动文件

cd /srv/salt

vim apache.sls

apache-install:

pkg.installed:

- names:

- httpd

- httpd-devel

apache-service:

service.running:

- name: httpd

- enable: True

- reload: True

执行状态文件

salt ‘*‘ state.sls apache

编写高级状态文件

vim top.sls

base:

‘linux-node2‘:

- apache

salt '*' state.highstate   #执行高级状态 top.sls

3.数据系统之 Grains

salt ‘linux-node1‘ grains.items  #查询所有键值

salt ‘linux-node1‘ grains.get fqdn #查询单个主机值

显示所有 节点1 eth0的ip

[[email protected] ~]# salt ‘linux-node1‘ grains.get ip_interfaces:eth0

linux-node1:

- 10.0.0.7

- fe80::20c:29ff:fe9d:57e8

#根据系统名称匹配执行cmd.run命令

[[email protected] ~]# salt -G os:CentOS cmd.run ‘w‘  #-G 代表使用grains匹配

linux-node2:

03:47:49 up  9:58,  2 users,  load average: 0.00, 0.00, 0.00

USER     TTY      FROM              [email protected]   IDLE   JCPU   PCPU WHAT

root     pts/1    10.0.0.1         17:50    1:31m  0.14s  0.14s -bash

root     pts/0    10.0.0.1         03:37    5:40   0.00s  0.00s -bash

linux-node1:

03:47:49 up  1:35,  2 users,  load average: 0.00, 0.00, 0.00

USER     TTY      FROM              [email protected]   IDLE   JCPU   PCPU WHAT

root     pts/0    10.0.0.1         02:13    1:01m  0.08s  0.01s vim top.sls

root     pts/1    10.0.0.1         03:37    0.00s  0.52s  0.34s /usr/bin/python

vim /etc/salt/grains

web: nginx

salt -G web:nginx cmd.run ‘w‘

4.数据系统之 Pillar

设置pillar文件的路径

vim /etc/salt/master

pillar_roots:

base:

- /srv/pillar

mkdir /srv/pillar #创建默认pillar目录

/etc/init.d/salt-master restart

vim /srv/pillar/apache.sls  #使用jinja模板语言

{%if grains[‘os‘] == ‘CentOS‘ %}

apache: httpd

{% elif grains[‘os‘] == ‘Debian‘ %}

apache: apache2

{% endif %}

vim /srv/pillar/top.sls

base:

‘*‘:

- apache

[[email protected] ~]# salt ‘*‘ pillar.items

linux-node2:

----------

apache:

httpd

linux-node1:

----------

apache:

httpd

配置完 pillar需要刷新 生效

[[email protected] ~]salt ‘*‘ saltutil.refresh_pillar

[[email protected] ~]#  salt -I ‘apache:httpd‘ test.ping

linux-node2:

True

linux-node1:

True

http://docs.saltstack.cn/topics/index.html    #saltstack中文网站

saltstack 之远程执行

targeting

moudles

returners

基于对模块的访问控制

[[email protected] ~]vim /etc/salt/master

client_acl:

oldboy:                      #oldboy用户下只能使用test.ping network的所有方法

- test.ping

- network.*

user01:

- linux-node1*:

- test.ping

权限设置

chmod 755 /var/cache/salt /var/cache/salt/master /var/cache/salt/master/jobs /var/run/salt /var/run/salt/master

[[email protected] ~]/etc/init.d/salt-master restart

[[email protected] ~]# su - oldboy

[[email protected] ~]$ salt ‘*‘ cmd.run ‘df -h‘

[WARNING ] Failed to open log file, do you have permission to write to /var/log/salt/master?

Failed to authenticate! This is most likely because this user is not permitted to execute commands, but there is a small possibility that a disk error occurred (check disk/inode usage).

创建表结构 3个表:

CREATE DATABASE `salt`

DEFAULT CHARACTER SET utf8

DEFAULT COLLATE utf8_general_ci;

USE `salt`;

CREATE TABLE `jids` (

`jid` varchar(255) NOT NULL,

`load` mediumtext NOT NULL,

UNIQUE KEY `jid` (`jid`)

) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE INDEX jid ON jids(jid) USING BTREE;

CREATE TABLE `salt_returns` (

`fun` varchar(50) NOT NULL,

`jid` varchar(255) NOT NULL,

`return` mediumtext NOT NULL,

`id` varchar(255) NOT NULL,

`success` varchar(10) NOT NULL,

`full_ret` mediumtext NOT NULL,

`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,

KEY `id` (`id`),

KEY `jid` (`jid`),

KEY `fun` (`fun`)

) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `salt_events` (

`id` BIGINT NOT NULL AUTO_INCREMENT,

`tag` varchar(255) NOT NULL,

`data` mediumtext NOT NULL,

`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,

`master_id` varchar(255) NOT NULL,

PRIMARY KEY (`id`),

KEY `tag` (`tag`)

) ENGINE=InnoDB DEFAULT CHARSET=utf8;

授权salt用户

grant all on salt.* to salt@'10.0.0.0/255.255.255.0' identified by 'salt';

yum install -y MySQL-python     #同步数据依赖 MySQL-python包

vim /etc/salt/master

底部添加

master_job_cache: mysql   #加上这一句 执行的命令自动保存到数据库不用加--return mysql

mysql.host: ‘10.0.0.7‘

mysql.user: ‘salt‘

mysql.pass: ‘salt‘

mysql.db: ‘salt‘

mysql.port: 3306

/etc/init.d/salt-master restart

测试命令执行结果是否同步到数据库

[[email protected] ~]# salt ‘*‘ cmd.run ‘ls‘ --return mysql

编译安装所需的依赖包

yum install gcc gcc-c++ glibc autoconf make openssl openssl-devel

5.web集群架构自动化部署

5.1安装haproxy

cd /usr/local/src && tar zxf haproxy-1.7.9.tar.gz && cd haproxy-1.7.9 && make TARGET=linux26 PREFIX=/usr/local/haproxy && make install PREFIX=/usr/local/haproxy

cd /usr/local/src/haproxy-1.7.9/examples/

vim haproxy.init

BIN=/usr/local/haproxy/sbin/$BASENAME  更改启动脚本的默认路径

cp haproxy.init /srv/salt/prod/haproxy/files/

编写YAML脚本

mkdir /srv/salt/prod/pkg            #源码安装依赖包sls

mkdir /srv/salt/prod/haproxy        #haproxy安装 sls

mkdir /srv/salt/prod/haproxy/files    #存放haproxy源码压缩包

haproxy自动化编译安装。

cd /srv/salt/prod/pkg

编译安装所需依赖包的自动化安装

vim pkg-init.sls

pkg-init:

pkg.installed:                 #pkg的installed

- names:

- gcc

- gcc-c++

- glibc

- make

- autoconf

- openssl

- openssl-devel

cd /srv/salt/prod/haproxy

vim install.sls   #haproxy自动化编译安装YAML脚本

include:

- pkg.pkg-init

haproxy-install:

file.managed:

- name: /usr/local/src/haproxy-1.7.9.tar.gz

- source: salt://haproxy/files/haproxy-1.7.9.tar.gz #salt:相当于/srv/salt/prod

- user: root

- group: root

- mode: 755

cmd.run:

- name: cd /usr/local/src && tar zxf haproxy-1.7.9.tar.gz && cd haproxy-1.7.9 && make TARGET=linux26 PREFIX=/usr/local/haproxy && make install PREFIX=/usr/local/haproxy

- unless: test -d /usr/local/haproxy

- require:

- pkg: pkg-init

- file: haproxy-install

haproxy-init:

file.managed:

- name: /etc/init.d/haproxy   创建一个/etc/init.d/haproxy 文件

- source: salt://haproxy/files/haproxy.init

- user: root

- group: root

- mode: 755

- require:

- cmd: haproxy-install

cmd.run:

- name: chkconfig --add haproxy

- unless: chkconfig --list | grep haproxy #返回false才执行和-onlyif相反,有就不执行上面的命令

- require:

- file: haproxy-init

net.ipv4.ip_nonlocal_bind:   #cat /proc/sys/net/ipv4/ip_nonlocal_bind 默认是0改为1,意思是可以监听非本地的ip

sysctl.present:             #设定内核参数的方法

- value: 1

haproxy-config-dir:

file.directory:   #文件的创建目录的方法

- name: /etc/haproxy  #创建一个/etc/haproxy的目录

- user: root

- group: root

- mode: 755

手动执行 节点1上面的安装haproxy脚本

salt ‘linux-node1‘ state.sls haproxy.install env=prod #env指定使用prod目录下的

创建集群目录

mkdir /srv/salt/prod/cluster

mkdir /srv/salt/prod/cluster/files

cd /srv/salt/prod/cluster/files

vim haproxy-outside.cfg

global

maxconn 100000

chroot /usr/local/haproxy

uid 99

gid 99

daemon

nbproc 1

pidfile /usr/local/haproxy/logs/haproxy.pid

log 127.0.0.1 local3 info

defaults

option http-keep-alive

maxconn 100000

mode http

timeout connect 5000ms

timeout client  50000ms

timeout server  50000ms

listen stats

mode http

bind 0.0.0.0:8888

stats enable

stats uri       /haproxy-status

stats auth      haproxy:saltstack

frontend frontend_www_example_com

bind    10.0.0.11:80

mode    http

option  httplog

log global

default_backend backend_www_example_com

backend backend_www_example_com

option forwardfor header X-REAL-IP

option httpchk HEAD / HTTP/1.0

balance source

server web-node1        10.0.0.7:8080 check inter 2000 rise 30 fall 15

server web-node2        10.0.0.8:8080 check inter 2000 rise 30 fall 15

cd ..

vim haproxy-outside.sls

include:

- haproxy.install

haproxy-service:

file.managed:

- name: /etc/haproxy/haproxy.cfg

- source: salt://cluster/files/haproxy-outside.cfg

- user: root

- group: root

- mode: 644

service.running:

- name: haproxy

- enable: True

- reload: True

- require:

- cmd: haproxy-init

- watch:

- file: haproxy-service

编辑top.sls

cd /srv/salt/base/

vim top.sls

base:

‘*‘:

- init.env_init

prod:

‘linux-node1‘:

- cluster.haproxy-outside

‘linux-node2‘:

- cluster.haproxy-outside

在节点1和节点2上分别修改httpd 的监听端口

vim /etc/httpd/conf/httpd.conf 将80端口改为8080

Listen 8080

然后重启 /etc/init.d/httpd restart

vim /var/www/html/index.html

linux-node1  #节点2上linux-node2

在浏览器中输入 10.0.0.7:8888/haproxy-status  健康检查

账号密码 haproxy/saltstack

[[email protected] html]# cd /srv/salt/prod/

[[email protected] prod]# tree

.

|-- cluster

|   |-- files

|   |   `-- haproxy-outside.cfg

|   `-- haproxy-outside.sls

|-- haproxy

|   |-- files

|   |   |-- haproxy-1.7.9.tar.gz

|   |   `-- haproxy.init

|   `-- install.sls

`-- pkg

`-- pkg-init.sls

5.2安装keepalived

wget http://www.keepalived.org/software/keepalived-1.2.19.tar.gz && tar zxf keepalived-1.2.19.tar.gz && cd keepalived-1.2.19 && ./configure --prefix=/usr/local/keepalived --disable-fwmark && make && make install

/usr/local/src/keepalived-1.2.19/keepalived/etc/init.d/keepalived.init #启动脚本

/usr/local/src/keepalived-1.2.19/keepalived/etc/keepalived/keepalived.conf #模板文件

[[email protected] etc]# mkdir /srv/salt/prod/keepalived

[[email protected] etc]# mkdir /srv/salt/prod/keepalived/files

[[email protected] etc]# cp init.d/keepalived.init /srv/salt/prod/keepalived/files/

[[email protected] etc]# cp keepalived/keepalived.conf /srv/salt/prod/keepalived/files/

[[email protected] keepalived]# cd /usr/local/keepalived/etc/sysconfig/

[[email protected] sysconfig]# cp keepalived /srv/salt/prod/keepalived/files/keepalived.sysconfig

[[email protected] etc]# cd /srv//salt/prod/keepalived/files/

[[email protected] files]# vim keepalived.init

daemon /usr/local/keepalived/sbin/keepalived ${KEEPALIVED_OPTIONS} 修改启动时的加载文件路径

[[email protected] files] cp /usr/local/src/keepalived-1.2.19.tar.gz .

[[email protected] files]# cd ..

[[email protected] keepalived]# vim install.sls

include:

- pkg.pkg-init

keepalived-install:

file.managed:

- name: /usr/local/src/keepalived-1.2.19.tar.gz

- source: salt://keepalived/files/keepalived-1.2.19.tar.gz

- user: root

- group: root

- mode: 755

cmd.run:

- name: wget http://www.keepalived.org/software/keepalived-1.2.19.tar.gz && tar zxf keepalived-1.2.19.tar.gz && cd keepalived-1.2.19 && ./configure --prefix=/usr/local/keepalived --disable-fwmark && make && make install

- unless: test -d /usr/local/keepalived

- require:

- pkg: pkg-init

- file: keepalived-install

keepalived-init:

file.managed:

- name: /etc/init.d/keepalived

- source: salt://keepalived/files/keepalived.init

- user: root

- group: root

- mode: 755

cmd.run:

- name: chkconfig --add keepalived

- unless: chkconfig --list | grep keepalived

- require:

- file: keepalived-init

/etc/sysconfig/keepalived:

file.managed:

- source: salt://keepalived/files/keepalived.sysconfig

- user: root

- group: root

- mode: 644

/etc/keepalived:

file.directory:

- user: root

- group: root

- mode: 755

[[email protected] ~]# cd /srv/salt/prod/cluster/files/

[[email protected] files]# vim haproxy-outside-keepalived.conf

! Configuration File for keepalived

global_defs {

notification_email {

[email protected]

}

notification_email_from [email protected]

smtp_server 127.0.0.1

smtp_connect_timeout 30

router_id {{ROUTEID}}

}

vrrp_instance haproxy_ha {

state {{STATEID}}

interface eth0

virtual_router_id 36

priority {{PRIORITYID}}

advert_int 1

authentication {

auth_type PASS

auth_pass 1111

}

virtual_ipaddress {

10.0.0.11

}

}

[[email protected] cluster]# vim haproxy-outside-keepalived.sls

include:

- keepalived.install

keepalived-service:

file.managed:

- name: /etc/keepalived/keepalived.conf

- source: salt://cluster/files/haproxy-outside-keepalived.conf

- user: root

- group: root

- mode: 644

- template: jinja

{% if grains[‘fqdn‘] == ‘linux-node1‘ %}

- ROUTEID: haproxy_ha

- STATEID: MASTER

- PRIORITYID: 150

{% elif grains[‘fqdn‘] == ‘linux-node2‘ %}

- ROUTEID: haproxy_ha

- STATEID: BACKUP

- PRIORITYID: 100

{% endif %}

service.running:

- name: keepalived

- enable: True

- watch:

- file: keepalived-service

[[email protected] cluster]salt ‘*‘ state.sls cluster.haproxy-outside-keepalived env=prod

[[email protected] base]# cd /srv/salt/base/

[[email protected] base]# vim top.sls

base:

‘*‘:

- init.env_init

prod:

‘linux-node1‘:

- cluster.haproxy-outside

- cluster.haproxy-outside-keepalived

‘linux-node2‘:

- cluster.haproxy-outside

- cluster.haproxy-outside-keepalived

验证keepalived

[[email protected] prod]# ip ad li

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN

link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

inet 127.0.0.1/8 scope host lo

inet6 ::1/128 scope host

valid_lft forever preferred_lft forever

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

link/ether 00:0c:29:9d:57:e8 brd ff:ff:ff:ff:ff:ff

inet 10.0.0.7/24 brd 10.0.0.255 scope global eth0

inet 10.0.0.11/32 scope global eth0

inet6 fe80::20c:29ff:fe9d:57e8/64 scope link

valid_lft forever preferred_lft forever

[[email protected] html]# ip ad li

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN

link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

inet 127.0.0.1/8 scope host lo

inet6 ::1/128 scope host

valid_lft forever preferred_lft forever

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

link/ether 00:0c:29:ca:41:95 brd ff:ff:ff:ff:ff:ff

inet 10.0.0.8/24 brd 10.0.0.255 scope global eth0

inet6 fe80::20c:29ff:feca:4195/64 scope link

valid_lft forever preferred_lft forever

[[email protected] prod]# /etc/init.d/keepalived stop

Stopping keepalived:                                       [  OK  ]

[[email protected] html]# ip ad li

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN

link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

inet 127.0.0.1/8 scope host lo

inet6 ::1/128 scope host

valid_lft forever preferred_lft forever

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

link/ether 00:0c:29:ca:41:95 brd ff:ff:ff:ff:ff:ff

inet 10.0.0.8/24 brd 10.0.0.255 scope global eth0

inet 10.0.0.11/32 scope global eth0

inet6 fe80::20c:29ff:feca:4195/64 scope link

valid_lft forever preferred_lft forever

[[email protected] prod]# /etc/init.d/keepalived start

Starting keepalived:                                       [  OK  ]

[[email protected] html]# ip ad li

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN

link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

inet 127.0.0.1/8 scope host lo

inet6 ::1/128 scope host

valid_lft forever preferred_lft forever

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

link/ether 00:0c:29:ca:41:95 brd ff:ff:ff:ff:ff:ff

inet 10.0.0.8/24 brd 10.0.0.255 scope global eth0

inet6 fe80::20c:29ff:feca:4195/64 scope link

valid_lft forever preferred_lft forever

[[email protected] prod]# vim /srv/salt/prod/cluster/files/haproxy-outside.cfg

balance roundrobin   #roundrobin表示轮询,source表示固定。

5.3安装zabbix-agent

[[email protected] prod]# cd /srv/salt/base/init

[[email protected] init]# vim zabbix_agent.sls

zabbix-agent-install:

pkg.installed:

- name: zabbix-agent

file.managed:

- name: /etc/zabbix/zabbix_agentd.conf

- source: salt://init/files/zabbix_agent.conf

- template: jinja

- defaults:

Server: {{ pillar[‘zabbix-agent‘][‘Zabbix_Server‘] }}

- require:

- pkg: zabbix-agent-install

service.running:

- name: zabbix-agent

- enable: True

- watch:

- pkg: zabbix-agent-install

- file: zabbix-agent-install

[[email protected] init]# vim /etc/salt/master

pillar_roots:

base:

- /srv/pillar/base

[[email protected]ode1 init]# mkdir /srv/pillar/base

[[email protected] init]# /etc/init.d/salt-master restart

[[email protected] init]# cd /srv/pillar/base/

[[email protected] base]# vim top.sls

base:

‘*‘:

- zabbix

[[email protected] base]# vim zabbix.sls

zabbix-agent:

Zabbix_Server: 10.0.0.7

[[email protected] base]# cd /srv/salt/base/init/files

[[email protected] files]# cp /etc/zabbix/zabbix_agentd.conf ./zabbix_agent.conf

[[email protected] files]# vim zabbix_agent.conf  #使用模板语言的变量引用

Server={{ Server }}

[[email protected] init]# vim env_init.sls

include:

- init.dns

- init.history

- init.audit

- init.sysctl

- init.zabbix_agent

[[email protected] ~]# salt ‘*‘ state.highstate

nginx+php 以及 memcache 的安装

https://github.com/a7260488/slat-test

percona-zabbix-templates  #zabbix监控mysql的软件

5.4配置master-syndic

功能有点类似 zabbix-proxy

[[email protected] ~]# yum install salt-master salt-syndic -y

[[email protected] ~]# vim /etc/salt/master

syndic_master: 10.0.0.7

[[email protected] ~]# vim /etc/salt/master

[[email protected] ~]# /etc/init.d/salt-master start

Starting salt-master daemon:                               [  OK  ]

[[email protected] ~]# /etc/init.d/salt-syndic start

Starting salt-syndic daemon:                               [  OK  ]

[[email protected] ~]# vim /etc/salt/master

order_masters: True

[[email protected] ~]# /etc/init.d/salt-master restart

[[email protected] ~]# /etc/init.d/salt-minion stop

Stopping salt-minion daemon:                               [  OK  ]

[[email protected] ~]# /etc/init.d/salt-minion stop

Stopping salt-minion daemon:                               [  OK  ]

[[email protected] ~]# salt-key -D

[[email protected] ~]# cd /etc/salt/pki/minion/

[[email protected] minion]# rm -fr *

[[email protected] ~]# cd  /etc/salt/pki/minion

[[email protected] minion]# rm -fr *

[[email protected] salt]# vim /etc/salt/minion

master: 10.0.0.8

[[email protected] salt]# vim /etc/salt/minion

master: 10.0.0.8

[[email protected] salt]# /etc/init.d/salt-minion start

Starting salt-minion daemon:                               [  OK  ]

[[email protected] salt]# /etc/init.d/salt-minion start

Starting salt-minion daemon:                               [  OK  ]

[[email protected] minion]# salt-key -A

The following keys are going to be accepted:

Unaccepted Keys:

linux-node2

Proceed? [n/Y] y

Key for minion linux-node2 accepted.

[[email protected] minion]# salt-key

Accepted Keys:

linux-node2

Denied Keys:

Unaccepted Keys:

Rejected Keys:

[[email protected] salt]# salt-key

Accepted Keys:

Denied Keys:

Unaccepted Keys:

linux-node1

linux-node2

Rejected Keys:

[[email protected] salt]# salt-key -A

The following keys are going to be accepted:

Unaccepted Keys:

linux-node1

linux-node2

Proceed? [n/Y] y

Key for minion linux-node1 accepted.

Key for minion linux-node2 accepted.

5.5saltstack自动扩容

zabbix监控--->Action---->创建一台虚拟机/Docker容器---->部署服务---->部署代码---->测试状态----->加入集群--->加入监控--->通知

基于域名下载etcd

https://github.com/coreos/etcd/releases/download/v2.2.1/etcd-v2.2.1-linux-amd64.tar.gz

rz etcd-v2.2.1-linux-amd64.tar.gz (2进制包)

[[email protected] src]# cd etcd-v2.0.5-linux-amd64

[[email protected] etcd-v2.0.5-linux-amd64]# cp etcd etcdctl  /usr/local/bin/

[[email protected] etcd-v2.0.5-linux-amd64] ./etcd &

或者这样启动

nohup etcd --name auto_scale --data-dir /data/etcd/ \

--listen-peer-urls 'http://10.0.0.7:2380,http://10.0.0.7:7001' \

--listen-client-urls 'http://10.0.0.7:2379,http://10.0.0.7:4001' \

--advertise-client-urls 'http://10.0.0.7:2379,http://10.0.0.7:4001' &

设置key的值

[[email protected] wal]# curl -s http://localhost:2379/v2/keys/message -XPUT -d value="Hello world" | python -m json.tool

{

"action": "set",

"node": {

"createdIndex": 8,

"key": "/message",

"modifiedIndex": 8,

"value": "Hello world"

},

"prevNode": {

"createdIndex": 7,

"key": "/message",

"modifiedIndex": 7,

"value": "Hello world"

}

}

获取key的值

[[email protected] wal]# curl -s http://localhost:2379/v2/keys/message |python -m json.tool          {

"action": "get",

"node": {

"createdIndex": 8,

"key": "/message",

"modifiedIndex": 8,

"value": "Hello world"

}

}

删除key

[[email protected] wal]# curl -s http://localhost:2379/v2/keys/message -XDELETE |python -m json.tool

{

"action": "delete",

"node": {

"createdIndex": 8,

"key": "/message",

"modifiedIndex": 9

},

"prevNode": {

"createdIndex": 8,

"key": "/message",

"modifiedIndex": 8,

"value": "Hello world"

}

}

删除key以后再次获取key not found

[[email protected] wal]# curl -s http://localhost:2379/v2/keys/message |python -m json.tool          {

"cause": "/message",

"errorCode": 100,

"index": 9,

"message": "Key not found"

}

设置key 有效时间5秒 5秒后过期  "message": "Key not found"

[[email protected] wal]# curl -s http://localhost:2379/v2/keys/ttl_use -XPUT -d value="Hello world 1" -d ttl=5 | python -m json.tool

{

"action": "set",

"node": {

"createdIndex": 10,

"expiration": "2017-11-17T12:59:41.572099187Z",

"key": "/ttl_use",

"modifiedIndex": 10,

"ttl": 5,

"value": ""

}

}

[[email protected] ~]# vim /etc/salt/master  #行尾添加

etcd_pillar_config:

etcd.host: 10.0.0.7

etcd.port: 4001

ext_pillar:

- etcd: etcd_pillar_config root=/salt/haproxy/

[[email protected] ~]# /etc/init.d/salt-master restart

[[email protected] ~]# curl -s http://localhost:2379/v2/keys/salt/haproxy/backend_www_oldboyedu_com/web-node1 -XPUT -d value="10.0.0.7:8080" | python -m json.tool

{

"action": "set",

"node": {

"createdIndex": 10,

"key": "/salt/haproxy/backend_www_oldboyedu_com/web-node1", #添加一个web-node1的节点

"modifiedIndex": 10,

"value": "10.0.0.7:8080"

}

}

[[email protected] ~]#pip install python-etcd

[[email protected] etcd-v2.2.1-linux-amd64]# salt ‘*‘ pillar.items

linux-node2:

----------

backend_www_oldboyedu_com:

----------

web-node1:

10.0.0.7:8080

zabbix-agent:

----------

Zabbix_Server:

10.0.0.7

linux-node1:

----------

backend_www_oldboyedu_com:

----------

web-node1:

10.0.0.7:8080

zabbix-agent:

----------

Zabbix_Server:

10.0.0.7

[[email protected] ~]# vi /srv/salt/prod/cluster/files/haproxy-outside.cfg  #行尾添加

{% for web,web_ip in pillar.backend_www_oldboyedu_com.iteritems() -%}

server {{ web }} {{ web_ip }} check inter 2000 rise 30 fall 15

{% endfor %}

vim /srv/salt/prod/cluster/haproxy-outside.sls

- template: jinja

重启master

执行状态 salt '*' state.highstate

时间: 2024-07-28 16:54:37

超详细saltstack安装部署及应用的相关文章

saltstack安装部署以及简单实用

一,saltstack简介:     SaltStack是一种新的基础设施管理方法开发软件,简单易部署,可伸缩的足以管理成千上万的服务器,和足够快的速度控制,与他们交流,以毫秒为单位. SaltStack提供了一个动态基础设施通信总线用于编排,远程执行.配置管理等等.SaltStack基于python开发,项目于2011年启动,年增长速度较快,五年期 固定基础设施编制和配置管理的开源项目.SaltStack社区致力于保持盐项目集中.友好.健康.开放. (网上摘抄的,说白了saltStack就是一

saltstack安装部署与入门使用

一.saltstack简介 SaltStack 一种基于 C/S 架构的服务器基础架构集中化管理平台,管理端称为 Master,客户端称为 Minion.SaltStack 具备配置管理.远程执行.监控等功能,一般可以理解为是简化版的 Puppet 和加强版的 Func.SaltStack 本身是基于 Python 语言开发实现,结合了轻量级的消息队列软件 ZeroMQ 与 Python 第三方模块(Pyzmq.PyCrypto.Pyjinjia2.python-msgpack 和 PyYAML

自动化运维工具---SaltStack安装部署及简单案例

SaltStack原理 SaltStack由Master(服务端)和Minion(客户端)组成,Master和Minion之间通过ZeroMQ(消息队列)进行通讯,Master和Minion分别监听4505与4506端口,4505为master与minion认证通信端口,4506为master用来发送或者接受minion的命令执行返回信息. 当客户端启动后,会主动链接master端注册,然后一直保持该TCP连接,而master通过这条TCP连接对客户端进行控制,如果连接断开,master将对客户

saltstack 安装部署

官方源:https://repo.saltstack.com/yum/redhat/    saltstack yum源 系统环境 # cat /etc/redhat-release CentOS Linux release 7.2.1511 (Core) Saltstack安装版本 # salt -V Salt Version: Salt: 2017.7.2 Dependency Versions: cffi: 0.8.6 cherrypy: Not Installed dateutil: 1

集中化管理平台Saltstack安装部署

Saltstack一般认为是puppet的简化版和func的加强版 安装: 部署环境: 角色 hostname ip 操作系统 python版本 master master 192.168.224.141 CentOS release 6.4 (Final) Python 2.6.6 minion001 minion001 192.168.224.142 CentOS release 6.4 (Final) Python 2.6.6 minion002 minion002 192.168.224

Saltstack 安装部署和模块使用

Saltstack概念 Saltstack 比 Puppet 出来晚几年,是基于Python 开发的,也是基于 C/S 架构,服务端 master 和客户端 minions :Saltstack 和 Puppet 很像,可以说 Saltstatck 整合了 Puppet 和 Chef 的功能,更加强大,更适合大规模批量管理服务器,并且它比 Puppet 更容易配置. salt特点: 并行,管理的工具,Python开发的.可二次开发. salt远程执行 salt配置管理 salt云管理(阿里云升级

calamari + ceph + saltstack 安装部署

准备工作 1.基础环境 ceph-admin ceph-mon  为同一台服务器 ceph-osd1 为一台服务器 ceph-osd2 为另一台服务器 2.关闭防火墙 # systemctl stop firewalld.service # systemctl disable firewalld.service 3.关闭Selinux # sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config # setenforce

SaltStack安装部署

环境: 10.1.21.225(master) 10.1.20.85(minion) 安装: 一. master: 1. 配置yum源安装 1 # rpm --import https://repo.saltstack.com/yum/redhat/6/x86_64/latest/SALTSTACK-GPG-KEY.pub #载入yum认证文件 2 # cd /etc/yum.repos.d/ 3 # vim saltstack.repo 4 [saltstack-repo] 5 name=Sa

Saltstack安装部署(CentOS 6.7)

系统版本: CentOS6.7 X86_64 部署规划: 序号 主机名 IP 角色 1 linux-node1.example.com 10.0.0.7 salt-master.salt-minion 2 linux-node2.example.com 10.0.0.8 salt-minion linux-node1.example.com: 安装salt-master salt-minion yum install salt-master salt-minion -y 启动salt-maste