1. Deploying the distributed block device DRBD
1.1 Initialize the base environment
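Both nodes need hostnames and name resolution that match the configuration used throughout this tutorial (node1 = 192.168.10.30, node2 = 192.168.10.40). A minimal sketch of that setup, assuming a lab environment where the firewall is simply disabled:
[root@node1 ~]# hostnamectl set-hostname node1  # use node2 on the other host
[root@node1 ~]# cat >> /etc/hosts <<EOF
192.168.10.30 node1
192.168.10.40 node2
EOF
[root@node1 ~]# systemctl stop firewalld && systemctl disable firewalld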
1.2 Deploy the DRBD environment
The following must be executed on both the primary and standby servers.
# Update the system kernel, then reboot the server
[root@node1 ~]# yum install kernel-devel kernel -y
[root@node1 ~]# reboot
# Install DRBD
[root@node1 software]# tar zxf drbd-9.0.18-1.tar.gz
[root@node1 drbd-9.0.18-1]# make KDIR=/usr/src/kernels/3.10.0-957.21.3.el7.x86_64/
[root@node1 drbd-9.0.18-1]# make install
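If the running kernel differs from the version above, the KDIR path can be derived from uname instead:
[root@node1 drbd-9.0.18-1]# make KDIR=/usr/src/kernels/$(uname -r)/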
[root@node1 software]# yum install drbd90-utils-9.6.0-1.el7.elrepo.x86_64.rpm -y
[root@node1 software]# yum install drbd90-utils-sysvinit-9.6.0-1.el7.elrepo.x86_64.rpm -y
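Before continuing, it is worth checking that the module was built against the running kernel and loads cleanly:
[root@node1 ~]# modprobe drbd
[root@node1 ~]# lsmod | grep drbd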
1.3 Create the drbd resource file
[root@node1 software]# vim /etc/drbd.d/data.res
resource data {
on node1 { # "on" is followed by the hostname
device /dev/drbd1; # the DRBD device node exposed to applications; this tutorial uses /dev/drbd1
disk /dev/sdb1; # the backing disk that stores the replicated data
address 192.168.10.30:7789;
meta-disk internal;
}
on node2 {
device /dev/drbd1;
disk /dev/sdb1;
address 192.168.10.40:7789;
meta-disk internal;
}
}
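After copying data.res to both nodes, the file can be syntax-checked with drbdadm, which prints the parsed resource or reports errors:
[root@node1 ~]# drbdadm dump data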
1.4 Modify the drbd global and common configuration
[root@node1 ~]# vim /etc/drbd.d/global_common.conf
global {
usage-count yes;
}
common {
handlers {
}
startup {
}
options {
# Automatically promote the device to primary when it is mounted, and demote it to secondary when it is unmounted
auto-promote yes;
}
disk {
}
net {
# Use protocol C to guarantee synchronous replication
protocol C;
}
}
1.5 Start the drbd service
# Start the service on both nodes
[root@node1 ~]# systemctl start drbd
1.6 Create the drbd metadata
# If the disk backing the DRBD partition already contains a filesystem (for example, mkfs.xfs /dev/sdb1 was run on it before), creating the DRBD metadata will fail. In that case the old filesystem must be destroyed first:
[root@node1 ~]# dd if=/dev/zero of=/dev/sdb1 bs=1M count=100
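Alternatively, wipefs (part of util-linux) removes just the old filesystem signatures instead of overwriting the first 100 MB:
[root@node1 ~]# wipefs -a /dev/sdb1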
# Create the metadata (on both nodes)
[root@node1 ~]# drbdadm create-md data
--== Thank you for participating in the global usage survey ==--
The server's response is:
initializing activity log
initializing bitmap (320 KB) to all zero
Writing meta data...
New drbd meta data block successfully created.
# Check the drbd status: node1 and node2 are both Secondary, and the data is Inconsistent (not yet synchronized)
[root@node1 ~]# drbdadm status data
data role:Secondary
disk:Inconsistent
node2 role:Secondary
peer-disk:Inconsistent
1.7 Set the drbd primary node and format the filesystem
# A primary node must be set manually the first time; afterwards, mounting and unmounting switches the primary/secondary roles automatically (via auto-promote)
[root@node1 ~]# drbdadm primary --force data
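The initial full synchronization from node1 to node2 may take a while depending on disk size; its progress can be followed with:
[root@node1 ~]# watch drbdadm status data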
# Create the filesystem
[root@node1 ~]# mkfs.xfs /dev/drbd1
meta-data=/dev/drbd1 isize=512 agcount=4, agsize=655210 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=2620839, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none
# Mount it on a local directory
[root@node1 ~]# mkdir /mydata
[root@node1 ~]# mount /dev/drbd1 /mydata/
[root@node1 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/centos-root 8.0G 1.5G 6.6G 18% /
devtmpfs 475M 0 475M 0% /dev
tmpfs 487M 0 487M 0% /dev/shm
tmpfs 487M 7.6M 479M 2% /run
tmpfs 487M 0 487M 0% /sys/fs/cgroup
/dev/sda1 1014M 156M 859M 16% /boot
tmpfs 98M 0 98M 0% /run/user/0
/dev/drbd1 10G 33M 10G 1% /mydata
[root@node1 ~]# drbdadm status data
data role:Primary
disk:UpToDate
node2 role:Secondary
peer-disk:UpToDate
1.8 Primary/standby switchover test
# After writing files under /mydata/, unmount the disk on the primary and mount it on the standby, then check on the standby that the files were replicated
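# For example, create a few empty test files first (these match the ls output below)
[root@node1 ~]# touch /mydata/{a,b,c}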
[root@node1 ~]# umount /mydata/
[root@node1 ~]# drbdadm secondary data
[root@node1 ~]# drbdadm status data
data role:Secondary
disk:UpToDate
node2 role:Secondary
peer-disk:UpToDate
[root@node2 ~]# mkdir /mydata
[root@node2 ~]# mount /dev/drbd1 /mydata/
[root@node2 ~]# drbdadm status data
data role:Primary
disk:UpToDate
node1 role:Secondary
peer-disk:UpToDate
[root@node2 ~]# ls /mydata/
a b c
2. Deploying the HA components pacemaker + corosync
2.1 Install the required components
# Add the yum repo for the crm management tool on both the primary and standby nodes
[root@node1 ~]# vim /etc/yum.repos.d/crmsh.repo
[network_ha-clustering_Stable]
name=Stable High Availability/Clustering packages (CentOS_CentOS-7)
type=rpm-md
baseurl=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/
gpgcheck=1
gpgkey=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/repodata/repomd.xml.key
enabled=1
# Install the crmsh management tool together with pacemaker and corosync
[root@node1 ~]# yum install crmsh pacemaker corosync
2.2 Configure corosync
[root@node1 ~]# cd /etc/corosync/
[root@node1 corosync]# cp corosync.conf.example corosync.conf
[root@node1 corosync]# vim corosync.conf
totem {
version: 2
crypto_cipher: aes256
crypto_hash: sha1
interface {
ringnumber: 0
bindnetaddr: 192.168.10.30
mcastaddr: 239.255.1.1
mcastport: 5405
ttl: 1
}
}
logging {
fileline: off
to_stderr: no
to_logfile: yes
logfile: /var/log/cluster/corosync.log
to_syslog: yes
debug: off
timestamp: on
logger_subsys {
subsys: QUORUM
debug: off
}
}
quorum {
provider: corosync_votequorum
}
nodelist {
node {
ring0_addr: node1
nodeid: 1
}
node {
ring0_addr: node2
nodeid: 2
}
}
# Generate the corosync auth key and copy it, together with the config file, to node2
[root@node1 corosync]# corosync-keygen
[root@node1 corosync]# scp authkey root@192.168.10.40:/etc/corosync/
[root@node1 corosync]# scp corosync.conf root@192.168.10.40:/etc/corosync/
# Start corosync and pacemaker (on both nodes)
[root@node1 ~]# systemctl start corosync
[root@node1 ~]# systemctl start pacemaker
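Both services must run on both nodes; enabling them at boot is usually wanted as well:
[root@node1 ~]# systemctl enable corosync pacemaker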
# Check the cluster status
[root@node1 corosync]# crm_mon
Stack: corosync
Current DC: node1 (version 1.1.19-8.el7_6.4-c3c624ea3d) - partition with quorum
Last updated: Fri Jul 5 21:48:22 2019
Last change: Fri Jul 5 21:45:52 2019 by hacluster via crmd on node1
2 nodes configured
0 resources configured
Online: [ node1 node2 ]
No active resources
2.3 Disable the stonith device
[root@node1 ~]# crm
crm(live)# configure
crm(live)configure# show
node 1: node1
node 2: node2
property cib-bootstrap-options: have-watchdog=false dc-version=1.1.19-8.el7_6.4-c3c624ea3d cluster-infrastructure=corosync
crm(live)configure# property stonith-enabled=false
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# property start-failure-is-fatal=false
crm(live)configure# property default-action-timeout=180s
crm(live)configure# rsc_defaults resource-stickiness=100
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# show
node 1: node1
node 2: node2
property cib-bootstrap-options: have-watchdog=false dc-version=1.1.19-8.el7_6.4-c3c624ea3d cluster-infrastructure=corosync stonith-enabled=false no-quorum-policy=ignore start-failure-is-fatal=false default-action-timeout=180s
rsc_defaults rsc-options: resource-stickiness=100
2.4 Add the Virtual IP resource
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=192.168.10.50 op monitor interval=30s
crm(live)configure# commit
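Once committed, the VIP should come up on one of the nodes. This can be verified with crm status, or directly on the interface (eth0 is an assumption here; substitute the actual NIC name):
[root@node1 ~]# crm status
[root@node1 ~]# ip addr show eth0 | grep 192.168.10.50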
2.5 Add the DRBD disk resource
Enable the drbd service to start on boot on both nodes, as shown below. The cluster only takes over mounting the DRBD device, it does not start DRBD itself, so the DRBD role on both nodes must initially be Secondary.
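# On both nodes; the service only brings the DRBD resource up, while auto-promote and the cluster decide who mounts it
[root@node1 ~]# systemctl enable drbd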
crm(live)configure# primitive drbd ocf:heartbeat:Filesystem params device=/dev/drbd1 directory=/mydata fstype=xfs
crm(live)configure# commit
2.6 Bind the VIP and DRBD resources, starting the VIP before DRBD
Use the group command to group the resources and order their startup.
crm(live)configure# group vip_drbd vip drbd
crm(live)configure# commit
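To verify the group and exercise a failover, crm status shows where the resources are running, and putting a node into standby forces them to move:
[root@node1 ~]# crm status
[root@node1 ~]# crm node standby node1   # vip_drbd should move to node2
[root@node1 ~]# crm node online node1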