いつもお世話になっております、笹と申します。
下記Pacemaker稼働環境にて、ホスト名の変更を実施する必要があり
評価環境にて評価を実施しました。
・OS
 CentOS6.5
・SW
 pacemaker-1.1.12-1.el6.x86_64
 corosync-2.3.4-1.el6.x86_64
 drbd84-utils-8.9.2-1.el6.elrepo.x86_64
 kmod-drbd84-8.4.5-1.el6.elrepo.x86_64
手順としましては
①Pacemakerのサービス停止
②ホスト名変更
③drbd.resのonパラメータのホスト名を変更
④OS再起動
⑤Pacemaker起動
上記手順で実施し、Pacemaker、Corosync、DRBDの正常起動を
確認し、crm configure showにてnodeパラメータのホスト名が新ホスト名に
変更されていることも確認しております。
Pacemaker環境にてホスト名を変更する際、他に変更すべきパラメータ、
設定ファイル、留意すべき点等ございましたら、ご指摘いただけると幸いです。
ご参考までに、crmの設定、corosync.conf、drbd.resを記載致します。
●crm
node 1: test-node1
node 2: test-node2
primitive r0_drbd ocf:linbit:drbd \
meta migration-threshold=3 \
params drbd_resource=r0 \
op start interval=0s timeout=240s on-fail=restart \
op stop interval=0s timeout=240s on-fail=block \
op monitor interval=45s role=Master timeout=240s on-fail=restart \
op monitor interval=46s role=Slave timeout=240s on-fail=restart
primitive r0_fs Filesystem \
meta migration-threshold=2 \
params device="/dev/drbd0" directory="/opt/test_vol1" fstype=ext4 \
op start interval=0s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=block \
op monitor interval=45s timeout=60s on-fail=restart
primitive ping ocf:pacemaker:ping \
meta migration-threshold=2 \
params host_list=10.18.49.60 multiplier=100 dampen=0 \
op start interval=0s timeout=60s on-fail=restart \
op monitor interval=45s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=block
primitive ldap_drbd ocf:linbit:drbd \
meta migration-threshold=2 \
params drbd_resource=r1 \
op start interval=0s timeout=240s on-fail=restart \
op stop interval=0s timeout=240s on-fail=block \
op monitor interval=45s role=Master timeout=240s on-fail=restart \
op monitor interval=46s role=Slave timeout=240s on-fail=restart
primitive r1_fs Filesystem \
meta migration-threshold=2 \
params device="/dev/drbd1" directory="/opt/test_vol2" fstype=ext4 \
op start interval=0s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=block \
op monitor interval=45s timeout=60s on-fail=restart
primitive vip IPaddr2 \
meta migration-threshold=2 \
params ip=10.18.49.38 nic=eth0 cidr_netmask=26 iflabel=0 \
op start interval=0s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=block \
op monitor interval=45s timeout=60s on-fail=restart
primitive vip_check VIPcheck \
meta migration-threshold=2 \
params target_ip=10.18.49.38 count=3 wait=5 \
op start interval=0s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=ignore \
op monitor interval=45s timeout=60s on-fail=restart
group test_grp vip_check r0_fs r1_fs vip
ms r0_ms r0_drbd \
meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
ms r1_ms ldap_drbd \
meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
clone ping_cln ping \
meta clone-max=2 clone-node-max=1 target-role=Started
colocation col_drbd1 inf: test_grp r0_ms:Master
colocation col_drbd2 inf: test_grp r1_ms:Master
colocation col_ping inf: test_grp ping_cln
order drbd_odr inf: r0_ms:promote r1_ms:start
order mail_odr inf: r1_ms:promote test_grp:start
property cib-bootstrap-options: \
dc-version=1.1.12-561c4cf \
cluster-infrastructure=corosync \
stonith-enabled=false \
no-quorum-policy=ignore \
pe-input-series-max=100 \
pe-error-series-max=100 \
pe-warn-series-max=100 \
maintenance-mode=false
rsc_defaults rsc-options: \
resource-stickiness=INFINITY
●corosync.conf
# Please read the corosync.conf.5 manual page
totem {
version: 2
token: 1000
crypto_cipher: none
crypto_hash: none
rrp_mode: active
interface {
ringnumber: 0
bindnetaddr: 10.18.49.0
mcastport: 5405
ttl: 1
}
interface {
ringnumber: 1
bindnetaddr: 192.168.10.0
mcastport: 5407
ttl: 1
}
transport: udpu
}
logging {
fileline: off
to_logfile: yes
to_syslog: no
logfile: /var/log/cluster/corosync.log
debug: off
timestamp: on
logger_subsys {
subsys: QUORUM
debug: off
}
}
nodelist {
node {
ring0_addr: 10.18.49.36
ring1_addr: 192.168.10.21
nodeid: 1
}
node {
ring0_addr: 10.18.49.37
ring1_addr: 192.168.10.22
nodeid: 2
}
}
quorum {
# Enable and configure quorum subsystem (default: off)
# see also corosync.conf.5 and votequorum.5
provider: corosync_votequorum
expected_votes: 2
two_node: 1
}
●drbd.res
resource r0 {
protocol C;
disk {
on-io-error detach;
}
syncer {
rate 30M;
verify-alg sha1;
}
on test-node1 {
device /dev/drbd0;
disk /dev/sdc;
address 172.21.50.10:7789;
meta-disk internal;
}
on test-node2 {
device /dev/drbd0;
disk /dev/sdc;
address 172.21.50.11:7789;
meta-disk internal;
}
}
resource r1 {
protocol C;
disk {
on-io-error detach;
}
syncer {
rate 30M;
verify-alg sha1;
}
on test-node1 {
device /dev/drbd1;
disk /dev/sdd;
address 172.21.50.10:7790;
meta-disk internal;
}
on test-node2 {
device /dev/drbd1;
disk /dev/sdd;
address 172.21.50.11:7790;
meta-disk internal;
}
}
_______________________________________________
Linux-ha-japan mailing list
Linux-ha-japan@lists.osdn.me
http://lists.osdn.me/mailman/listinfo/linux-ha-japan
下記Pacemaker稼働環境にて、ホスト名の変更を実施する必要があり
評価環境にて評価を実施しました。
・OS
 CentOS6.5
・SW
 pacemaker-1.1.12-1.el6.x86_64
 corosync-2.3.4-1.el6.x86_64
 drbd84-utils-8.9.2-1.el6.elrepo.x86_64
 kmod-drbd84-8.4.5-1.el6.elrepo.x86_64
手順としましては
①Pacemakerのサービス停止
②ホスト名変更
③drbd.resのonパラメータのホスト名を変更
④OS再起動
⑤Pacemaker起動
上記手順で実施し、Pacemaker、Corosync、DRBDの正常起動を
確認し、crm configure showにてnodeパラメータのホスト名が新ホスト名に
変更されていることも確認しております。
Pacemaker環境にてホスト名を変更する際、他に変更すべきパラメータ、
設定ファイル、留意すべき点等ございましたら、ご指摘いただけると幸いです。
ご参考までに、crmの設定、corosync.conf、drbd.resを記載致します。
●crm
node 1: test-node1
node 2: test-node2
primitive r0_drbd ocf:linbit:drbd \
meta migration-threshold=3 \
params drbd_resource=r0 \
op start interval=0s timeout=240s on-fail=restart \
op stop interval=0s timeout=240s on-fail=block \
op monitor interval=45s role=Master timeout=240s on-fail=restart \
op monitor interval=46s role=Slave timeout=240s on-fail=restart
primitive r0_fs Filesystem \
meta migration-threshold=2 \
params device="/dev/drbd0" directory="/opt/test_vol1" fstype=ext4 \
op start interval=0s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=block \
op monitor interval=45s timeout=60s on-fail=restart
primitive ping ocf:pacemaker:ping \
meta migration-threshold=2 \
params host_list=10.18.49.60 multiplier=100 dampen=0 \
op start interval=0s timeout=60s on-fail=restart \
op monitor interval=45s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=block
primitive ldap_drbd ocf:linbit:drbd \
meta migration-threshold=2 \
params drbd_resource=r1 \
op start interval=0s timeout=240s on-fail=restart \
op stop interval=0s timeout=240s on-fail=block \
op monitor interval=45s role=Master timeout=240s on-fail=restart \
op monitor interval=46s role=Slave timeout=240s on-fail=restart
primitive r1_fs Filesystem \
meta migration-threshold=2 \
params device="/dev/drbd1" directory="/opt/test_vol2" fstype=ext4 \
op start interval=0s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=block \
op monitor interval=45s timeout=60s on-fail=restart
primitive vip IPaddr2 \
meta migration-threshold=2 \
params ip=10.18.49.38 nic=eth0 cidr_netmask=26 iflabel=0 \
op start interval=0s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=block \
op monitor interval=45s timeout=60s on-fail=restart
primitive vip_check VIPcheck \
meta migration-threshold=2 \
params target_ip=10.18.49.38 count=3 wait=5 \
op start interval=0s timeout=60s on-fail=restart \
op stop interval=0s timeout=60s on-fail=ignore \
op monitor interval=45s timeout=60s on-fail=restart
group test_grp vip_check r0_fs r1_fs vip
ms r0_ms r0_drbd \
meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
ms r1_ms ldap_drbd \
meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
clone ping_cln ping \
meta clone-max=2 clone-node-max=1 target-role=Started
colocation col_drbd1 inf: test_grp r0_ms:Master
colocation col_drbd2 inf: test_grp r1_ms:Master
colocation col_ping inf: test_grp ping_cln
order drbd_odr inf: r0_ms:promote r1_ms:start
order mail_odr inf: r1_ms:promote test_grp:start
property cib-bootstrap-options: \
dc-version=1.1.12-561c4cf \
cluster-infrastructure=corosync \
stonith-enabled=false \
no-quorum-policy=ignore \
pe-input-series-max=100 \
pe-error-series-max=100 \
pe-warn-series-max=100 \
maintenance-mode=false
rsc_defaults rsc-options: \
resource-stickiness=INFINITY
●corosync.conf
# Please read the corosync.conf.5 manual page
totem {
version: 2
token: 1000
crypto_cipher: none
crypto_hash: none
rrp_mode: active
interface {
ringnumber: 0
bindnetaddr: 10.18.49.0
mcastport: 5405
ttl: 1
}
interface {
ringnumber: 1
bindnetaddr: 192.168.10.0
mcastport: 5407
ttl: 1
}
transport: udpu
}
logging {
fileline: off
to_logfile: yes
to_syslog: no
logfile: /var/log/cluster/corosync.log
debug: off
timestamp: on
logger_subsys {
subsys: QUORUM
debug: off
}
}
nodelist {
node {
ring0_addr: 10.18.49.36
ring1_addr: 192.168.10.21
nodeid: 1
}
node {
ring0_addr: 10.18.49.37
ring1_addr: 192.168.10.22
nodeid: 2
}
}
quorum {
# Enable and configure quorum subsystem (default: off)
# see also corosync.conf.5 and votequorum.5
provider: corosync_votequorum
expected_votes: 2
two_node: 1
}
●drbd.res
resource r0 {
protocol C;
disk {
on-io-error detach;
}
syncer {
rate 30M;
verify-alg sha1;
}
on test-node1 {
device /dev/drbd0;
disk /dev/sdc;
address 172.21.50.10:7789;
meta-disk internal;
}
on test-node2 {
device /dev/drbd0;
disk /dev/sdc;
address 172.21.50.11:7789;
meta-disk internal;
}
}
resource r1 {
protocol C;
disk {
on-io-error detach;
}
syncer {
rate 30M;
verify-alg sha1;
}
on test-node1 {
device /dev/drbd1;
disk /dev/sdd;
address 172.21.50.10:7790;
meta-disk internal;
}
on test-node2 {
device /dev/drbd1;
disk /dev/sdd;
address 172.21.50.11:7790;
meta-disk internal;
}
}
_______________________________________________
Linux-ha-japan mailing list
Linux-ha-japan@lists.osdn.me
http://lists.osdn.me/mailman/listinfo/linux-ha-japan