Hello Alan, et al.
I've been playing with Software RAID in a high availability setup and
thought I would contribute this example RAID script back to the
heartbeat project. I'm currently working with kimberlite,
(http://oss.missioncriticallinux.com) but I mocked up this script as a
proof of concept.
I've got some STONITH hardware that works with DB-9 serial ports
coming in "Real Soon Now" (so they tell me), and I will probably
contribute patches for it as well:
Western Telematics RPC10 (do you support that already? http://www.wti.com/)
Nightware RPC100S (http://www.nightware.com/)
An 'EMB port' on an industrial PC platform (don't have a URL)
-Eric.
#!/bin/bash
#
# Example Linux RAID high availability RAID resource script
#
# Author: Eric Z. Ayers <eric.ayers@compgen.com>
#
# License: Copyright 2000, Computer Generation Incorporated.
# You are free to use and modify this file. You are also free
# to redistribute this file and any changes you make.
#
# You are encouraged to submit modifications/updates to
# linux-ha-dev@lists.tummy.com
#
# This script is an example start/stop script for a
# high availability RAID resource.
#
# Note, I hardcoded config variables at the top of the script. It might
# be more "elegant" to pass these in as parameters, but that was too much
# like hard work for me.
#
# This script assumes you are running linux kernel 2.2.X with
# the RAID MD driver version 0.90 or better for help with linux Software
# RAID, see the Software RAID howto or try the Linux RAID mailing list:
# linux-raid@vger.kernel.org
#
# You might need to pass the command line parameter: raid=noautodetect
# in an HA environment so that the kernel doesn't automatically start
# up your raid partitions when you boot the node. This means that it isn't
# going to work to use RAID for the system disks and the shared disks.
#
# 0) partition the disks to use for RAID
# 1) Create /etc/raidtab.md? on both systems (see example file below)
# 2) Initialize your raid partition with
# /sbin/mkraid --configfile /etc/raidtab.md? /dev/md?
# 3) Format your filesystem
# mke2fs /dev/md0
# 3) Create the mount point on both systems.
# DO NOT add your raid filesystem to /etc/fstab
# 4) copy this script (to /etc/rc.d/init.d if you wish) and edit it to
# reflect your desired settings.
# 5) Modify the heartbeat 'haresources' setup file
# 6) unmount the filesystem and stop the raid device with 'raidstop'
# 7) fire up heartbeat!
#
#
# EXAMPLE config file /etc/raidtab.md0
# This file must exist on both machines!
#
# raiddev /dev/md0
# raid-level 1
# nr-raid-disks 2
# chunk-size 64k
# persistent-superblock 1
# #nr-spare-disks 0
# device /dev/sda1
# raid-disk 0
# device /dev/sdb1
# raid-disk 1
#
MD=md0 # Name of the MD device; /etc/raidtab.$MD must exist on both nodes
MOUNTPOINT=/data1 # where the filesystem should be mounted
SCSI_MODULE=0 # set to 1 if there is a SCSI module to be loaded
RAID0_MODULE=0 # set to 1 if there is a RAID0 module to be loaded
RAID1_MODULE=1 # set to 1 if there is a RAID1 module to be loaded
MDDEV=/dev/$MD # Device name for RAID block device
# Utilities used by this script (existence checked by check_util below)
MODPROBE=/sbin/modprobe
FSCK=/sbin/fsck
FUSER=/sbin/fuser
RAIDSTART=/sbin/raidstart
MOUNT=/bin/mount
UMOUNT=/bin/umount
RAIDSTOP=/sbin/raidstop
# check_util PATH
# Verify that the utility at PATH exists and is executable.
# Aborts the whole script (exit 1) otherwise; diagnostic goes to stderr.
check_util() {
  if [ ! -x "$1" ] ; then
    echo "ERROR: setup problem: Couldn't find utility $1" >&2
    exit 1
  fi
}
# Abort early if any required external tool is missing or not executable.
for required_tool in "$MODPROBE" "$FSCK" "$FUSER" "$RAIDSTART" \
                     "$MOUNT" "$UMOUNT" "$RAIDSTOP"; do
  check_util "$required_tool"
done
# Look for the 'start' or 'stop' argument
case "$1" in
#
# Start up the RAID device and mount the filesystem
#
start)
	# Refuse to start if the device already appears in the mount table.
	# (grep -q succeeds only on a real match; the old `[ $? -ne 1 ]` test
	# also treated grep's error exit of 2 as "already mounted".)
	if $MOUNT | grep -q -e "^$MDDEV" ; then
		echo "ERROR: Device $MDDEV is already mounted!" >&2
		exit 1
	fi
	# Insert the SCSI low-level driver module, if configured above.
	if [ "$SCSI_MODULE" -gt 0 ] ; then
		if ! $MODPROBE scsi_hostadapter ; then
			echo "ERROR: Couldn't insert SCSI module." >&2
			exit 1
		fi
	fi
	# Insert the RAID personality module(s) this array needs.
	if [ "$RAID0_MODULE" -gt 0 ] ; then
		if ! $MODPROBE raid0 ; then
			echo "ERROR: Couldn't insert RAID0 module" >&2
			exit 1
		fi
	fi
	if [ "$RAID1_MODULE" -gt 0 ] ; then
		if ! $MODPROBE raid1 ; then
			echo "ERROR: Couldn't insert RAID1 module" >&2
			exit 1
		fi
	fi
	# Run raidstart to assemble and start the RAID array.
	if ! $RAIDSTART --configfile "/etc/raidtab.$MD" "$MDDEV" ; then
		echo "ERROR: Couldn't start RAID for $MDDEV" >&2
		exit 1
	fi
	# Check the filesystem and auto-repair minor problems.
	# fsck exit status is a bit-mask: 1 = errors corrected (safe to
	# continue); >= 4 = errors left uncorrected or worse -- do not mount.
	$FSCK -a "$MDDEV"
	if [ $? -ge 4 ] ; then
		echo "ERROR: Couldn't successfully fsck filesystem for $MDDEV" >&2
		exit 1
	fi
	# Mount the filesystem on the configured mount point.
	if ! $MOUNT "$MDDEV" "$MOUNTPOINT" ; then
		echo "ERROR: Couldn't mount filesystem for $MDDEV on $MOUNTPOINT" >&2
		exit 1
	fi
	;;
stop)
	# Only try to unmount if the MD device is actually mounted.
	if $MOUNT | grep -q -e "^$MDDEV" ; then
		# Kill every process with files open on the filesystem so the
		# unmount below cannot fail with EBUSY.
		$FUSER -mk "$MOUNTPOINT"
		# (fuser's return code is not a reliable error indicator,
		# so it is deliberately ignored here.)
		if ! $UMOUNT "$MDDEV" ; then
			echo "ERROR: Couldn't unmount filesystem for $MDDEV" >&2
			exit 1
		fi
	else
		echo "WARNING: Filesystem $MOUNTPOINT not mounted?" >&2
	fi
	# Stop the RAID array whether or not it was mounted.
	if ! $RAIDSTOP --configfile "/etc/raidtab.$MD" "$MDDEV" ; then
		echo "ERROR: Couldn't stop RAID for $MDDEV" >&2
		exit 1
	fi
	;;
*)
	echo "This script should be run with the argument 'start' or 'stop'" >&2
	exit 1
	;;
esac
# If we got this far, everything succeeded.
exit 0
# Revision History
# $Log: ha_raid1.sh,v $
# Revision 1.1 2000/08/24 16:25:09 eric
# Initial revision
#
#
I've been playing with Software RAID in a high availability setup and
thought I would contribute this example RAID script back to the
heartbeat project. I'm currently working with kimberlite,
(http://oss.missioncriticallinux.com) but I mocked up this script as a
proof of concept.
I've got some STONITH hardware that works with DB-9 serial ports
coming in "Real Soon Now" (so they tell me), and I will probably
contribute patches for it as well:
Western Telematics RPC10 (do you support that already? http://www.wti.com/)
Nightware RPC100S (http://www.nightware.com/)
An 'EMB port' on an industrial PC platform (don't have a URL)
-Eric.
#!/bin/bash
#
# Example Linux RAID high availability RAID resource script
#
# Author: Eric Z. Ayers <eric.ayers@compgen.com>
#
# License: Copyright 2000, Computer Generation Incorporated.
# You are free to use and modify this file. You are also free
# to redistribute this file and any changes you make.
#
# You are encouraged to submit modifications/updates to
# linux-ha-dev@lists.tummy.com
#
# This script is an example start/stop script for a
# high availability RAID resource.
#
# Note, I hardcoded config variables at the top of the script. It might
# be more "elegant" to pass these in as parameters, but that was too much
# like hard work for me.
#
# This script assumes you are running linux kernel 2.2.X with
# the RAID MD driver version 0.90 or better for help with linux Software
# RAID, see the Software RAID howto or try the Linux RAID mailing list:
# linux-raid@vger.kernel.org
#
# You might need to pass the command line parameter: raid=noautodetect
# in an HA environment so that the kernel doesn't automatically start
# up your raid partitions when you boot the node. This means that it isn't
# going to work to use RAID for the system disks and the shared disks.
#
# 0) partition the disks to use for RAID
# 1) Create /etc/raidtab.md? on both systems (see example file below)
# 2) Initialize your raid partition with
# /sbin/mkraid --configfile /etc/raidtab.md? /dev/md?
# 3) Format your filesystem
# mke2fs /dev/md0
# 3) Create the mount point on both systems.
# DO NOT add your raid filesystem to /etc/fstab
# 4) copy this script (to /etc/rc.d/init.d if you wish) and edit it to
# reflect your desired settings.
# 5) Modify the heartbeat 'haresources' setup file
# 6) unmount the filesystem and stop the raid device with 'raidstop'
# 7) fire up heartbeat!
#
#
# EXAMPLE config file /etc/raidtab.md0
# This file must exist on both machines!
#
# raiddev /dev/md0
# raid-level 1
# nr-raid-disks 2
# chunk-size 64k
# persistent-superblock 1
# #nr-spare-disks 0
# device /dev/sda1
# raid-disk 0
# device /dev/sdb1
# raid-disk 1
#
MD=md0 # Name of the MD device; /etc/raidtab.$MD must exist on both nodes
MOUNTPOINT=/data1 # where the filesystem should be mounted
SCSI_MODULE=0 # set to 1 if there is a SCSI module to be loaded
RAID0_MODULE=0 # set to 1 if there is a RAID0 module to be loaded
RAID1_MODULE=1 # set to 1 if there is a RAID1 module to be loaded
MDDEV=/dev/$MD # Device name for RAID block device
# Utilities used by this script (existence checked by check_util below)
MODPROBE=/sbin/modprobe
FSCK=/sbin/fsck
FUSER=/sbin/fuser
RAIDSTART=/sbin/raidstart
MOUNT=/bin/mount
UMOUNT=/bin/umount
RAIDSTOP=/sbin/raidstop
# check_util PATH
# Verify that the utility at PATH exists and is executable.
# Aborts the whole script (exit 1) otherwise; diagnostic goes to stderr.
check_util() {
  if [ ! -x "$1" ] ; then
    echo "ERROR: setup problem: Couldn't find utility $1" >&2
    exit 1
  fi
}
# Abort early if any required external tool is missing or not executable.
for required_tool in "$MODPROBE" "$FSCK" "$FUSER" "$RAIDSTART" \
                     "$MOUNT" "$UMOUNT" "$RAIDSTOP"; do
  check_util "$required_tool"
done
# Look for the 'start' or 'stop' argument
case "$1" in
#
# Start up the RAID device and mount the filesystem
#
start)
	# Refuse to start if the device already appears in the mount table.
	# (grep -q succeeds only on a real match; the old `[ $? -ne 1 ]` test
	# also treated grep's error exit of 2 as "already mounted".)
	if $MOUNT | grep -q -e "^$MDDEV" ; then
		echo "ERROR: Device $MDDEV is already mounted!" >&2
		exit 1
	fi
	# Insert the SCSI low-level driver module, if configured above.
	if [ "$SCSI_MODULE" -gt 0 ] ; then
		if ! $MODPROBE scsi_hostadapter ; then
			echo "ERROR: Couldn't insert SCSI module." >&2
			exit 1
		fi
	fi
	# Insert the RAID personality module(s) this array needs.
	if [ "$RAID0_MODULE" -gt 0 ] ; then
		if ! $MODPROBE raid0 ; then
			echo "ERROR: Couldn't insert RAID0 module" >&2
			exit 1
		fi
	fi
	if [ "$RAID1_MODULE" -gt 0 ] ; then
		if ! $MODPROBE raid1 ; then
			echo "ERROR: Couldn't insert RAID1 module" >&2
			exit 1
		fi
	fi
	# Run raidstart to assemble and start the RAID array.
	if ! $RAIDSTART --configfile "/etc/raidtab.$MD" "$MDDEV" ; then
		echo "ERROR: Couldn't start RAID for $MDDEV" >&2
		exit 1
	fi
	# Check the filesystem and auto-repair minor problems.
	# fsck exit status is a bit-mask: 1 = errors corrected (safe to
	# continue); >= 4 = errors left uncorrected or worse -- do not mount.
	$FSCK -a "$MDDEV"
	if [ $? -ge 4 ] ; then
		echo "ERROR: Couldn't successfully fsck filesystem for $MDDEV" >&2
		exit 1
	fi
	# Mount the filesystem on the configured mount point.
	if ! $MOUNT "$MDDEV" "$MOUNTPOINT" ; then
		echo "ERROR: Couldn't mount filesystem for $MDDEV on $MOUNTPOINT" >&2
		exit 1
	fi
	;;
stop)
	# Only try to unmount if the MD device is actually mounted.
	if $MOUNT | grep -q -e "^$MDDEV" ; then
		# Kill every process with files open on the filesystem so the
		# unmount below cannot fail with EBUSY.
		$FUSER -mk "$MOUNTPOINT"
		# (fuser's return code is not a reliable error indicator,
		# so it is deliberately ignored here.)
		if ! $UMOUNT "$MDDEV" ; then
			echo "ERROR: Couldn't unmount filesystem for $MDDEV" >&2
			exit 1
		fi
	else
		echo "WARNING: Filesystem $MOUNTPOINT not mounted?" >&2
	fi
	# Stop the RAID array whether or not it was mounted.
	if ! $RAIDSTOP --configfile "/etc/raidtab.$MD" "$MDDEV" ; then
		echo "ERROR: Couldn't stop RAID for $MDDEV" >&2
		exit 1
	fi
	;;
*)
	echo "This script should be run with the argument 'start' or 'stop'" >&2
	exit 1
	;;
esac
# If we got this far, everything succeeded.
exit 0
# Revision History
# $Log: ha_raid1.sh,v $
# Revision 1.1 2000/08/24 16:25:09 eric
# Initial revision
#
#