How to configure Pacemaker with Zimbra

Step: 1

Install corosync, pcs, and pacemaker on both nodes:

[root@zmail1]# yum install corosync pcs pacemaker

Step: 2

Set the same password for the hacluster user on both nodes:

[root@zmail1 /]# passwd hacluster
Changing password for user hacluster.
New password:
BAD PASSWORD: The password contains the user name in some form
Retype new password:
passwd: all authentication tokens updated successfully.

Step: 3

Start the pcsd service on both nodes:

[root@zmail1 /]# systemctl start pcsd
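
To have pcsd come back automatically after a reboot, it can also be enabled on both nodes (optional, assuming the systemd-based CentOS/RHEL 7 setup used in these commands):

[root@zmail1 /]# systemctl enable pcsd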

Step: 4

Now we need to authenticate all nodes before configuration

[root@zmail1 ~]# pcs cluster auth zmail1 zmail2
Username: hacluster
Password:
zmail2: Authorized
zmail1: Authorized
Note: if you get an "unable to authenticate" error, see the fix at the following link:
https://github.com/ClusterLabs/pcs/commit/2ea12dacf23a57fc767d3ef29f0f73cbf9c90a16
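
If the nodes still cannot reach each other, check the firewall as well. Assuming firewalld is in use, its predefined high-availability service opens the ports pcsd and corosync need (run on both nodes):

[root@zmail1 /]# firewall-cmd --permanent --add-service=high-availability
[root@zmail1 /]# firewall-cmd --reload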

Step: 5

Create the cluster and add the nodes:

[root@zmail1 /]# pcs cluster setup --name cluster_zimbra zmail1 zmail2

After creating the cluster, we can start it:

[root@zmail1 ~]# pcs cluster start --all
zmail1: Starting Cluster...
zmail2: Starting Cluster...
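
Optionally, the cluster can also be set to start automatically when the nodes boot (many setups prefer to leave this disabled and start it manually after investigating a failure):

[root@zmail1 /]# pcs cluster enable --all
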
To check the status of the cluster, nodes and corosync

[root@zmail1 /]# pcs status cluster
Cluster Status:
Stack: corosync
Current DC: zmail2.server.lan (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
Last updated: Mon Oct 29 09:02:06 2018
Last change: Mon Oct 29 06:48:36 2018 by root via cibadmin on zmail2.server.lan
2 nodes configured
5 resources configured

PCSD Status:
zmail1: Online
zmail2: Online
[root@zmail1 ~]# pcs status nodes
Pacemaker Nodes:
Online: zmail1 zmail2
Standby:
Maintenance:
Offline:
Pacemaker Remote Nodes:
Online:
Standby:
Maintenance:
Offline:

[root@zmail1 ~]# corosync-cmapctl | grep members
runtime.totem.pg.mrp.srp.members.1.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(172.16.5.151)
runtime.totem.pg.mrp.srp.members.1.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.1.status (str) = joined
runtime.totem.pg.mrp.srp.members.2.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(172.16.5.152)
runtime.totem.pg.mrp.srp.members.2.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.2.status (str) = joined
[root@zmail1 ~]# pcs status corosync

Membership information
----------------------
Nodeid Votes Name
1 1 zmail1.server.lan (local)
2 1 zmail2.server.lan
Step: 6

Configure the cluster from node1; there is no need to repeat these commands on both nodes.

Disable STONITH and set the cluster to ignore a loss of quorum:

[root@zmail1 /]# pcs property set stonith-enabled=false

[root@zmail1 /]# pcs property set no-quorum-policy=ignore
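
To verify both settings, list the configured cluster properties; the output should include stonith-enabled: false and no-quorum-policy: ignore:

[root@zmail1 /]# pcs property list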

Step: 7

Configure a virtual / cluster IP address for reaching the services, using the following resource:

[root@zmail1 /]# pcs resource create virtual_ip ocf:heartbeat:IPaddr2 ip=172.16.5.150 cidr_netmask=32 op monitor interval=30s

To check which node currently holds the virtual / cluster IP resource:

[root@zmail1 /]# pcs status|grep virtual_ip
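
The output should look similar to the following (here assuming the resource is currently running on zmail1):

virtual_ip (ocf::heartbeat:IPaddr2): Started zmail1.server.lan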

Create the zimbra resource agent script under /usr/lib/ocf/resource.d/btactic with the following content:


#!/bin/sh
#
# Resource script for Zimbra
#
# Description: Manages Zimbra as an OCF resource in
# an high-availability setup.
#
# Author: Adrian Gibanel
# <adrian.gibanel@btactic.com> : Original Author
# License: GNU General Public License (GPL)
# Note: Aimed at an active/passive cluster originally
# Inspired from postfix OCF script
# Inspired from Ubuntu LSB script.
# Not sure it will work
# for other distros without modifying
#
#
# usage: $0 {start|stop|reload|status
# |monitor|validate-all|meta-data}
#
# The "start" arg starts Zimbra
#
# The "stop" arg stops it.
#
# OCF parameters:
# OCF_RESKEY_binary
# OCF_RESKEY_config_dir
# OCF_RESKEY_parameters
#
######################

# Initialization:

: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

: ${OCF_RESKEY_binary="zmcontrol"}
: ${OCF_RESKEY_zimbra_dir="/opt/zimbra"}
: ${OCF_RESKEY_zimbra_user="zimbra"}
: ${OCF_RESKEY_zimbra_group="zimbra"}
USAGE="Usage: $0 {start|stop|reload\
|status|monitor|validate-all|meta-data}";

###############################

usage() {
echo $USAGE >&2
}

meta_data() {
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="zimbra">
<version>0.1</version>
<longdesc lang="en">
This script manages Zimbra as an
OCF resource in a high-availability setup.
</longdesc>
<shortdesc lang="en">
Manages a highly available Zimbra mail server instance
</shortdesc>

<parameters>

<parameter name="binary" unique="0" required="0">
<longdesc lang="en">
Short name to the Zimbra control script.
For example, "zmcontrol".
</longdesc>
<shortdesc lang="en">
Short name to the Zimbra control script</shortdesc>
<content type="string" default="zmcontrol" />
</parameter>

<parameter name="zimbra_dir" unique="1" required="0">
<longdesc lang="en">
Full path to Zimbra directory.
For example, "/opt/zimbra".
</longdesc>
<shortdesc lang="en">
Full path to Zimbra directory</shortdesc>
<content type="string" default="/opt/zimbra" />
</parameter>

<parameter name="zimbra_user" unique="1" required="0">
<longdesc lang="en">
Zimbra username.
For example, "zimbra".
</longdesc>
<shortdesc lang="en">Zimbra username</shortdesc>
<content type="string" default="zimbra" />
</parameter>

<parameter name="zimbra_group"
unique="1" required="0">
<longdesc lang="en">
Zimbra group.
For example, "zimbra".
</longdesc>
<shortdesc lang="en">Zimbra group</shortdesc>
<content type="string" default="zimbra" />
</parameter>

</parameters>

<actions>
<action name="start" timeout="360s" />
<action name="stop" timeout="360s" />
<action name="reload" timeout="360s" />
<action name="monitor" depth="0" timeout="40s"
interval="60s" />
<action name="validate-all" timeout="360s" />
<action name="meta-data" timeout="5s" />
</actions>
</resource-agent>
END
}

command()
{
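# Run the requested zmcontrol action ("status", "startup", "shutdown", ...)
# as the zimbra user, fixing redo.log ownership first if the file exists.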
if [ -f ${zimbra_dir}/redolog/redo.log ]; then
chown -f ${zimbra_user}:${zimbra_group} \
${zimbra_dir}/redolog/redo.log
fi

su - ${zimbra_user} -c "${binary} $1 </dev/null"
}

running() {
# run Zimbra status
command status
}
zimbra_status()
{
running
}

zimbra_start()
{
# if Zimbra is running return success
if zimbra_status; then
ocf_log info "Zimbra already running."
return $OCF_SUCCESS
fi

# start Zimbra
command startup
ret=$?
if [ -d /var/lock/subsys -a $ret -eq 0 ]; then
touch /var/lock/subsys/zimbra
fi

if [ $ret -ne 0 ]; then
ocf_log err "Zimbra returned error." $ret
return $OCF_ERR_GENERIC
fi

# grant some time for
# startup/forking the sub processes
sleep 2

# initial monitoring action
running
ret=$?
if [ $ret -ne $OCF_SUCCESS ]; then
ocf_log err "Zimbra failed \
initial monitor action." $ret
return $OCF_ERR_GENERIC
fi

ocf_log info "Zimbra started."
return $OCF_SUCCESS
}
zimbra_stop()
{
# if Zimbra is not running return success
if ! zimbra_status; then
ocf_log info "Zimbra already stopped."
return $OCF_SUCCESS
fi

# stop Zimbra
command shutdown
ret=$?

if [ -d /var/lock/subsys -a $ret -eq 0 ]; then
rm -f /var/lock/subsys/zimbra
fi

if [ $ret -ne 0 ]; then
ocf_log err "Zimbra returned \
an error while stopping." $ret
return $OCF_ERR_GENERIC
fi

# grant some time for shutdown and recheck 5 times
for i in 1 2 3 4 5; do
if zimbra_status; then
sleep 1
fi
done

# escalate to abort if we did not stop by now
# @TODO shall we loop here too?
if zimbra_status; then
ocf_log err "Zimbra failed to stop. \
Escalating to 'abort'."

ORPHANED=`ps -u ${zimbra_user} -o \
"pid="` && kill -9 $ORPHANED 2>&1
ret=$?
sleep 10

# zimbra abort did not succeed
if zimbra_status; then
ocf_log err "Zimbra failed to abort."
return $OCF_ERR_GENERIC
fi
fi

ocf_log info "Zimbra stopped."
return $OCF_SUCCESS
}

zimbra_reload()
{
if zimbra_status; then
ocf_log info "Reloading Zimbra."
command reload
fi
}

zimbra_monitor()
{
if zimbra_status; then
return $OCF_SUCCESS
fi

return $OCF_NOT_RUNNING
}

zimbra_validate_all()
{
# check zimbra_dir parameter
if [ ! -d “$zimbra_dir” ]; then
ocf_log err "Zimbra directory \
'$zimbra_dir' does not exist." $ret
return $OCF_ERR_INSTALLED
fi
# check that the Zimbra binaries
# exist and can be executed
if ! have_binary \
"${zimbra_dir}/bin/${binary}" ; then
return $OCF_ERR_INSTALLED
fi

# check permissions
user=${zimbra_user}
zimbra_writable_dirs="${zimbra_dir}/conf"
for dir in "$zimbra_writable_dirs"; do
if ! su -s /bin/sh - \
$user -c "test -w $dir"; then
ocf_log err "Directory \
'$dir' is not writable by user '$user'."
exit $OCF_ERR_PERM;
fi
done

return $OCF_SUCCESS
}

#
# Main
#

if [ $# -ne 1 ]; then
usage
exit $OCF_ERR_ARGS
fi

binary=$OCF_RESKEY_binary
zimbra_dir=$OCF_RESKEY_zimbra_dir
zimbra_user=$OCF_RESKEY_zimbra_user
zimbra_group=$OCF_RESKEY_zimbra_group
parameters=$OCF_RESKEY_parameters

# debugging stuff
#echo OCF_RESKEY_binary=$OCF_RESKEY_binary \
# >> /tmp/prox_conf_$OCF_RESOURCE_INSTANCE
#echo OCF_RESKEY_binary=$OCF_RESKEY_zimbra_dir \
#>> /tmp/prox_conf_$OCF_RESKEY_zimbra_dir
#echo OCF_RESKEY_binary=$OCF_RESKEY_zimbra_user \
#>> /tmp/prox_conf_$OCF_RESKEY_zimbra_user
#echo OCF_RESKEY_binary=$OCF_RESKEY_zimbra_group \
#>> /tmp/prox_conf_$OCF_RESKEY_zimbra_group
#echo OCF_RESKEY_binary=$OCF_RESKEY_parameters \
#>> /tmp/prox_conf_$OCF_RESKEY_parameters
# build Zimbra options string
# *outside* to access from each method
OPTIONS=""
OPTION_CONFIG_DIR=""

# check if the Zimbra config_dir exist
if [ "x$config_dir" != "x" ]; then
# check for postconf binary
#check_binary "${zimbra_dir}/bin/${binary}"

# remove all trailing slashes
zimbra_dir=`echo $zimbra_dir | sed 's/\/*$//'`

fi

case $1 in
meta-data) meta_data
exit $OCF_SUCCESS
;;

usage|help) usage
exit $OCF_SUCCESS
;;
esac

zimbra_validate_all
ret=$?

#echo "debug[$1:$ret]"
LSB_STATUS_STOPPED=3
if [ $ret -ne $OCF_SUCCESS ]; then
case $1 in
stop) exit $OCF_SUCCESS ;;
monitor) exit $OCF_NOT_RUNNING;;
status) exit $LSB_STATUS_STOPPED;;
*) exit $ret;;
esac
fi

case $1 in
monitor) zimbra_monitor
exit $?
;;
start) zimbra_start
exit $?
;;

stop) zimbra_stop
exit $?
;;

reload) zimbra_reload
exit $?
;;

status) if zimbra_status; then
ocf_log info "Zimbra is running."
exit $OCF_SUCCESS
else
ocf_log info "Zimbra is stopped."
exit $OCF_NOT_RUNNING
fi
;;

validate-all) exit $OCF_SUCCESS
;;

*) usage
exit $OCF_ERR_UNIMPLEMENTED
;;
esac
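
Save the script above as zimbra inside /usr/lib/ocf/resource.d/btactic on both nodes, so that pacemaker can find it under the ocf:btactic:zimbra name used in the next step, and make it executable. A minimal sketch (the file name zimbra is assumed; adjust it if you save the script under a different name):

[root@zmail1 /]# mkdir -p /usr/lib/ocf/resource.d/btactic
[root@zmail1 /]# chmod 755 /usr/lib/ocf/resource.d/btactic/zimbra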


Step: 8

Configure the DRBD resource in a primary/standby (master/slave) layout with the following commands. Colocation constraints force the cluster IP, filesystem, and Zimbra resources to run on the node where the DRBD resource is master, and ordering constraints promote DRBD and mount the filesystem before the cluster IP and Zimbra are started.

## For creation

# pcs resource create MailIP ocf:heartbeat:IPaddr2 ip=172.16.5.150 cidr_netmask=32 op monitor interval=30s
# pcs resource create opt_disk ocf:linbit:drbd drbd_resource=r0 op monitor interval=35s role=Master op monitor interval=45s role=Slave
# pcs resource master opt_disk_manager opt_disk master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true target-role=Master
# pcs resource create fs_res ocf:heartbeat:Filesystem device=/dev/drbd0 directory=/opt fstype=ext4 options=noatime
# pcs resource create zimbra_start ocf:btactic:zimbra op monitor interval=120s
# pcs resource create mail_outgoing ocf:heartbeat:IPsrcaddr ipaddress=172.16.5.150 op monitor interval=10 timeout=20
## For ordering
pcs constraint order promote opt_disk_manager then start fs_res
pcs constraint order start fs_res then start zimbra_start kind=Mandatory
pcs constraint order start fs_res then start MailIP kind=Mandatory
pcs constraint order promote opt_disk_manager then start MailIP kind=Mandatory
pcs constraint order start MailIP then start mail_outgoing kind=Mandatory

## For colocation
pcs constraint colocation add fs_res with opt_disk_manager INFINITY rsc-role=Master
pcs constraint colocation add mail_outgoing with MailIP INFINITY
pcs constraint colocation add zimbra_start with MailIP INFINITY
pcs constraint colocation add MailIP with fs_res INFINITY
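
The resulting constraints can be reviewed before checking the overall cluster state (the listing shows the constraint IDs pcs generated automatically):

pcs constraint show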

[root@zmail2 mnt]# pcs status
Cluster name: cluster_zimbra
Stack: corosync
Current DC: zmail2.server.lan (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
Last updated: Mon Oct 29 11:26:07 2018
Last change: Mon Oct 29 06:48:36 2018 by root via cibadmin on zmail2.server.lan

2 nodes configured
5 resources configured

Online: [ zmail2.server.lan ]
OFFLINE: [ zmail1.server.lan ]

Full list of resources:

MailIP (ocf::heartbeat:IPaddr2): Started zmail2.server.lan
Master/Slave Set: opt_disk_manager [opt_disk]
Masters: [ zmail2.server.lan ]
Stopped: [ zmail1.server.lan ]
fs_res (ocf::heartbeat:Filesystem): Started zmail2.server.lan
zimbra_start (ocf::btactic:zimbra): Started zmail2.server.lan

Daemon Status:
corosync: active/disabled
pacemaker: active/disabled
pcsd: active/enabled
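
Note that corosync and pacemaker are reported as active/disabled above, so the cluster will not start automatically after a reboot; see the pcs cluster enable command in Step 5 if that is desired. To test failover, the active node can be put into standby and back again (pcs 0.9.x syntax, matching the CentOS 7 versions shown above; newer releases use pcs node standby):

[root@zmail2 mnt]# pcs cluster standby zmail2.server.lan
[root@zmail2 mnt]# pcs status
[root@zmail2 mnt]# pcs cluster unstandby zmail2.server.lan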

 