Second synchronized disk with DRBD, Corosync and Pacemaker

A couple of years ago, I configured a failover system using DRBD, Corosync and Pacemaker, following this tutorial. Years later, I needed more disk space on the system and wanted the new disk to be synchronized between the nodes as well. Here is the configuration example for that. If you want to adapt this configuration to your own system, just make sure that you start the services in the correct order within the crm console; otherwise, your failover system will not work correctly.
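
For example, if the resources ever have to be started by hand from the crm console, following the same sequence as the ordering constraints in the Pacemaker configuration further below should work (a rough sketch using the resource names from this configuration):

crm(live)# resource start ms_r1
crm(live)# resource start ms_r0
crm(live)# resource start WebServer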

DRBD configuration:

/etc/drbd.d/r0.res:

resource r0 {
        protocol C;
        flexible-meta-disk internal;

        on fo2 {
                address 10.0.0.2:7801;
                device /dev/drbd0 minor 0;
                disk /dev/sda7;
        }
        on fo3 {
                address 10.0.0.3:7801;
                device /dev/drbd0 minor 0;
                disk /dev/sda7;
        }

        net {
                after-sb-0pri discard-younger-primary;
                after-sb-1pri discard-secondary;
                after-sb-2pri call-pri-lost-after-sb;
        }
}

/etc/drbd.d/r1.res:

resource r1 {
        protocol C;
        flexible-meta-disk internal;

        on fo2 {
                address 10.0.0.2:7802;
                device /dev/drbd1;
                disk /dev/sdc1;
        }
        on fo3 {
                address 10.0.0.3:7802;
                device /dev/drbd1;
                disk /dev/sdc1;
        }

        net {
                after-sb-0pri discard-younger-primary;
                after-sb-1pri discard-secondary;
                after-sb-2pri call-pri-lost-after-sb;
        }
}
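
With r1.res in place on both nodes, the new resource still has to be initialized, given a filesystem and a mount point before Pacemaker can manage it. A rough sketch of those steps follows; the initial-sync command shown here is the DRBD 8.3 syntax (on DRBD 8.4 it would be drbdadm primary --force r1 instead), so adjust to your version:

# on both nodes
drbdadm create-md r1
drbdadm up r1
mkdir -p /sync2

# on the node that should become primary first
drbdadm -- --overwrite-data-of-peer primary r1
mkfs.ext4 /dev/drbd1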

Pacemaker Configuration:

Below is the Pacemaker CRM configuration for the failover system:

node fo2 \
        attributes standby="off"
node fo3 \
        attributes standby="off"
primitive ClusterIP ocf:heartbeat:IPaddr2 \
        params ip="10.0.0.1" cidr_netmask="24" \
        op monitor interval="30s"
primitive Links heartbeat:drbdlinks
primitive WebFS ocf:heartbeat:Filesystem \
        params device="/dev/drbd0" directory="/sync" fstype="ext4" \
        op start interval="0" timeout="60" \
        op stop interval="0" timeout="60"
primitive WebFS2 ocf:heartbeat:Filesystem \
        params device="/dev/drbd1" directory="/sync2" fstype="ext4" \
        op start interval="0" timeout="60" \
        op stop interval="0" timeout="60" \
        meta target-role="Started"
primitive WebSite ocf:heartbeat:apache \
        params configfile="/etc/apache2/apache2.conf" \
        op monitor interval="1min" timeout="20s" \
        op start interval="0" timeout="40s" \
        op stop interval="0" timeout="60s"
primitive r0 ocf:linbit:drbd \
        params drbd_resource="r0" \
        op monitor interval="29s" role="Master" \
        op monitor interval="31s" role="Slave" \
        op start interval="0" timeout="240" \
        op stop interval="0" timeout="100" \
        op promote interval="0" timeout="90" \
        op demote interval="0" timeout="90"
primitive r1 ocf:linbit:drbd \
        params drbd_resource="r1" \
        op monitor interval="29s" role="Master" \
        op monitor interval="31s" role="Slave" \
        op start interval="0" timeout="240" \
        op stop interval="0" timeout="100" \
        op promote interval="0" timeout="90" \
        op demote interval="0" timeout="90"
group WebServer ClusterIP WebFS WebFS2 Links WebSite \
        meta target-role="Started"
ms ms_r0 r0 \
        meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
ms ms_r1 r1 \
        meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" target-role="Started"
location prefer-fo2 WebServer 50: fo2
colocation WebFS2-with-ms_r1 inf: WebFS2 ms_r1:Master
colocation WebServer-with-ms_r0 inf: WebServer ms_r0:Master
colocation ms_r1-with-ms_r0 inf: ms_r1:Master ms_r0:Master
#order WebFS2-after-WebFS inf: WebFS:start WebFS2:start
order WebServer-after-ms_r0 inf: ms_r0:promote WebServer:start
order ms_r0-after-ms_r1 inf: ms_r1:promote ms_r0:promote
property $id="cib-bootstrap-options" \
        dc-version="1.1.7-ee0730e13d124c3d58f00016c3376a1de5323cff" \
        cluster-infrastructure="openais" \
        expected-quorum-votes="2" \
        stonith-enabled="false" \
        no-quorum-policy="ignore" \
        last-lrm-refresh="1427300070"
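
The configuration above can be entered with crm configure edit (or loaded from a file with crm configure load update <file>). Afterwards, a quick check should show both DRBD resources promoted on the same node and the WebServer group running there, for example:

crm_mon -1    # ms_r0 and ms_r1 should be Master on the active node, with ClusterIP, WebFS, WebFS2, Links and WebSite started there
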
Done, your second disk will now be synchronized as well.

Resources:

Pacemaker Doc
