[Linux-cluster] Service is not migrated when owner fails

Maykel Moya moya at latertulia.org
Wed Mar 18 18:59:21 UTC 2009


I have a 4-node cluster, each node running one of four services. Each
service is an ip/fs combination. I'm trying to test service failover.
After disconnecting the network on one of the nodes (ip link set eth0
down), its running service is not migrated to another node until the
node gets successfully fenced.
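
Roughly what I do to test, with node and service names as in the
config below (the clustat call is just how I happen to watch state,
any status tool would do):

  # on the node currently owning vmail1_svc (e1b01 in my case)
  ip link set eth0 down

  # on one of the surviving nodes, watch member and service state
  clustat -i 2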

I tried adding 'recovery="relocated"' to the <service> declaration,
but in that case the service is only relocated once the failing node
is back online after a successful fence.
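
For clarity, this is where I added the attribute (sketch for the
first service only, everything else unchanged). As far as I can see,
the documented values for recovery are "restart", "relocate" and
"disable", so maybe "relocated" is simply not recognized:

  <service domain="vmail1_domain" name="vmail1_svc" autostart="1"
           recovery="relocated">
    ...
  </service>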

I'd like the service to be migrated to another node as soon as a
failure of the current owner node is detected.
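
In other words, I'd like the cluster to do on its own what I would
otherwise do by hand (target node picked arbitrarily here):

  clusvcadm -r vmail1_svc -m e1b02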

Regards,
maykel

====

<cluster>
  ....
  <rm log_level="7" log_facility="local4">
    <service domain="vmail1_domain" name="vmail1_svc" autostart="1">
      <fs name="vmail1_fs" fstype="ext3" mountpoint="/vmail/part1"
device="/dev/disk/by-id/dm-uuid-mpath-3600508b40006da020001000000090000"
options="noatime,nodiratime,noquota"/>
      <ip address="10.10.3.21"/>
    </service>
    <service domain="vmail2_domain" name="vmail2_svc" autostart="1">
      <fs name="vmail2_fs" fstype="ext3" mountpoint="/vmail/part2"
device="/dev/disk/by-id/dm-uuid-mpath-3600508b40006da0200010000000c0000"
options="noatime,nodiratime,noquota"/>
      <ip address="10.10.3.22"/>
    </service>
    <service domain="vmail3_domain" name="vmail3_svc" autostart="1">
      <fs name="vmail3_fs" fstype="ext3" mountpoint="/vmail/part3"
device="/dev/disk/by-id/dm-uuid-mpath-3600508b40006da0200010000000f0000"
options="noatime,nodiratime,noquota"/>
      <ip address="10.10.3.23"/>
    </service>
    <service domain="vmail4_domain" name="vmail4_svc" autostart="1">
      <fs name="vmail4_fs" fstype="ext3" mountpoint="/vmail/part4"
device="/dev/disk/by-id/dm-uuid-mpath-3600508b40006da020001000000120000"
options="noatime,nodiratime,noquota"/>
      <ip address="10.10.3.24"/>
    </service>
    <failoverdomains>
      <failoverdomain name="vmail1_domain" ordered="1" restricted="0">
        <failoverdomainnode name="e1b01" priority="1"/>
        <failoverdomainnode name="e1b02" priority="2"/>
        <failoverdomainnode name="e1b03" priority="3"/>
        <failoverdomainnode name="e1b04" priority="4"/>
      </failoverdomain>
      <failoverdomain name="vmail2_domain" ordered="1" restricted="0">
        <failoverdomainnode name="e1b01" priority="4"/>
        <failoverdomainnode name="e1b02" priority="1"/>
        <failoverdomainnode name="e1b03" priority="2"/>
        <failoverdomainnode name="e1b04" priority="3"/>
      </failoverdomain>
      <failoverdomain name="vmail3_domain" ordered="1" restricted="0">
        <failoverdomainnode name="e1b01" priority="3"/>
        <failoverdomainnode name="e1b02" priority="4"/>
        <failoverdomainnode name="e1b03" priority="1"/>
        <failoverdomainnode name="e1b04" priority="2"/>
      </failoverdomain>
      <failoverdomain name="vmail4_domain" ordered="1" restricted="0">
        <failoverdomainnode name="e1b01" priority="2"/>
        <failoverdomainnode name="e1b02" priority="3"/>
        <failoverdomainnode name="e1b03" priority="4"/>
        <failoverdomainnode name="e1b04" priority="1"/>
      </failoverdomain>
    </failoverdomains>
  </rm>
  ....
</cluster>



