[Cluster-devel] cluster/rgmanager ChangeLog include/resgroup.h ...
mgrac at sourceware.org
mgrac at sourceware.org
Thu Jun 14 13:36:00 UTC 2007
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL5
Changes by: mgrac at sourceware.org 2007-06-14 13:35:59
Modified files:
rgmanager : ChangeLog
rgmanager/include: resgroup.h
rgmanager/src/daemons: main.c rg_state.c rg_thread.c
rgmanager/src/utils: clusvcadm.c
Log message:
New flag -F for clusvcadm to respect the failover domain (#211469). Also changes 'clusvcadm -e service00' so that it enables the service on the local node and does not respect failover (same as in RHEL4; in RHEL 5.0 it just reported Failure).
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/ChangeLog.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.31.2.11&r2=1.31.2.12
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/include/resgroup.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.15.2.3&r2=1.15.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/main.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.34.2.4&r2=1.34.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_state.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.24.2.6&r2=1.24.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_thread.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.15.2.4&r2=1.15.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/utils/clusvcadm.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.12.2.3&r2=1.12.2.4
--- cluster/rgmanager/ChangeLog 2007/06/13 20:12:19 1.31.2.11
+++ cluster/rgmanager/ChangeLog 2007/06/14 13:35:58 1.31.2.12
@@ -1,3 +1,9 @@
+2007-06-14 Marek Grac <mgrac at redhat.com>
+ * src/daemons/main.c, rg_state.c, rg_thread.c,
+ src/utils/clusvcadm.c
+ * #211469 - RFE: Flag for clusvcadm to respect failover domain
+ * 'clusvcadm -e service00' works same as in RHEL4 (differs from RHEL5.0)
+
2007-06-13 Lon Hohberger <lhh at redhat.com>
* src/daemons/restree.c: Fix #229650 uninitialized bug
--- cluster/rgmanager/include/resgroup.h 2007/03/20 17:09:11 1.15.2.3
+++ cluster/rgmanager/include/resgroup.h 2007/06/14 13:35:59 1.15.2.4
@@ -86,6 +86,7 @@
int handle_relocate_req(char *svcName, int request, int preferred_target,
int *new_owner);
int handle_start_req(char *svcName, int req, int *new_owner);
+int handle_fd_start_req(char *svcName, int req, int *new_owner);
int handle_recover_req(char *svcName, int *new_owner);
int handle_start_remote_req(char *svcName, int req);
--- cluster/rgmanager/src/daemons/main.c 2007/05/10 16:23:43 1.34.2.4
+++ cluster/rgmanager/src/daemons/main.c 2007/06/14 13:35:59 1.34.2.5
@@ -493,7 +493,9 @@
/* Queue request */
rt_enqueue_request(msg_sm->sm_data.d_svcName,
msg_sm->sm_data.d_action,
- ctx, 0, msg_sm->sm_data.d_svcOwner, 0, 0);
+ ctx, 0, msg_sm->sm_data.d_svcOwner,
+ msg_sm->sm_hdr.gh_arg1,
+ msg_sm->sm_hdr.gh_arg2);
return 0;
case RG_EVENT:
--- cluster/rgmanager/src/daemons/rg_state.c 2007/04/19 18:05:37 1.24.2.6
+++ cluster/rgmanager/src/daemons/rg_state.c 2007/06/14 13:35:59 1.24.2.7
@@ -1211,7 +1211,7 @@
strncpy(msg_relo.sm_data.d_svcName, svcName,
sizeof(msg_relo.sm_data.d_svcName));
msg_relo.sm_data.d_ret = 0;
-
+ msg_relo.sm_data.d_svcOwner = target;
/* Open a connection to the other node */
if (msg_open(MSG_CLUSTER, target, RG_PORT, &ctx, 2)< 0) {
@@ -1493,7 +1493,7 @@
*/
if (req == RG_ENABLE)
tolerance = FOD_GOOD;
-
+/*
if (req != RG_RESTART &&
req != RG_START_RECOVER &&
(node_should_start_safe(my_id(), membership, svcName) <
@@ -1514,7 +1514,7 @@
}
}
free_member_list(membership);
-
+*/
/* Check for dependency. We cannot start unless our
dependency is met */
if (check_depend_safe(svcName) == 0)
@@ -1635,10 +1635,12 @@
}
free_member_list(membership);
- if (svc_start(svcName, req) == 0) {
+ x = svc_start(svcName, req);
+
+ if ((x == 0) || (x == RG_ERUN)) {
if (need_check)
pthread_mutex_unlock(&exclusive_mutex);
- return 0;
+ return x;
}
if (need_check)
pthread_mutex_unlock(&exclusive_mutex);
@@ -1670,3 +1672,47 @@
return handle_start_req(svcName, RG_START_RECOVER, new_owner);
}
+
+int
+handle_fd_start_req(char *svcName, int request, int *new_owner)
+{
+ cluster_member_list_t *allowed_nodes;
+ int target, me = my_id();
+ int ret;
+
+ allowed_nodes = member_list();
+
+ while (memb_count(allowed_nodes)) {
+ target = best_target_node(allowed_nodes, -1,
+ svcName, 1);
+ if (target == me) {
+ ret = handle_start_remote_req(svcName, request);
+ } else if (target < 0) {
+ free_member_list(allowed_nodes);
+ return RG_EFAIL;
+ } else {
+ ret = relocate_service(svcName, request, target);
+ }
+
+ switch(ret) {
+ case RG_ESUCCESS:
+ return RG_ESUCCESS;
+ case RG_ERUN:
+ return RG_ERUN;
+ case RG_EFAIL:
+ memb_mark_down(allowed_nodes, target);
+ continue;
+ case RG_EABORT:
+ svc_report_failure(svcName);
+ free_member_list(allowed_nodes);
+ return RG_EFAIL;
+ default:
+ clulog(LOG_ERR,
+ "#6X: Invalid reply [%d] from member %d during"
+ " relocate operation!\n", ret, target);
+ }
+ }
+
+ free_member_list(allowed_nodes);
+ return RG_EFAIL;
+}
--- cluster/rgmanager/src/daemons/rg_thread.c 2007/05/10 16:23:43 1.15.2.4
+++ cluster/rgmanager/src/daemons/rg_thread.c 2007/06/14 13:35:59 1.15.2.5
@@ -248,8 +248,15 @@
break;
}
case RG_START:
- error = handle_start_req(myname, req->rr_request,
- &newowner);
+ if (req->rr_arg0) {
+ error = handle_fd_start_req(myname,
+ req->rr_request,
+ &newowner);
+ } else {
+ error = handle_start_req(myname,
+ req->rr_request,
+ &newowner);
+ }
break;
case RG_RELOCATE:
--- cluster/rgmanager/src/utils/clusvcadm.c 2007/03/20 17:09:12 1.12.2.3
+++ cluster/rgmanager/src/utils/clusvcadm.c 2007/06/14 13:35:59 1.12.2.4
@@ -39,11 +39,14 @@
void
-build_message(SmMessageSt *msgp, int action, char *svcName, int target)
+build_message(SmMessageSt *msgp, int action, char *svcName, int target,
+ int arg1, int arg2)
{
msgp->sm_hdr.gh_magic = GENERIC_HDR_MAGIC;
msgp->sm_hdr.gh_command = RG_ACTION_REQUEST;
msgp->sm_hdr.gh_length = sizeof(*msgp);
+ msgp->sm_hdr.gh_arg1 = arg1;
+ msgp->sm_hdr.gh_arg2 = arg2;
msgp->sm_data.d_action = action;
strncpy(msgp->sm_data.d_svcName, svcName,
sizeof(msgp->sm_data.d_svcName));
@@ -155,6 +158,8 @@
printf(" %s -d <group> Disable <group>\n", name);
printf(" %s -e <group> Enable <group>\n",
name);
+printf(" %s -e <group> -F Enable <group> according to failover\n"
+ " domain rules\n", name);
printf(" %s -e <group> -m <member> Enable <group>"
" on <member>\n", name);
printf(" %s -r <group> -m <member> Relocate <group> [to <member>]\n",
@@ -230,6 +235,7 @@
SmMessageSt msg;
generic_msg_hdr *h = (generic_msg_hdr *)&msg;
int action = RG_STATUS;
+ int fod = 0;
int node_specified = 0;
int me, svctarget = 0;
char *actionstr = NULL;
@@ -240,7 +246,7 @@
return 1;
}
- while ((opt = getopt(argc, argv, "lSue:M:d:r:n:m:vR:s:qh?")) != EOF) {
+ while ((opt = getopt(argc, argv, "lSue:M:d:r:n:m:FvR:s:qh?")) != EOF) {
switch (opt) {
case 'l':
return do_lock();
@@ -257,6 +263,14 @@
action = RG_ENABLE;
svcname = optarg;
break;
+ case 'F':
+ if (node_specified) {
+ fprintf(stderr,
+ "Cannot use '-F' with '-n' or '-m'\n");
+ return 1;
+ }
+ fod = 1;
+ break;
case 'd':
/* DISABLE */
actionstr = "disabling";
@@ -288,6 +302,11 @@
break;
case 'm': /* member ... */
case 'n': /* node .. same thing */
+ if (fod) {
+ fprintf(stderr,
+ "Cannot use '-F' with '-n' or '-m'\n");
+ return 1;
+ }
strncpy(nodename,optarg,sizeof(nodename));
node_specified = 1;
break;
@@ -351,8 +370,8 @@
*/
//strcpy(nodename,"me");
}
-
- build_message(&msg, action, svcname, svctarget);
+
+ build_message(&msg, action, svcname, svctarget, fod, 0);
if (action != RG_RELOCATE && action != RG_MIGRATE) {
if (!node_specified)
More information about the Cluster-devel
mailing list