[Cluster-devel] cluster/rgmanager ChangeLog src/daemons/rg_state.c

lhh at sourceware.org
Thu Jun 14 14:53:42 UTC 2007


CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	lhh at sourceware.org	2007-06-14 14:53:42

Modified files:
	rgmanager      : ChangeLog 
	rgmanager/src/daemons: rg_state.c 

Log message:
	Ancillary patch that goes with the fix for #211469

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/ChangeLog.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.31.2.12&r2=1.31.2.13
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_state.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.24.2.7&r2=1.24.2.8

--- cluster/rgmanager/ChangeLog	2007/06/14 13:35:58	1.31.2.12
+++ cluster/rgmanager/ChangeLog	2007/06/14 14:53:42	1.31.2.13
@@ -1,3 +1,8 @@
+2007-06-14 Lon Hohberger <lhh at redhat.com>
+	* src/daemons/rg_state.c: Ancillary patch to (a) make clusvcadm get
+	the new owner of the service and (b) ensure allowed_nodes is freed
+	before returning.  #211469
+
 2007-06-14 Marek Grac <mgrac at redhat.com>
 	* src/daemons/main.c, rg_state.c, rg_thread.c,
 	  src/utils/clusvcadm.c
--- cluster/rgmanager/src/daemons/rg_state.c	2007/06/14 13:35:59	1.24.2.7
+++ cluster/rgmanager/src/daemons/rg_state.c	2007/06/14 14:53:42	1.24.2.8
@@ -1676,43 +1676,46 @@
 int
 handle_fd_start_req(char *svcName, int request, int *new_owner)
 {
-       cluster_member_list_t *allowed_nodes;
-       int target, me = my_id();
-       int ret;
+	cluster_member_list_t *allowed_nodes;
+	int target, me = my_id();
+	int ret = RG_EFAIL;
 
-       allowed_nodes = member_list();
+	allowed_nodes = member_list();
 
-       while (memb_count(allowed_nodes)) {
-               target = best_target_node(allowed_nodes, -1,
-                                         svcName, 1);
-               if (target == me) {
-                       ret = handle_start_remote_req(svcName, request);
-               } else if (target < 0) {
-                       free_member_list(allowed_nodes);
-                       return RG_EFAIL;
-               } else {
-                       ret = relocate_service(svcName, request, target);
-               }
+	while (memb_count(allowed_nodes)) {
+		target = best_target_node(allowed_nodes, -1,
+		    			  svcName, 1);
+		if (target == me) {
+		      	ret = handle_start_remote_req(svcName, request);
+		} else if (target < 0) {
+			ret = RG_EFAIL;
+			goto out;
+		} else {
+			ret = relocate_service(svcName, request, target);
+		}
 
-               switch(ret) {
-               case RG_ESUCCESS:
-                       return RG_ESUCCESS;
-               case RG_ERUN:
-                       return RG_ERUN;
-               case RG_EFAIL:
-                       memb_mark_down(allowed_nodes, target);
-                       continue;
-               case RG_EABORT:
-                       svc_report_failure(svcName);
-                       free_member_list(allowed_nodes);
-                       return RG_EFAIL;
-               default:
-                       clulog(LOG_ERR,
-                              "#6X: Invalid reply [%d] from member %d during"
-                              " relocate operation!\n", ret, target);
-               }
-       }
+		switch(ret) {
+		case RG_EABORT:
+			svc_report_failure(svcName);
+			ret = RG_EFAIL;
+			goto out;
+		case RG_ESUCCESS:
+			*new_owner = target;
+			goto out;
+		case RG_ERUN:
+			/* Already running */
+			goto out;
+		case RG_EFAIL:
+			memb_mark_down(allowed_nodes, target);
+			continue;
+		default:
+			clulog(LOG_ERR,
+			       "#6X: Invalid reply [%d] from member %d during"
+			       " relocate operation!\n", ret, target);
+		}
+	}
 
-       free_member_list(allowed_nodes);
-       return RG_EFAIL;
+out:
+	free_member_list(allowed_nodes);
+	return ret;
 }
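
For reference, here is a minimal caller sketch showing how the new_owner
out-parameter added by this patch might be consumed. The wrapper function,
its name, and the log text are hypothetical illustrations, not the actual
clusvcadm code path; rgmanager's own headers are assumed for clulog(),
the RG_* codes, and handle_fd_start_req():

	#include <syslog.h>	/* LOG_INFO */
	/* rgmanager headers assumed for clulog(), the RG_* return
	   codes, and handle_fd_start_req() */

	/* Hypothetical wrapper, for illustration only */
	int
	example_fd_start(char *svcName, int request)
	{
		int new_owner = -1;	/* sentinel: no owner reported */
		int ret;

		ret = handle_fd_start_req(svcName, request, &new_owner);
		if (ret == RG_ESUCCESS)
			/* new_owner is assigned only on RG_ESUCCESS; on
			   RG_ERUN (already running) it is left untouched */
			clulog(LOG_INFO,
			       "Service %s started on member %d\n",
			       svcName, new_owner);
		return ret;
	}

Before this patch, a caller could not learn which member ended up owning
the service after a failover-domain start, and the RG_ESUCCESS/RG_ERUN
returns skipped free_member_list(). The patch assigns *new_owner = target
on success and routes every exit through a single cleanup point so
allowed_nodes is always freed.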