[Cluster-devel] cluster/rgmanager ChangeLog src/daemons/groups ...

lhh at sourceware.org
Wed Dec 12 21:41:33 UTC 2007


CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	lhh at sourceware.org	2007-12-12 21:41:32

Modified files:
	rgmanager      : ChangeLog 
	rgmanager/src/daemons: groups.c slang_event.c 
	rgmanager/src/resources: default_event_script.sl 

Log message:
	Misc. minor central processing bugfixes

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/ChangeLog.diff?cvsroot=cluster&r1=1.63&r2=1.64
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/groups.c.diff?cvsroot=cluster&r1=1.41&r2=1.42
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/slang_event.c.diff?cvsroot=cluster&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/default_event_script.sl.diff?cvsroot=cluster&r1=1.1&r2=1.2

--- cluster/rgmanager/ChangeLog	2007/12/12 15:27:34	1.63
+++ cluster/rgmanager/ChangeLog	2007/12/12 21:41:32	1.64
@@ -1,5 +1,12 @@
 2007-12-12 Lon Hohberger <lhh at redhat.com>
 	* Misc changes; add missing ds.h
+	* src/resources/default*.sl: Make clusvcadm -r go to a different
+	node.  Don't notice() that we're starting a service if it's
+	disabled.
+	* src/daemons/groups.c: Make clustat not interfere with startup
+	when central processing is enabled
+	* src/daemons/slang_event.c: Make clusvcadm correctly report
+	service owner after relocate/restart/enable requests.
 
 2007-11-30 Lon Hohberger <lhh at redhat.com>
 	* Commit RIND / S-Lang script engine [untested]
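
The groups.c entry above refers to the status thread patched in the first groups.c hunk below: with central event processing enabled, the thread must not fall back to the distributed get_rg_state() read, or a service configured with autostart="0" can be reported as "stopped" instead of "disabled", which is what let clustat interfere with startup. A minimal sketch of that guard, using the predicate named in the patch but with its prototype assumed for illustration:

/* Sketch only; central_events_enabled() is the predicate used in the
   patch, its prototype here is an assumption. */
extern int central_events_enabled(void);

static int
status_check_mode(int fast_requested)
{
	/* With central processing, always take the fast, local-only
	   path so the distributed read cannot clobber a "disabled"
	   state with "stopped" during startup. */
	if (central_events_enabled())
		return 1;

	return fast_requested;
}
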
--- cluster/rgmanager/src/daemons/groups.c	2007/11/30 21:36:28	1.41
+++ cluster/rgmanager/src/daemons/groups.c	2007/12/12 21:41:32	1.42
@@ -1126,48 +1126,6 @@
 }
 
 
-#if 0
-/**
-  Send the state of the transition master to a given file descriptor.
-
-  @param fd		File descriptor to send state to
-  @param rgname		Resource group name whose state we want to send.
-  @see send_rg_states
- */
-void
-send_master_state(msgctx_t *ctx)
-{
-	rg_state_msg_t msg, *msgp = &msg;
-	event_master_t master;
-	rg_state_t *rs = &msg.rsm_state;
-
-	strncpy(rs->rs_name, "internal:CentralProcessor",
-		sizeof(rs->rs_name));
-	rs->rs_last_owner = 0;
-	rs->rs_restarts = 0;
-
-	if (event_master_info_cached(&master) < 0) {
-		rs->rs_owner = 0;
-		rs->rs_transition = master.m_master_time;
-		rs->rs_state = RG_STATE_UNINITIALIZED;
-	} else {
-		rs->rs_owner = master.m_nodeid;
-		rs->rs_transition = master.m_master_time;
-		rs->rs_state = RG_STATE_STARTED;
-	}
-
-	msgp->rsm_hdr.gh_magic = GENERIC_HDR_MAGIC;
-	msgp->rsm_hdr.gh_length = sizeof(msg);
-	msgp->rsm_hdr.gh_command = RG_STATUS;
-
-	swab_rg_state_msg_t(msgp);
-
-	if (msg_send(ctx, msgp, sizeof(msg)) < 0)
-		perror("msg_send");
-}
-#endif
-
-
 /**
   Send status from a thread because we don't want rgmanager's
   main thread to block in the case of DLM issues
@@ -1183,6 +1141,14 @@
 
 	free(arg);
 
+	if (central_events_enabled()) {
+		/* Never call get_rg_state() (distributed) if 
+		   central events are enabled, otherwise we
+		   might overwrite the rg state with 'stopped' 
+		   when it should be 'disabled' (e.g. autostart="0") */
+		fast = 1;
+	}
+
 	/* See if we have a slot... */
 	if (rg_inc_status() < 0) {
 		/* Too many outstanding status checks.  try again later. */
@@ -1327,6 +1293,7 @@
 	resource_node_t *curr;
 	rg_state_t svcblk;
 	char rg[64];
+	struct dlm_lksb lockp;
 	
 	/* Only one status thread at a time, please! */
 	if (pthread_mutex_trylock(&status_mutex) != 0)
@@ -1340,7 +1307,13 @@
 
 		/* Local check - no one will make us take a service */
 		if (get_rg_state_local(rg, &svcblk) < 0) {
-			continue;
+			if (rg_lock(rg, &lockp) != 0)
+				continue;
+			if (get_rg_state(rg, &svcblk) < 0) {
+				rg_unlock(&lockp);
+				continue;
+			}
+			rg_unlock(&lockp);
 		}
 
 		if (svcblk.rs_owner != my_id() ||
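
The last hunk above adds a fallback to the periodic status sweep: when the local copy of a resource-group state is unavailable, the code now takes the cluster lock, reads the authoritative state, and releases the lock instead of silently skipping the service. A condensed sketch of that pattern, reusing the function names from the hunk (get_rg_state_local, rg_lock, get_rg_state, rg_unlock); the exact prototypes and return conventions are assumed, and it depends on rgmanager's internal types (rg_state_t, struct dlm_lksb), so it is not standalone:

/* Sketch of the fallback read path added above; not standalone code. */
static int
read_rg_state_or_lock(char *rg, rg_state_t *svcblk)
{
	struct dlm_lksb lockp;

	/* Fast path: locally cached state, no cluster lock needed. */
	if (get_rg_state_local(rg, svcblk) == 0)
		return 0;

	/* Fallback: take the lock and read the authoritative copy,
	   mirroring the new code in the hunk above. */
	if (rg_lock(rg, &lockp) != 0)
		return -1;

	if (get_rg_state(rg, svcblk) < 0) {
		rg_unlock(&lockp);
		return -1;
	}

	rg_unlock(&lockp);
	return 0;
}
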
--- cluster/rgmanager/src/daemons/slang_event.c	2007/12/07 00:53:44	1.2
+++ cluster/rgmanager/src/daemons/slang_event.c	2007/12/12 21:41:32	1.3
@@ -487,7 +487,7 @@
 	char *svcname = NULL;
 	int *pref_list = NULL, pref_list_len = 0;
 	int *illegal_list = NULL, illegal_list_len = 0;
-	int nargs, t, x, ret = -1;
+	int nargs, t, newowner = 0, ret = -1;
 
 	nargs = SLang_Num_Function_Args;
 
@@ -528,8 +528,10 @@
 
 	/* TODO: Meat of function goes here */
 	ret = service_op_start(svcname, pref_list,
-			       pref_list_len, &x); ;
+			       pref_list_len, &newowner);
 
+	if (ret == 0 && newowner > 0)
+		ret = newowner;
 out:
 	if (svcname)
 		free(svcname);
@@ -1123,7 +1125,17 @@
 	/* XXX Send response code to caller - that 0 should be the
 	   new service owner, if there is one  */
 	if (ctx) {
-		send_ret(ctx, name, _user_return, request, 0);
+		if (_user_return > 0) {
+			/* sl_start_service() squashes return code and
+			   node ID into one value.  <0 = error, >0 =
+			   success, return-value == node id running
+			   service */
+			send_ret(ctx, name, 0, request, _user_return);
+		} else {
+			/* return value < 0 ... pass directly back;
+			   don't transpose */
+			send_ret(ctx, name, _user_return, request, 0);
+		}
 		msg_close(ctx);
 		msg_free_ctx(ctx);
 	}
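
The two slang_event.c hunks establish a small convention between the script layer and the request handler: the start helper folds the result and the new owner into one integer, where a negative value is an error passed straight back to clusvcadm and a positive value means success, with the value naming the node now running the service. A standalone toy (plain C, not rgmanager code) that decodes values under that convention; the service name and node IDs below are made up:

#include <stdio.h>

/* Decode the squashed "status or owner" value described in the hunks
   above: ret < 0 is an error code, ret > 0 is the new owner node ID,
   ret == 0 is success with no owner reported. */
static void
report(const char *svc, int ret)
{
	if (ret > 0)
		printf("%s: started, owner is node %d\n", svc, ret);
	else if (ret < 0)
		printf("%s: request failed, error %d\n", svc, ret);
	else
		printf("%s: started, owner not reported\n", svc);
}

int
main(void)
{
	report("service:example", 3);   /* success; node 3 owns it now */
	report("service:example", -1);  /* failure; error passed back  */
	return 0;
}
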
--- cluster/rgmanager/src/resources/default_event_script.sl	2007/11/30 21:36:29	1.1
+++ cluster/rgmanager/src/resources/default_event_script.sl	2007/12/12 21:41:32	1.2
@@ -192,7 +192,8 @@
 		}
 
 		(owner, state) = service_status(services[x]);
-		if ((service_state == "started") and (owner < 0)) {
+		if ((service_state == "started") and (owner < 0) and
+		    (state == "stopped")) {
 			info("Dependency met; starting ", services[x]);
 			nodes = allowed_nodes(services[x]);
 			()=move_or_start(services[x], nodes);
@@ -245,6 +246,10 @@
 
 		if (user_target > 0) {
 			for (x = 0; x < length(nodes); x++) {
+				%
+				% Put the preferred node at the front of the 
+				% list for a user-relocate operation
+				%
 				if (nodes[x] == user_target) {
 					reordered = union(user_target, nodes);
 					nodes = reordered;
@@ -262,6 +267,13 @@
 			if (service_stop(service_name) < 0) {
 				return ERR_ABORT;
 			}
+
+			%
+			% The current owner shouldn't be the default
+			% for a relocate operation
+			%
+			reordered = subtract(nodes, owner);
+			nodes = union(reordered, owner);
 		}
 
 		ret = move_or_start(service_name, nodes);
@@ -275,7 +287,10 @@
 		ret = service_stop(service_name);
 
 	} 
+
+	%
 	% todo - migrate
+	%
 
 	return ret;
 }
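
The default_event_script.sl hunks change the candidate-node ordering for a user relocate: the requested target is moved to the front of the list and the current owner to the back, so the owner is only retried as a last resort; that is what makes clusvcadm -r land on a different node. A standalone toy (plain C rather than the S-Lang script) showing the same reordering on a small list; the node numbers are made up:

#include <stdio.h>

#define MAX_NODES 16

/* Reorder a candidate list the way the script above does for a user
   relocate: preferred target first, current owner last, everyone else
   in their original order.  Toy code; assumes target and owner already
   appear in nodes[] and that lists are small. */
static int
reorder(int *nodes, int n, int target, int owner)
{
	int out[MAX_NODES], k = 0, i;

	if (target > 0)
		out[k++] = target;            /* preferred node first */
	for (i = 0; i < n; i++)
		if (nodes[i] != target && nodes[i] != owner)
			out[k++] = nodes[i];  /* others keep their order */
	if (owner > 0)
		out[k++] = owner;             /* current owner tried last */

	for (i = 0; i < k; i++)
		nodes[i] = out[i];
	return k;
}

int
main(void)
{
	int nodes[] = { 1, 2, 3, 4 };
	int i, n;

	/* Relocate off node 1 with node 3 as the requested target. */
	n = reorder(nodes, 4, 3, 1);
	for (i = 0; i < n; i++)
		printf("%d ", nodes[i]);      /* prints: 3 2 4 1 */
	printf("\n");
	return 0;
}
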



