[Cluster-devel] conga/luci cluster/form-macros cluster/resourc ...

rmccabe at sourceware.org
Wed May 30 05:54:05 UTC 2007


CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	EXPERIMENTAL
Changes by:	rmccabe at sourceware.org	2007-05-30 05:54:03

Modified files:
	luci/cluster   : form-macros resource-form-macros 
	luci/logs      : log_provider 
	luci/site/luci/Extensions: LuciClusterActions.py LuciDB.py 
	                           LuciZopeExternal.py RicciQueries.py 
	                           cluster_adapters.py 
	                           conga_constants.py 
	                           homebase_adapters.py 
	luci/site/luci/Extensions/ClusterModel: ModelBuilder.py 

Log message:
	- More cleanups
	- Sort log entries chronologically

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.198.2.2&r2=1.198.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/resource-form-macros.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.37&r2=1.37.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/logs/log_provider.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1&r2=1.1.6.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.6&r2=1.1.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.12&r2=1.1.2.13
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopeExternal.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.2&r2=1.1.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.5&r2=1.1.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.255.2.10&r2=1.255.2.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.39.2.4&r2=1.39.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.50.2.7&r2=1.50.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterModel/ModelBuilder.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.5&r2=1.1.2.6

--- conga/luci/cluster/form-macros	2007/05/18 02:36:59	1.198.2.2
+++ conga/luci/cluster/form-macros	2007/05/30 05:54:01	1.198.2.3
@@ -3627,7 +3627,7 @@
 <div metal:define-macro="nodelogs-form">
 	<h2>Recent Log Activity for <span tal:replace="request/nodename"/></h2>
 	<hr/>
-	<span tal:replace="structure python: here.getLogsForNode(request)"/>
+	<span tal:replace="structure python: here.getSystemLogs(request)"/>
 </div>
 
 <div metal:define-macro="nodeadd-form">
@@ -4660,7 +4660,12 @@
 
 <div metal:define-macro="servicedelete-form">
 	<h2>Service Delete Form</h2>
-	<tal:block tal:define="dummy python: here.delService(request)" />
+
+	<tal:block tal:define="
+		global ricci_agent ri_agent | python: here.getRicciAgentForCluster(request)" />
+
+	<span tal:define="
+		result python: here.serviceDelete(ricci_agent, request)" />
 </div>
 
 <div metal:define-macro="resources-form">
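
For reference, the servicedelete-form change above moves from a single
delService(request) call to the two-step pattern used elsewhere: look up a
ricci agent for the cluster, then hand it to the handler. A rough Python
equivalent of what the updated TAL performs (sketch only; error handling
elided):

    ricci_agent = here.getRicciAgentForCluster(request)
    result = here.serviceDelete(ricci_agent, request)
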
--- conga/luci/cluster/resource-form-macros	2007/03/15 22:08:42	1.37
+++ conga/luci/cluster/resource-form-macros	2007/05/30 05:54:01	1.37.2.1
@@ -92,7 +92,7 @@
 	<h2>Resources Remove Form</h2>
 
 	<tal:block tal:define="
-		msg python: here.delResource(here.getRicciAgentForCluster(request), request)">
+		msg python: here.deleteResource(here.getRicciAgentForCluster(request), request)">
 		<span class="error" tal:condition="msg" tal:content="msg" />
 	</tal:block>
 </div>
--- conga/luci/logs/log_provider	2006/10/30 23:00:50	1.1
+++ conga/luci/logs/log_provider	2007/05/30 05:54:01	1.1.6.1
@@ -1 +1 @@
-<pre tal:content="structure python: here.getLogsForNode(request)"></pre>
+<pre tal:content="structure python: here.getSystemLogs(request)"></pre>
--- conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/23 21:21:32	1.1.2.6
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/30 05:54:01	1.1.2.7
@@ -8,16 +8,17 @@
 from LuciSyslog import get_logger
 import RicciQueries as rq
 
-from ricci_communicator import RicciCommunicator, RicciError
+from ricci_communicator import RicciCommunicator
 
-from LuciDB import set_node_flag, getRicciAgent, delCluster, \
+from LuciDB import set_node_flag, getRicciAgent, \
 	getClusterNode, getStorageNode, NodeBusy, \
-	setNodeStatus, resolve_nodename
+	setNodeStatus, resolve_nodename, \
+	delCluster, delClusterSystem, \
+	CLUSTER_NODE_NEED_AUTH
 
 from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE, \
-	NODE_DELETE, CLUSTER_DELETE, CLUSTER_FOLDER_PATH, \
-	CLUSTERLIST, CLUSTER_NODE_NEED_AUTH, NODE_FENCE, \
-	NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, NODE_REBOOT, \
+	NODE_DELETE, CLUSTER_DELETE, CLUSTERLIST, \
+	NODE_FENCE, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, NODE_REBOOT, \
 	RESOURCE_ADD, RESOURCE_CONFIG, RESOURCE_REMOVE, \
 	SERVICE_DELETE, SERVICE_RESTART, SERVICE_START, SERVICE_STOP
 
@@ -226,7 +227,7 @@
 # Cluster node membership-related tasks
 #
 
-def nodeJoin(self, rc, clustername, nodename_resolved):
+def NodeJoinCluster(self, rc, clustername, nodename_resolved):
 	batch_number, result = rq.nodeJoinCluster(rc)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -244,17 +245,31 @@
 		return None
 	return True
 
-def nodeLeave(self, rc, clustername, nodename_resolved):
+def NodeLeaveCluster(self, rc, clustername, nodename_resolved):
+	reported_cluname = None
+	try:
+		cluster_info = rc.cluster_info()
+		reported_cluname = cluster_info[0] or cluster_info[1]
+		if not reported_cluname:
+			raise Exception, 'not a cluster member'
+		if reported_cluname.lower() != clustername.lower():
+			raise Exception, 'cluster mismatch: expected %s, got %s' \
+								% (clustername, reported_cluname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NLC0: no cluster name: %r %s' % (e, str(e)))
+		return None
+
 	if NodeBusy(self, clustername, nodename_resolved, rc) is not False:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NL0: flags still present for %s -- bailing out' \
+			luci_log.debug('NLC1: %s is busy, can\'t leave cluster yet.' \
 				% nodename_resolved)
 		return None
 
 	batch_number, result = rq.nodeLeaveCluster(rc)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL1: %s: batch_number or result is None' \
+			luci_log.debug_verbose('NLC2: %s: batch_number or result is None' \
 				% nodename_resolved)
 		return None
 
@@ -264,84 +279,37 @@
 			'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL4: failed to set flags: %s: %r %s' \
+			luci_log.debug_verbose('NLC3: failed to set flags: %s: %r %s' \
 				% (nodename_resolved, e, str(e)))
 	return True
 
-def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
+def NodeDeleteFromCluster(	self,
+							rc,
+							model,
+							clustername,
+							nodename,
+							nodename_resolved,
+							delete_cluster=False):
+
 	# We need to get a node name other than the node
 	# to be deleted, then delete the node from the cluster.conf
 	# and propagate it. We will need two ricci agents for this task,
 	# unless we are deleting the cluster itself.
 
-	if not delete_cluster:
+	if delete_cluster is False:
 		# Make sure we can find a second node before we hose anything.
-		found_one = False
-
-		path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-
-		try:
-			clusterfolder = self.restrictedTraverse(path)
-			if not clusterfolder:
-				raise Exception, 'no cluster folder at %s' % path
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND0: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
-			return None
-
-		try:
-			nodes = clusterfolder.objectItems('Folder')
-			if not nodes or len(nodes) < 1:
-				raise Exception, 'no cluster nodes in DB'
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND1: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
-
-		for node in nodes:
-			if node[1].getId().find(nodename) != (-1):
-				continue
-			# here we make certain the node is up...
-			# XXX- we should also make certain this host is still
-			# in the cluster we believe it is.
-
-			try:
-				rc2 = RicciCommunicator(node[1].getId())
-				if not rc2:
-					raise Exception, 'ND1a: rc2 is None'
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.info('ND2: ricci %s error: %r %s' \
-						% (node[0], e, str(e)))
-				continue
-
-			if not rc2.authed():
-				try:
-					setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
-				except:
-					pass
-
-				try:
-					snode = getStorageNode(self, node[0])
-					setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-				except:
-					pass
+		rc2 = getRicciAgent(self, clustername,
+				exclude_names=[ nodename_resolved ], exclude_busy=True)
 
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('ND3: %s is not authed' % node[0])
-				rc2 = None
-				continue
-			else:
-				found_one = True
-				break
-
-		if not found_one:
+		if rc2 is None:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
+				luci_log.debug_verbose('ND0: unable to find ricci agent to delete %s from %s' % (nodename_resolved, clustername))
 			return None
 
 	# First, delete cluster.conf from node to be deleted.
 	# next, have node leave cluster.
-	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
+
+	batch_number, result = rq.nodeLeaveCluster(rc, purge=False)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('ND5: batch_number and/or result is None')
@@ -352,7 +320,7 @@
 	# anyway. Now, we need to delete node from model and send out
 	# new cluster.conf
 
-	if delete_cluster:
+	if delete_cluster is True:
 		try:
 			set_node_flag(self, clustername, rc.hostname(),
 				str(batch_number), CLUSTER_DELETE,
@@ -363,32 +331,17 @@
 				luci_log.debug_verbose('ND5a: failed to set flags: %r %s' \
 					% (e, str(e)))
 	else:
-		delete_target = None
-		nodelist = model.getNodes()
-		find_node = nodename.lower()
-		for n in nodelist:
-			try:
-				if n.getName().lower() == find_node:
-					delete_target = n
-					break
-			except:
-				continue
-
-		if delete_target is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' % (nodename, clustername))
-			return None
-
 		try:
-			model.deleteNode(delete_target)
+			model.deleteNodeByName(nodename.lower())
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND6a: deleteNode %s failed: %r %s' \
-					% (delete_target.getName(), e, str(e)))
+				luci_log.debug_verbose('ND6: deleteNode %s: %r %s' \
+					% (nodename, e, str(e)))
+			return None
 
 		try:
 			model.setModified(True)
-			str_buf = model.exportModelAsString()
+			str_buf = str(model.exportModelAsString())
 			if not str_buf:
 				raise Exception, 'model string is blank'
 		except Exception, e:
@@ -398,21 +351,20 @@
 				return None
 
 		# propagate the new cluster.conf via the second node
-		batch_number, result = rq.setClusterConf(rc2, str(str_buf))
+		batch_number, result = rq.setClusterConf(rc2, str_buf)
 		if batch_number is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
 			return None
 
-	# Now we need to delete the node from the DB
-	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
 	try:
-		clusterfolder = self.restrictedTraverse(path)
-		clusterfolder.manage_delObjects([nodename_resolved])
+		ret = delClusterSystem(self, clustername, nodename_resolved)
+		if ret is not None:
+			raise Exception, ret
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ND9: error deleting %s at %s: %r %s' \
-				% (nodename_resolved, path, e, str(e)))
+			luci_log.debug_verbose('ND9: error deleting %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
 
 	if delete_cluster:
 		return True
@@ -420,7 +372,7 @@
 	try:
 		set_node_flag(self, clustername, rc2.hostname(),
 			str(batch_number), NODE_DELETE,
-			'Deleting node "%s"' % nodename_resolved)
+			'Deleting node "%s"' % nodename)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('ND10: failed to set flags: %r %s' \
@@ -431,7 +383,7 @@
 # Cluster management-related tasks.
 #
 
-def clusterStart(self, model):
+def ClusterStart(self, model):
 	if model is None:
 		return None
 
@@ -454,7 +406,8 @@
 					% (nodename_resolved, e, str(e)))
 			errors += 1
 			continue
-		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
+
+		if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('CStart1: nodeJoin %s' \
 					% nodename_resolved)
@@ -462,7 +415,7 @@
 
 	return errors
 
-def clusterStop(self, model, delete=False):
+def ClusterStop(self, model, delete=False):
 	if model is None:
 		return None
 
@@ -487,74 +440,73 @@
 			continue
 
 		if delete is True:
-			ret = nodeDelete(self, rc, model, clustername, nodename,
-					nodename_resolved, delete_cluster=True)
+			ret = NodeDeleteFromCluster(self, rc, model, clustername,
+					nodename, nodename_resolved, delete_cluster=True)
 			if ret is None:
 				if LUCI_DEBUG_MODE is True:
 					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
 				errors += 1
 		else:
-			if nodeLeave(self, rc, clustername, nodename_resolved) is None:
+			ret = NodeLeaveCluster(self, rc, clustername, nodename_resolved)
+			if ret is None:
 				if LUCI_DEBUG_MODE is True:
 					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
 						% (nodename_resolved))
 				errors += 1
 	return errors
 
-def clusterRestart(self, model):
-	snum_err = clusterStop(self, model)
+def ClusterRestart(self, model):
+	snum_err = ClusterStop(self, model)
 	if snum_err:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('cluRestart0: clusterStop: %d errs' \
+			luci_log.debug_verbose('cluRestart0: ClusterStop: %d errs' \
 				% snum_err)
-	jnum_err = clusterStart(self, model)
+
+	jnum_err = ClusterStart(self, model)
 	if jnum_err:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('cluRestart1: clusterStart: %d errs' \
+			luci_log.debug_verbose('cluRestart1: ClusterStart: %d errs' \
 				% jnum_err)
 	return snum_err + jnum_err
 
-def clusterDelete(self, model):
+def ClusterDelete(self, model):
+	try:
+		clustername = model.getClusterName()
+		if not clustername:
+			raise Exception, 'no cluster name found'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ClusterDelete0: %r %s' % (e, str(e)))
+		return None
+
 	# Try to stop all the cluster nodes before deleting any.
-	num_errors = clusterStop(self, model, delete=False)
+	num_errors = ClusterStop(self, model, delete=False)
 	if num_errors > 0:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('cluDelete: %d errors' % num_errors)
+			luci_log.debug_verbose('ClusterDelete1: %s: %d errors' \
+				% (clustername, num_errors))
 		return None
 
 	# If the cluster is stopped, delete all of the nodes.
-	num_errors = clusterStop(self, model, delete=True)
-	try:
-		clustername = model.getClusterName()
-	except Exception, e:
+	num_errors = ClusterStop(self, model, delete=True)
+	if num_errors > 0:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
+			luci_log.debug_verbose('ClusterDelete2: %s: %d errors' \
+				% (clustername, num_errors))
 		return None
 
-	if num_errors < 1:
-		try:
-			delCluster(self, clustername)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('clusterDelete1: %s: %r %s' \
-					% (clustername, e, str(e)))
-
-		try:
-			clusterfolder = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
-			if len(clusterfolder.objectItems()) < 1:
-				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
-				clusters.manage_delObjects([clustername])
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('clusterDelete2: %s %r %s' \
-					% (clustername, e, str(e)))
-		return CLUSTERLIST
-	else:
+	try:
+		ret = delCluster(self, clustername)
+		if ret is not None:
+			raise Exception, ret
+	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('clusterDelete2: %s: %d errors' \
-				% (clustername, num_errors))
+			luci_log.debug_verbose('ClusterDelete3: %s: %r %s' \
+				% (clustername, e, str(e)))
+		return None
+	return CLUSTERLIST
 
-def forceNodeReboot(self, rc, clustername, nodename_resolved):
+def NodeReboot(self, rc, clustername, nodename_resolved):
 	batch_number, result = rq.nodeReboot(rc)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -571,150 +523,35 @@
 				% (e, str(e)))
 	return True
 
-def forceNodeFence(self, clustername, nodename, nodename_resolved):
-	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-
-	try:
-		clusterfolder = self.restrictedTraverse(path)
-		if not clusterfolder:
-			raise Exception, 'no cluster folder at %s' % path
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF0: The cluster obj %s not found: %r %s' \
-				% (clustername, e, str(e)))
-		return None
-
-	try:
-		nodes = clusterfolder.objectItems('Folder')
-		if not nodes or len(nodes) < 1:
-			raise Exception, 'no cluster nodes'
-	except Exception, e:
+def NodeFence(self, clustername, nodename, nodename_resolved):
+	rc = getRicciAgent(self, clustername,
+			exclude_names=[ nodename_resolved, nodename ], exclude_busy=True)
+	if rc is None:
+		rc = getRicciAgent(self, clustername,
+				exclude_names=[ nodename_resolved, nodename ])
+	if rc is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF1: No cluster nodes for %s were found: %r %s' \
-				% (clustername, e, str(e)))
+			luci_log.debug_verbose('FNF0: no ricci to fence %s for cluster %s' \
+				% (nodename_resolved, clustername))
 		return None
-
-	found_one = False
-	for node in nodes:
-		if node[1].getId().find(nodename) != (-1):
-			continue
-
-		try:
-			rc = RicciCommunicator(node[1].getId())
-			if not rc:
-				raise Exception, 'rc is None'
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('FNF2: ricci error for host %s: %r %s' \
-					% (node[0], e, str(e)))
-			continue
-
-		if not rc.authed():
-			rc = None
-			try:
-				snode = getStorageNode(self, node[1].getId())
-				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
-
-			try:
-				setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
-
-			continue
-		found_one = True
-		break
-
-	if not found_one:
-		return None
-
-	batch_number, result = rq.nodeFence(rc, nodename)
+
+	batch_number, result = rq.nodeFence(rc, nodename_resolved)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNF3: batch_number and/or result is None')
+			luci_log.debug_verbose('FNF1: batch_number and/or result is None')
 		return None
 
 	try:
 		set_node_flag(self, clustername, rc.hostname(),
 			str(batch_number), NODE_FENCE,
-			'Node "%s" is being fenced' % nodename_resolved)
+			'Node "%s" is being fenced by node "%s"' \
+				% (nodename_resolved, rc.hostname()))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNF4: failed to set flags: %r %s' \
+			luci_log.debug_verbose('FNF2: failed to set flags: %r %s' \
 				% (e, str(e)))
 	return True
 
-#
-# Cluster-independent tasks.
-#
-
-def getLogsForNode(self, request):
-	try:
-		nodename = request['nodename']
-	except KeyError, e:
-		try:
-			nodename = request.form['nodename']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL0: no node name')
-			return 'Unable to get node name to retrieve logging information'
-
-	clustername = None
-	try:
-		clustername = request['clustername']
-	except KeyError, e:
-		try:
-			clustername = request.form['clustername']
-			if not clustername:
-				raise
-		except:
-			clustername = None
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL1: no cluster for %s' % nodename)
-	except:
-		pass
-
-	if clustername is None:
-		nodename_resolved = nodename
-	else:
-		nodename_resolved = resolve_nodename(self, clustername, nodename)
-
-	try:
-		rc = RicciCommunicator(nodename_resolved)
-	except RicciError, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GNL2: ricci error %s: %r %s' \
-				% (nodename_resolved, e, str(e)))
-		return 'Ricci error while getting logs for %s' % nodename_resolved
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GNL3: unexpected exception for %s: %r %s' \
-				% (nodename_resolved, e, str(e)))
-		return 'Ricci error while getting logs for %s' % nodename_resolved
-
-	if not rc.authed():
-		try:
-			snode = getStorageNode(self, nodename)
-			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL4: %s: %r %s' \
-					% (nodename_resolved, e, str(e)))
-
-		if clustername:
-			try:
-				cnode = getClusterNode(self, nodename, clustername)
-				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GNL5: %s: %r %s' \
-						% (nodename_resolved, e, str(e)))
-		return 'Luci is not authenticated to node %s. Reauthenticate first.' \
-			% nodename
-
-	return rq.getNodeLogs(rc)
-
 def propagateClusterConfAsync(	self,
 								model,
 								rc=None,
@@ -734,10 +571,12 @@
 		errors.append('Unable to determine cluster name')
 		return (False, { 'errors': errors, 'messages': messages })
 
-	if not rc:
+	if rc is None:
+		rc = getRicciAgent(self, clustername, exclude_busy=True)
+	if rc is None:
 		rc = getRicciAgent(self, clustername)
 
-	if not rc:
+	if rc is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('PCC1: no ricci agent for the %s cluster' \
 				% clustername)
@@ -776,3 +615,48 @@
 				% (e, str(e)))
 
 	return (True, { 'errors': errors, 'messages': messages, 'batchid': batch_id })
+
+def GetSystemLogs(self, fvars):
+	nodename = fvars['nodename']
+	if nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSL0: no node name')
+		return 'No system name was given'
+
+	clustername = fvars['clustername']
+	if clustername is None:
+		nodename_resolved = nodename
+	else:
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+	try:
+		rc = RicciCommunicator(nodename_resolved)
+		if not rc:
+			raise Exception, 'no rc'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSL1: unexpected exception for %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+		return 'Ricci error while getting logs for %s' % nodename_resolved
+
+	if not rc.authed():
+		try:
+			snode = getStorageNode(self, nodename_resolved)
+			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GSL2: %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
+
+		if clustername is not None:
+			try:
+				cnode = getClusterNode(self, nodename_resolved, clustername)
+				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GSL3: %s: %r %s' \
+						% (nodename_resolved, e, str(e)))
+		return 'Luci is not authenticated to %s. Reauthenticate first.' \
+			% nodename
+
+	return rq.getNodeLogs(rc)
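
NodeFence above shows the intended use of the new getRicciAgent keyword
arguments: ask first for an idle agent (exclude_busy=True), fall back to any
authenticated agent, and always exclude the node being acted upon. A caller
following the same pattern might look like this (sketch; getRicciAgent as
extended in LuciDB.py below):

    rc = getRicciAgent(self, clustername,
            exclude_names=[ nodename_resolved ], exclude_busy=True)
    if rc is None:
        rc = getRicciAgent(self, clustername,
                exclude_names=[ nodename_resolved ])
    if rc is None:
        return None  # no usable ricci agent in this cluster
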
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/23 21:21:32	1.1.2.12
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/30 05:54:02	1.1.2.13
@@ -12,9 +12,13 @@
 from LuciSyslog import get_logger
 
 from conga_constants import CLUSTER_FOLDER_PATH, BATCH_ID, TASKTYPE, \
-	FLAG_DESC, CLUSTER_ADD, CLUSTER_NODE_ADDED, CLUSTER_NODE_NEED_AUTH, \
-	LAST_STATUS, PLONE_ROOT, STORAGE_FOLDER_PATH, CLUSTER_NODE_NOT_MEMBER, \
-	LUCI_DEBUG_MODE
+	FLAG_DESC, CLUSTER_ADD, LAST_STATUS, PLONE_ROOT, \
+	STORAGE_FOLDER_PATH, LUCI_DEBUG_MODE
+
+# Cluster node exception attribute flags
+CLUSTER_NODE_NEED_AUTH	= 0x01
+CLUSTER_NODE_NOT_MEMBER	= 0x02
+CLUSTER_NODE_ADDED		= 0x04
 
 luci_log = get_logger()
 
@@ -730,7 +734,7 @@
 	allowed = dict(map(lambda x: [ x[0], None ], allowed_systems_list))
 	return allowed.has_key(hostname)
 
-def getRicciAgent(self, clustername):
+def getRicciAgent(self, clustername, exclude_names=None, exclude_busy=False):
 	try:
 		perm = cluster_permission_check(clustername)
 		if not perm:
@@ -770,25 +774,24 @@
 
 	cluname = clustername.lower()
 	for node in nodes:
-		try:
-			hostname = node[1].getId()
-		except:
-			try:
-				hostname = node[0]
-			except:
-				continue
+		hostname = node[0]
+
+		if exclude_names is not None and hostname in exclude_names:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA5: %s is in the excluded names list, excluding it' % hostname)
+			continue
 
 		try:
 			rc = RicciCommunicator(hostname)
 			if not rc:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GRA5: rc is None')
+					luci_log.debug_verbose('GRA6: rc is None')
 				continue
 
 			ricci_hostname = rc.hostname()
 			if not ricci_hostname:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GRA6: ricci_hostname is blank')
+					luci_log.debug_verbose('GRA7: ricci_hostname is blank')
 				continue
 
 			clu_info = rc.cluster_info()
@@ -797,35 +800,59 @@
 			if not cur_name:
 				cur_name = None
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GRA7: cluster name is none for %s' \
+					luci_log.debug_verbose('GRA8: cluster name is none for %s' \
 						% ricci_hostname)
 
 			cur_alias = str(clu_info[1]).strip().lower()
 			if not cur_alias:
 				cur_alias = None
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GRA8: no cluster alias for %s' \
+					luci_log.debug_verbose('GRA9: no cluster alias for %s' \
 						% ricci_hostname)
 
 			if (cur_name is not None and cluname != cur_name) and (cur_alias is not None and cluname != cur_alias):
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug('GRA9: node %s reports it\'s in cluster [%s:%s] we expect %s' % (hostname, clu_info[0], clu_info[1], cluname))
+					luci_log.debug('GRA10: node %s reports it\'s in cluster [%s:%s] we expect %s' % (ricci_hostname, clu_info[0], clu_info[1], cluname))
 				setNodeStatus(self, node, CLUSTER_NODE_NOT_MEMBER)
 				continue
 
-			if rc.authed():
-				return rc
-
-			setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
+			if not rc.authed():
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA11: %s is not authenticated' \
+						% ricci_hostname)
+				setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
+				continue
 		except Exception, eout:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GRA10: %r %s' % (eout, str(eout)))
+				luci_log.debug_verbose('GRA12: %r %s' % (eout, str(eout)))
+			continue
+
+		if exclude_busy is True:
+			if NodeBusy(self, cluname, ricci_hostname, rc) is not False:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA13: %s is busy, excluding' \
+						% ricci_hostname)
+				continue
+		return rc
 
 	if LUCI_DEBUG_MODE is True:
-		luci_log.debug('GRA11: no ricci agent could be found for cluster %s' \
+		luci_log.debug('GRA14: no ricci agent could be found for cluster %s' \
 			% cluname)
 	return None
 
+def getClusterDBObj(self, clustername):
+	try:
+		cluster_path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+		nodelist = self.restrictedTraverse(cluster_path)
+		if not nodelist:
+			raise Exception, 'no nodelist'
+		return nodelist
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCDB0: %s -> %s: %r %s' \
+				% (clustername, cluster_path, e, str(e)))
+	return None
+
 def getClusterDBNodes(self, clustername):
 	try:
 		cluster_path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
@@ -837,6 +864,18 @@
 		return []
 	return nodelist
 
+def getClusterFlags(self, cluname):
+	try:
+		path = '%s%s' % (CLUSTER_FOLDER_PATH, cluname)
+		clusterfolder = self.restrictedTraverse(path)
+		if not clusterfolder:
+			raise Exception, 'clusterfolder is None'
+		return clusterfolder.objectItems('ManagedSystem')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCF0: cluster %s [%s] folder missing: %r %s -- returning None' % (cluname, path, e, str(e)))
+	return None
+
 def getClusterStatusDB(self, clustername):
 	results = list()
 	vals = {}
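
The CLUSTER_NODE_* values moved into LuciDB.py above are single-bit flags
(0x01, 0x02, 0x04), so one status value can carry several of them at once.
A minimal illustration of combining and testing them (not code from this
patch):

    status = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED  # 0x05
    if status & CLUSTER_NODE_NEED_AUTH:
        pass  # node must be (re)authenticated first
    if not (status & CLUSTER_NODE_NOT_MEMBER):
        pass  # node is still believed to be a cluster member
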
--- conga/luci/site/luci/Extensions/Attic/LuciZopeExternal.py	2007/05/18 03:30:44	1.1.2.2
+++ conga/luci/site/luci/Extensions/Attic/LuciZopeExternal.py	2007/05/30 05:54:02	1.1.2.3
@@ -5,12 +5,18 @@
 # GNU General Public License as published by the
 # Free Software Foundation.
 
+#
+# The only purpose of this file is to aggregate all the functions
+# called by Zope External Methods.
+#
+
 from homebase_adapters import getUserPerms, homebaseControl, \
 	getDefaultUser
 
-from cluster_adapters import addResource, clusterTaskProcess, \
-	createCluChooser, createCluConfigTree, delResource, delService, \
-	getClusterOS, getClusterURL, getLogsForNode, getRicciAgentForCluster, \
+from cluster_adapters import clusterTaskProcess, resourceAdd, \
+	resourceDelete, \
+	createCluChooser, createCluConfigTree, serviceDelete, \
+	getClusterOS, getClusterURL, getSystemLogs, getRicciAgentForCluster, \
 	isClusterBusy, nodeTaskProcess, process_cluster_conf_editor, \
 	serviceMigrate, serviceRestart, serviceStart, serviceStop
 
--- conga/luci/site/luci/Extensions/Attic/RicciQueries.py	2007/05/18 05:23:55	1.1.2.5
+++ conga/luci/site/luci/Extensions/Attic/RicciQueries.py	2007/05/30 05:54:02	1.1.2.6
@@ -399,6 +399,13 @@
 
 	time_now = time()
 	entry_list = list()
+
+	try:
+		# Show older entries first.
+		log_entries.sort(lambda x, y: int(y.getAttribute('age')) - int(x.getAttribute('age')))
+	except:
+		pass
+
 	for i in log_entries:
 		try:
 			log_msg = i.getAttribute('msg')
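
The comparator added above orders entries by their 'age' attribute,
descending, so the oldest entry (largest age) is emitted first. Assuming
'age' is the entry's age in seconds at query time, the equivalent key-based
sort would be (sketch):

    # Oldest (largest age) first -- same ordering as the cmp-based sort.
    log_entries.sort(key=lambda e: int(e.getAttribute('age')), reverse=True)
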
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/23 21:21:32	1.255.2.10
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/30 05:54:02	1.255.2.11
@@ -29,7 +29,20 @@
 from homebase_adapters import parseHostForm
 from LuciClusterInfo import getClusterInfo, getModelBuilder, getModelForCluster
 
-from conga_constants import *
+from conga_constants import BATCH_ID, CLUNAME, CLUNODE_CREATE_ERRORS, \
+	CLUSTER, CLUSTER_ADD, CLUSTER_CONFIG, CLUSTER_DAEMON, CLUSTER_DELETE, \
+	CLUSTER_FOLDER_PATH, CLUSTERLIST, CLUSTER_RESTART, CLUSTERS, \
+	CLUSTER_START, CLUSTER_STOP, DISABLE_SVC_TASK, ENABLE_SVC_TASK, \
+	FDOM, FDOM_ADD, FDOM_CONFIG, FDOMS, FENCEDEV, FENCEDEV_ADD, \
+	FENCEDEV_CONFIG, FENCEDEV_NODE_CONFIG, FENCEDEVS, FLAG_DESC, \
+	INSTALL_TASK, LAST_STATUS, LUCI_DEBUG_MODE, NODE, NODE_ADD, \
+	NODE_CONFIG, NODE_DELETE, NODE_FENCE, NODE_GRID, NODE_JOIN_CLUSTER, \
+	NODE_LEAVE_CLUSTER, NODE_LIST, NODE_REBOOT, NODES, PAGETYPE, \
+	POSSIBLE_REBOOT_MESSAGE, PRE_CFG, PRE_INSTALL, PRE_JOIN, \
+	REBOOT_TASK, REDIRECT_MSG, RESOURCE, RESOURCE_ADD, RESOURCE_CONFIG, \
+	RESOURCES, RICCI_CONNECT_FAILURE, RICCI_CONNECT_FAILURE_MSG, \
+	SEND_CONF, SERVICE, SERVICE_ADD, SERVICE_CONFIG, SERVICE_LIST, \
+	SERVICES, START_NODE, TASKTYPE, VM_ADD, VM_CONFIG
 
 from FenceHandler import validateNewFenceDevice, \
 	validateFenceDevice, validate_fenceinstance, \
@@ -39,9 +52,10 @@
 	batch_status, extract_module_status
 
 from LuciDB import manageCluster, createClusterSystems, \
-	setNodeStatus, getStorageNode, \
-	getClusterNode, delCluster, buildClusterCreateFlags, \
-	resolve_nodename, set_node_flag, getRicciAgent
+	setNodeStatus, getStorageNode, getClusterFlags, \
+	getClusterNode, buildClusterCreateFlags, getClusterDBObj, \
+	resolve_nodename, set_node_flag, getRicciAgent, \
+	CLUSTER_NODE_NEED_AUTH
 
 from LuciZopePerm import havePermCreateCluster
 
@@ -319,6 +333,7 @@
 
 def validateAddClusterNode(self, request):
 	import time
+
 	try:
 		request.SESSION.delete('add_node')
 	except:
@@ -3174,13 +3189,17 @@
 
 	redirect_page = NODES
 	if task == CLUSTER_STOP:
-		clusterStop(self, model)
+		from LuciClusterActions import ClusterStop
+		ClusterStop(self, model)
 	elif task == CLUSTER_START:
-		clusterStart(self, model)
+		from LuciClusterActions import ClusterStart
+		ClusterStart(self, model)
 	elif task == CLUSTER_RESTART:
-		clusterRestart(self, model)
+		from LuciClusterActions import ClusterRestart
+		ClusterRestart(self, model)
 	elif task == CLUSTER_DELETE:
-		ret = clusterDelete(self, model)
+		from LuciClusterActions import ClusterDelete
+		ret = ClusterDelete(self, model)
 		if ret is not None:
 			redirect_page = ret
 	else:
@@ -3191,7 +3210,7 @@
 		% (request['URL'], redirect_page, model.getClusterName()))
 
 def nodeTaskProcess(self, model, request):
-	fvar = GetReqVars(request, [ 'task', 'clustername', 'nodename' ])
+	fvar = GetReqVars(request, [ 'task', 'clustername', 'nodename', 'URL' ])
 
 	task = fvar['task']
 	clustername = fvar['clustername']
@@ -3270,7 +3289,8 @@
 			return (False, { 'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
 
 	if task == NODE_LEAVE_CLUSTER:
-		if nodeLeave(self, rc, clustername, nodename_resolved) is None:
+		from LuciClusterActions import NodeLeaveCluster
+		if NodeLeaveCluster(self, rc, clustername, nodename_resolved) is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('NTP8: nodeLeave failed')
 			return (False, {'errors': [ 'Node "%s" failed to leave cluster "%s"' % (nodename_resolved, clustername) ]})
@@ -3278,7 +3298,8 @@
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_JOIN_CLUSTER:
-		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
+		from LuciClusterActions import NodeJoinCluster
+		if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('NTP9: nodeJoin failed')
 			return (False, {'errors': [ 'Node "%s" failed to join cluster "%s"' % (nodename_resolved, clustername) ]})
@@ -3286,7 +3307,8 @@
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_REBOOT:
-		if forceNodeReboot(self, rc, clustername, nodename_resolved) is None:
+		from LuciClusterActions import NodeReboot
+		if NodeReboot(self, rc, clustername, nodename_resolved) is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('NTP10: nodeReboot failed')
 			return (False, {'errors': [ 'Node "%s" failed to reboot' \
@@ -3295,16 +3317,17 @@
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_FENCE:
-		if forceNodeFence(self, clustername, nodename, nodename_resolved) is None:
+		from LuciClusterActions import NodeFence
+		if NodeFence(self, clustername, nodename, nodename_resolved) is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('NTP11: nodeFence failed')
 			return (False, {'errors': [ 'Fencing of node "%s" failed' \
 				% nodename_resolved]})
-
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_DELETE:
-		if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved) is None:
+		from LuciClusterActions import NodeDeleteFromCluster
+		if NodeDeleteFromCluster(self, rc, model, clustername, nodename, nodename_resolved) is None:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('NTP12: nodeDelete failed')
 			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
@@ -3318,51 +3341,26 @@
 	isBusy = False
 	redirect_message = False
 	nodereports = list()
-	busy_map['nodereports'] = nodereports
-
-	try:
-		cluname = req['clustername']
-	except KeyError, e:
-		try:
-			cluname = req.form['clustername']
-		except:
-			try:
-				cluname = req.form['clustername']
-			except:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('ICB0: No cluster name -- returning empty map')
-				return busy_map
 
-	path = '%s%s' % (CLUSTER_FOLDER_PATH, cluname)
+	fvar = GetReqVars(req, [ 'clustername', 'URL' ])
+	busy_map['nodereports'] = nodereports
 
-	try:
-		clusterfolder = self.restrictedTraverse(path)
-		if not clusterfolder:
-			raise Exception, 'clusterfolder is None'
-	except Exception, e:
+	cluname = fvar['clustername']
+	if cluname is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ICB1: cluster %s [%s] folder missing: %r %s -- returning empty map' % (cluname, path, e, str(e)))
+			luci_log.debug_verbose('ICB0: No cluster name, returning empty map')
 		return busy_map
-	except:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ICB2: cluster %s [%s] folder missing: returning empty map' % (cluname, path))
 
-	try:
-		items = clusterfolder.objectItems('ManagedSystem')
-		if not items or len(items) < 1:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ICB3: NOT BUSY: no flags at %s for cluster %s' % (cluname, path))
-			# This returns an empty map, and indicates not busy
-			return busy_map
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('ICB4: An error occurred while looking for cluster %s flags at path %s: %r %s' % (cluname, path, e, str(e)))
-		return busy_map
-	except:
+	items = getClusterFlags(self, cluname)
+	if not items or len(items) < 1:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('ICB5: An error occurred while looking for cluster %s flags at path %s' % (cluname, path))
+			luci_log.debug_verbose('ICB3: NOT BUSY: no flags for cluster %s' \
+				% cluname)
+		# This returns an empty map, and indicates not busy
 		return busy_map
 
+	clusterfolder = getClusterDBObj(cluname)
+
 	if LUCI_DEBUG_MODE is True:
 		luci_log.debug_verbose('ICB6: %s is busy: %d flags' \
 			% (cluname, len(items)))
@@ -3401,7 +3399,7 @@
 			ricci = item[0].split('____')
 
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ICB6A: using host %s for rc for item %s' \
+				luci_log.debug_verbose('ICB6A: using rc host %s for item %s' \
 					% (ricci[0], item[0]))
 
 			try:
@@ -3576,6 +3574,7 @@
 					nodereports.append(node_report)
 					propslist = list()
 					propslist.append(LAST_STATUS)
+
 					try:
 						item[1].manage_delProperties(propslist)
 						item[1].manage_addProperty(LAST_STATUS, creation_status, 'int')
@@ -3595,6 +3594,7 @@
 				rc = None
 				finished = -1
 				err_msg = ''
+
 				if LUCI_DEBUG_MODE is True:
 					luci_log.debug_verbose('ICB15: ricci error: %s: %r %s' \
 						% (ricci[0], e, str(e)))
@@ -3745,9 +3745,8 @@
 			luci_log.debug_verbose('GRI1: missing res name')
 		return {}
 
-	cluname = fvars['clustername']
-	baseurl = fvars['URL']
-
+	#cluname = fvars['clustername']
+	#baseurl = fvars['URL']
 	#CALL
 	return {}
 
@@ -3772,43 +3771,47 @@
 def serviceRestart(self, rc, req):
 	from LuciClusterActions import RestartCluSvc
 
-	fvars = GetReqVars(request, [ 'clustername', 'servicename', 'nodename' ])
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
 	ret = RestartCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (req['URL'], SERVICE_LIST, cluname))
+			% (fvars['URL'], SERVICE_LIST, fvars['clustername']))
 	else:
 		return ret
 
 def serviceStop(self, rc, req):
 	from LuciClusterActions import StopCluSvc
 
-	fvars = GetReqVars(request, [ 'clustername', 'servicename', 'nodename' ])
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
 	ret = StopCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (req['URL'], SERVICE_LIST, cluname))
+			% (fvars['URL'], SERVICE_LIST, fvars['clustername']))
 	else:
 		return ret
 
 def serviceStart(self, rc, req):
 	from LuciClusterActions import StartCluSvc
 
-	fvars = GetReqVars(request, [ 'clustername', 'servicename', 'nodename' ])
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
 	ret = StartCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (req['URL'], SERVICE_LIST, cluname))
+			% (fvars['URL'], SERVICE_LIST, fvars['clustername']))
 	else:
 		return ret
 
 def serviceDelete(self, rc, req):
 	from LuciClusterActions import DeleteCluSvc
 
-	fvars = GetReqVars(request, [ 'clustername', 'servicename', 'nodename' ])
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
 	try:
 		model = LuciExtractCluModel(self, req,
 					cluster_name=fvars['clustername'])
@@ -3821,26 +3824,28 @@
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (req['URL'], SERVICES, clustername))
+			% (fvars['URL'], SERVICES, fvars['clustername']))
 	else:
 		return ret
 
 def serviceMigrate(self, rc, req):
 	from LuciClusterActions import MigrateCluSvc
 
-	fvars = GetReqVars(request, [ 'clustername', 'servicename', 'nodename' ])
+	fvars = GetReqVars(req,
+				[ 'clustername', 'servicename', 'nodename', 'URL' ])
 	ret = MigrateCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (req['URL'], SERVICE_LIST, cluname))
+			% (fvars['URL'], SERVICE_LIST, fvars['clustername']))
 	else:
 		return ret
 
 def resourceDelete(self, rc, req):
 	from LuciClusterActions import DeleteResource
 
-	fvars = GetReqVars(request, [ 'clustername', 'resourcename', 'nodename' ])
+	fvars = GetReqVars(req,
+		[ 'clustername', 'resourcename', 'nodename', 'URL' ])
 	try:
 		model = LuciExtractCluModel(self, req,
 					cluster_name=fvars['clustername'])
@@ -3855,7 +3860,7 @@
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (req['URL'], RESOURCES, fvars['clustername']))
+			% (fvars['URL'], RESOURCES, fvars['clustername']))
 	else:
 		return ret
 
@@ -3865,7 +3870,7 @@
 	try:
 		cluname = model.getClusterName()
 		rc = getRicciAgent(self, cluname)
-		if not rc:
+		if rc is None:
 			raise Exception, 'no rc'
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
@@ -3881,23 +3886,12 @@
 	if ret is None:
 		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (req['URL'], RESOURCES, clustername))
+			% (req['URL'], RESOURCES, cluname))
 	else:
 		return ret
 
-def nodeJoin(self, rc, cluname, nodename):
-	return None
-def nodeLeave(self, rc, cluname, nodename):
-	return None
-def nodeDelete(self, rc, cluname, nodename):
-	return None
-
-
-def clusterStart(self, model):
-	return None
-def clusterStop(self, model):
-	return None
-def clusterRestart(self, model):
-	return None
-def clusterDelete(self, model):
-	return None
+def getSystemLogs(self, req):
+	from LuciClusterActions import GetSystemLogs
+
+	fvars = GetReqVars(req, [ 'clustername', 'nodename' ])
+	return GetSystemLogs(self, fvars)
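
Several of the service handlers above had two latent NameErrors: they called
GetReqVars(request, ...) inside functions whose parameter is named req, and
redirected using an undefined cluname. Every handler now follows the same
corrected pattern: read the form variables once, then use the returned map
throughout (sketch, using serviceStop's handler as the example):

    fvars = GetReqVars(req,
                [ 'clustername', 'servicename', 'nodename', 'URL' ])
    ret = StopCluSvc(self, rc, fvars)
    if ret is None:
        response = req.RESPONSE
        response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true'
            % (fvars['URL'], SERVICE_LIST, fvars['clustername']))
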
--- conga/luci/site/luci/Extensions/conga_constants.py	2007/05/15 21:42:21	1.39.2.4
+++ conga/luci/site/luci/Extensions/conga_constants.py	2007/05/30 05:54:02	1.39.2.5
@@ -142,11 +142,6 @@
 
 REDIRECT_MSG = ' -- You will be redirected in 5 seconds.'
 
-# Cluster node exception attribute flags
-CLUSTER_NODE_NEED_AUTH	= 0x01
-CLUSTER_NODE_NOT_MEMBER	= 0x02
-CLUSTER_NODE_ADDED		= 0x04
-
 # Debugging parameters. Set LUCI_DEBUG_MODE to True and LUCI_DEBUG_VERBOSITY
 # to >= 2 to get full debugging output in syslog (LOG_DAEMON/LOG_DEBUG).
 
--- conga/luci/site/luci/Extensions/homebase_adapters.py	2007/05/22 02:45:54	1.50.2.7
+++ conga/luci/site/luci/Extensions/homebase_adapters.py	2007/05/30 05:54:02	1.50.2.8
@@ -5,7 +5,7 @@
 # GNU General Public License as published by the
 # Free Software Foundation.
 
-from conga_constants import PLONE_ROOT, CLUSTER_NODE_NEED_AUTH, \
+from conga_constants import PLONE_ROOT, \
 	STORAGE_FOLDER_PATH, CLUSTER_FOLDER_PATH, LUCI_DEBUG_MODE
 
 from RicciQueries import getClusterConf
@@ -14,7 +14,8 @@
 
 from LuciDB import delCluster, clearNodeStatus, delSystem, \
 	getClusterNode, getClusters, getStorage, getStorageNode, \
-	manageCluster
+	manageCluster, \
+	CLUSTER_NODE_NEED_AUTH
 
 from LuciZopePerm import havePermAddCluster, havePermRemCluster, \
 	havePermAddUser, havePermDelUser, havePermEditPerms, \
--- conga/luci/site/luci/Extensions/ClusterModel/Attic/ModelBuilder.py	2007/05/22 21:52:05	1.1.2.5
+++ conga/luci/site/luci/Extensions/ClusterModel/Attic/ModelBuilder.py	2007/05/30 05:54:02	1.1.2.6
@@ -544,12 +544,13 @@
     self.isModified = True
 
   def retrieveNodeByName(self, name):
-    nodes = self.getNodes()
-    for node in nodes:
-      if node.getName() == name:
-        return node
+    ret = filter(lambda x: x.getName() == name, self.getNodes())
+    if len(ret) != 1:
+      raise KeyError, name
+    return ret[0]
 
-    raise GeneralError('FATAL', "Couldn't find node name in current node list")
+  def deleteNodeByName(self, name):
+    return self.deleteNode(self.retrieveNodeByName(name))
 
   def retrieveServiceByName(self, name):
     svcs = self.getServices()
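
deleteNodeByName is the helper NodeDeleteFromCluster in LuciClusterActions.py
now calls; retrieveNodeByName raises KeyError when the given name does not
match exactly one node. A caller sketch mirroring the usage earlier in this
patch:

    try:
        model.deleteNodeByName(nodename.lower())
        model.setModified(True)
    except KeyError:
        pass  # no node (or more than one) by that name in the model
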



