[Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...

rmccabe at sourceware.org rmccabe at sourceware.org
Wed May 23 21:21:39 UTC 2007


CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	EXPERIMENTAL
Changes by:	rmccabe at sourceware.org	2007-05-23 21:21:37

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py LuciDB.py 
	                           cluster_adapters.py 
	luci/site/luci/Extensions/ClusterModel: Ip.py 

Log message:
	more cleanup

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.5&r2=1.1.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.11&r2=1.1.2.12
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.255.2.9&r2=1.255.2.10
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterModel/Ip.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.2&r2=1.1.2.3

--- conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/22 21:52:03	1.1.2.5
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/23 21:21:32	1.1.2.6
@@ -11,7 +11,7 @@
 from ricci_communicator import RicciCommunicator, RicciError
 
 from LuciDB import set_node_flag, getRicciAgent, delCluster, \
-	getClusterNode, getStorageNode, noNodeStatusPresent, \
+	getClusterNode, getStorageNode, NodeBusy, \
 	setNodeStatus, resolve_nodename
 
 from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE, \
@@ -31,6 +31,12 @@
 	svcname = fvars['servicename']
 	cluname = fvars['clustername']
 
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
 	batch_number, result = rq.restartService(rc, svcname)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -50,6 +56,12 @@
 	cluname = fvars['clustername']
 	nodename = fvars['nodename']
 
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
 	batch_number, result = rq.startService(rc, svcname, nodename)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -74,6 +86,12 @@
 	svcname = fvars['servicename']
 	cluname = fvars['clustername']
 
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
 	batch_number, result = rq.stopService(rc, svcname)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -92,6 +110,12 @@
 	svcname = fvars['servicename']
 	cluname = fvars['clustername']
 
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
 	try:
 		model.deleteService(svcname)
 	except Exception, e:
@@ -109,6 +133,12 @@
 	cluname = fvars['clustername']
 	nodename = fvars['nodename']
 
+	if svcname is None or cluname is None or nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc0: svc: %s, clu: %s, nn: %s' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'A cluster service name, the cluster name, and the target node name must be given' ] })
+
 	batch_number, result = rq.migrateService(rc, svcname, nodename)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
@@ -128,49 +158,66 @@
 # Cluster resource-related tasks
 #
 
-def DeleteResource(self, rc, fvars, model):
-	errstr = 'An error occurred while attempting to set the new cluster.conf'
-	resname = fvars['resourcename']
+def DeleteResource(self, rc, model, resname):
+	errstr = 'An error occurred while attempting to delete this cluster resource'
+	if resname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteResource0: no res name')
+		return (False, { 'errors': [ '%s: no resource name was given' % errstr ]})
 
 	try:
 		model.deleteResource(resname)
 	except KeyError, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource4: cant find res %s' % resname)
-		return '%s: the specified resource was not found' % errstr
+			luci_log.debug_verbose('DeleteResource1: no res %s: %r %s' \
+				% (resname, e, str(e)))
+		return (False, { 'errors': [ '%s: no resource named "%s" was found' % (errstr, resname) ]})
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource4: cant find res %s' % resname)
-		return '%s: the specified resource was not found' % errstr
+			luci_log.debug_verbose('DeleteResource2: err: %s: %r %s' \
+				% (resname, e, str(e)))
+		return (False, { 'errors': [ '%s: unable to delete resource "%s"' % (errstr, resname) ]})
 
 	ret = propagateClusterConfAsync(self, model, rc,
-			RESOURCE_REMOVE, 'Removing resource "%s"' % resname)
+			RESOURCE_REMOVE, 'Removing cluster resource "%s"' % resname)
 	if ret[0] is False:
 		return ret
 
-def AddResource(self, rc, fvars, model, res):
+def AddResource(self, rc, model, res):
+	resname = None
 	try:
+		resname = res.getName()
 		model.getResourcesPtr().addChild(res)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource2: %r %s' % (e, str(e)))
-		return 'Unable to add the new resource'
+			luci_log.debug_verbose('AddResource0: %r %s' % (e, str(e)))
+		if resname is not None:
+			errstr = 'Unable to add new resource "%s"' % resname
+		else:
+			errstr = 'Unable to add this new resource'
+		return (False, { 'errors': [ errstr ] })
 
 	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_ADD,
-			'Creating cluster resource "%s"' % res.getName())
+			'Creating new cluster resource "%s"' % resname)
 	if ret[0] is False:
 		return ret
 
-def EditResource(self, rc, fvars, model, res):
+def EditResource(self, rc, model, res):
+	resname = None
 	try:
+		resname = res.getName()
 		model.getResourcesPtr().addChild(res)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('EditResource2: %r %s' % (e, str(e)))
-		return 'Unable to the resource'
+			luci_log.debug_verbose('EditResource0: %r %s' % (e, str(e)))
+		if resname is not None:
+			errstr = 'Unable to edit cluster resource "%s"' % resname
+		else:
+			errstr = 'Unable to edit this cluster resource'
+		return (False, { 'errors': [ errstr ] })
 
 	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_CONFIG,
-				'Configuring resource "%s"' % res.getName())
+				'Configuring cluster resource "%s"' % resname)
 
 	if ret[0] is False:
 		return ret
@@ -194,39 +241,21 @@
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('NJ1: failed to set flags: %r %s' \
 				% (e, str(e)))
+		return None
 	return True
 
 def nodeLeave(self, rc, clustername, nodename_resolved):
-	path = '%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename_resolved)
-
-	try:
-		nodefolder = self.restrictedTraverse(path)
-		if not nodefolder:
-			raise Exception, 'cannot find database object at %s' % path
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NLO: node_leave_cluster err: %r %s' % (e, str(e)))
-		return None
-
-	objname = '%s____flag' % nodename_resolved
-	fnpresent = noNodeStatusPresent(self, nodefolder, objname, nodename_resolved)
-
-	if fnpresent is None:
+	if NodeBusy(self, clustername, nodename_resolved, rc) is not False:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NL1: An error checking flags for %s' \
-				% nodename_resolved)
-		return None
-
-	if fnpresent is False:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NL2: flags still present for %s -- bailing out' \
+			luci_log.debug('NL0: flags still present for %s -- bailing out' \
 				% nodename_resolved)
 		return None
 
 	batch_number, result = rq.nodeLeaveCluster(rc)
 	if batch_number is None or result is None:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL3: nodeLeaveCluster error: batch_number and/or result is None')
+			luci_log.debug_verbose('NL1: %s: batch_number or result is None' \
+				% nodename_resolved)
 		return None
 
 	try:
@@ -235,8 +264,8 @@
 			'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL4: failed to set flags: %r %s' \
-				% (e, str(e)))
+			luci_log.debug_verbose('NL4: failed to set flags: %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
 	return True
 
 def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
@@ -551,7 +580,8 @@
 			raise Exception, 'no cluster folder at %s' % path
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF0: The cluster folder %s could not be found: %r %s' % (clustername, e, str(e)))
+			luci_log.debug('FNF0: The cluster obj %s not found: %r %s' \
+				% (clustername, e, str(e)))
 		return None
 
 	try:
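
Note on the new calling convention: DeleteResource, AddResource and EditResource now take the resource (or its name) directly instead of the form-variable dictionary, and report failure as (False, { 'errors': [ ... ] }) rather than a bare string, returning None once the configuration change has been queued. A minimal caller sketch, modelled on resourceDelete() in cluster_adapters.py (the function name resource_delete_view is hypothetical, and RESOURCES is assumed to be the resources-page constant from conga_constants):

    from LuciClusterActions import DeleteResource
    from conga_constants import RESOURCES

    def resource_delete_view(self, rc, req, model, resname):
        # DeleteResource() returns None once the new cluster.conf has been
        # queued for propagation, and (False, { 'errors': [ ... ] }) on
        # failure -- the same convention the service helpers use.
        ret = DeleteResource(self, rc, model, resname)
        if ret is None:
            # success: send the browser back to the resources page
            req.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true'
                % (req['URL'], RESOURCES, model.getClusterName()))
            return None
        # failure: hand the error dictionary back to the caller
        return ret
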
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/22 21:52:05	1.1.2.11
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/23 21:21:32	1.1.2.12
@@ -102,12 +102,25 @@
 			luci_log.debug_verbose('SNF0: %r %s' % (e, errmsg))
 		raise Exception, errmsg
 
-def noNodeStatusPresent(self, nodefolder, flagname, hostname):
+def NodeBusy(self, clustername, nodename, rc=None):
+	try:
+		path = '%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename)
+		nodefolder = self.restrictedTraverse(path)
+		if not nodefolder:
+			raise Exception, 'cannot find database object at %s' % path
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NodeBusy0: (%s,%s) %r %s' \
+				% (clustername, nodename, e, str(e)))
+		return None
+
+	flagname = '%s____flag' % nodename
+
 	try:
 		items = nodefolder.objectItems('ManagedSystem')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NNFP0: error getting flags for %s: %r %s' \
+			luci_log.debug('NodeBusy1: error getting flags for %s: %r %s' \
 				% (nodefolder[0], e, str(e)))
 		return None
 
@@ -115,43 +128,51 @@
 		if item[0] != flagname:
 			continue
 
-		# a flag already exists... try to delete it
-		try:
-			# hostname must be a FQDN
-			rc = RicciCommunicator(hostname)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.info('NNFP1: ricci error %s: %r %s' \
-					% (hostname, e, str(e)))
-			return None
+		# A flag already exists. Check to see whether we're done.
+		if rc is None:
+			try:
+				rc = RicciCommunicator(nodename)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('NodeBusy2: ricci error %s: %r %s' \
+						% (nodename, e, str(e)))
+				# We can't know if we're done or not; err on the
+				# side of caution.
+				return True
 
 		if not rc.authed():
 			try:
-				snode = getStorageNode(self, hostname)
+				snode = getStorageNode(self, nodename)
 				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('NodeBusy3: %s: %r %s' \
+						% (nodename, e, str(e)))
 			if LUCI_DEBUG_MODE is True:
-				luci_log.info('NNFP2: %s not authenticated' % item[0])
+				luci_log.info('NodeBusy4: %s not authenticated' % item[0])
+			# The comment above applies here, too.
+			return True
 
 		batch_ret = rq.checkBatch(rc, item[1].getProperty(BATCH_ID))
 		finished = batch_ret[0]
 		if finished is True or finished == -1:
 			if finished == -1:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('NNFP2: batch error: %s' \
+					luci_log.debug_verbose('NodeBusy5: batch error: %s' \
 						% batch_ret[1])
+
 			try:
 				nodefolder.manage_delObjects([item[0]])
 			except Exception, e:
 				if LUCI_DEBUG_MODE is True:
-					luci_log.info('NNFP3: manage_delObjects for %s failed: %r %s' % (item[0], e, str(e)))
-				return None
-			return True
-		else:
-			# Not finished, so don't remove the flag.
+					luci_log.info('NodeBusy6: %s: %r %s' % (item[0], e, str(e)))
 			return False
-	return True
+
+		# Not finished, so don't remove the flag.
+		return True
+
+	# If this code is ever reached, no flags exist for the node in question.
+	return False
 
 def resolve_nodename(self, clustername, nodename):
 	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
@@ -805,6 +826,17 @@
 			% cluname)
 	return None
 
+def getClusterDBNodes(self, clustername):
+	try:
+		cluster_path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCDBN0: %s -> %s: %r %s' \
+				% (clustername, cluster_path, e, str(e)))
+		return []
+	return nodelist
+
 def getClusterStatusDB(self, clustername):
 	results = list()
 	vals = {}
@@ -818,15 +850,7 @@
 	vals['minQuorum'] = '[unknown]'
 	results.append(vals)
 
-	try:
-		cluster_path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSDB0: %s -> %s: %r %s' \
-				% (clustername, cluster_path, e, str(e)))
-		return results
-
+	nodelist = getClusterDBNodes(self, clustername)
 	if len(nodelist) < 1:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('GCSDB0a: removing cluster %s because it has no nodes' % clustername)
@@ -837,18 +861,14 @@
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('GCSDB0b: %s: %r %s' \
 					% (clustername, e, str(e)))
-	else:
-		for node in nodelist:
-			try:
-				node_val = {}
-				node_val['type'] = 'node'
-				node_val['name'] = node[0]
-				node_val['clustered'] = '[unknown]'
-				node_val['online'] = '[unknown]'
-				node_val['error'] = True
-				results.append(node_val)
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GCSDB1: %r %s' % (e, str(e)))
+		return results
 
+	for node in nodelist:
+		node_val = {}
+		node_val['type'] = 'node'
+		node_val['name'] = node[0]
+		node_val['clustered'] = '[unknown]'
+		node_val['online'] = '[unknown]'
+		node_val['error'] = True
+		results.append(node_val)
 	return results
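
The nodeLeave() change in LuciClusterActions.py above relies on the three-way result of the new NodeBusy() helper: False when no batch flag remains for the node, True when a previous batch is still pending (or ricci cannot be reached, erring on the side of caution), and None when the cluster folder cannot be read from the Luci database. A minimal caller sketch (the helper name node_ready_for_batch is hypothetical; the "is not False" test is the pattern used by nodeLeave):

    from LuciDB import NodeBusy

    def node_ready_for_batch(self, rc, clustername, nodename_resolved):
        # True  -> a previous batch is still running (or its state is unknown)
        # None  -> the node's database folder could not be read
        # False -> no flag remains, so new work may be queued for the node
        busy = NodeBusy(self, clustername, nodename_resolved, rc)
        if busy is not False:
            # still busy or state unknown -- don't queue more work
            return False
        return True
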
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/22 21:52:05	1.255.2.9
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/23 21:21:32	1.255.2.10
@@ -39,7 +39,7 @@
 	batch_status, extract_module_status
 
 from LuciDB import manageCluster, createClusterSystems, \
-	setNodeStatus, getStorageNode, noNodeStatusPresent, \
+	setNodeStatus, getStorageNode, \
 	getClusterNode, delCluster, buildClusterCreateFlags, \
 	resolve_nodename, set_node_flag, getRicciAgent
 
@@ -981,7 +981,7 @@
 
 	if len(errors) < 1:
 		try:
-			addResource(self, request, model, res)
+			resourceAdd(self, request, model, res)
 		except Exception, e:
 			errors.append('An error occurred while adding resource "%s"' \
 				% res.getName())
@@ -989,10 +989,10 @@
 		errors.append('An error occurred while adding this resource')
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('resource error: %r %s' % (e, str(e)))
-		return (False, {'errors': errors})
+		return (False, { 'errors': errors})
 
 
-	return (True, {'messages': ['Resource added successfully']})
+	return (True, { 'messages': [ 'Resource added successfully' ]})
 
 
 ## Cluster properties form validation routines
@@ -3138,20 +3138,8 @@
 	return '/luci/cluster/index_html?pagetype=7&clustername=%s' % clustername
 
 def getRicciAgentForCluster(self, req):
-	clustername = None
-	try:
-		clustername = req['clustername']
-		if not clustername:
-			clustername = None
-			raise
-	except:
-		try:
-			clustername = req.form['clustername']
-			if not clustername:
-				clustername = None
-		except:
-			pass
-
+	fvar = GetReqVars(req, [ 'clustername' ])
+	clustername = fvar['clustername']
 	if clustername is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug('GRAFC0: no cluster name was found')
@@ -3159,39 +3147,30 @@
 	return getRicciAgent(self, clustername)
 
 def clusterTaskProcess(self, model, request):
-	try:
-		task = request['task']
-	except:
-		try:
-			task = request.form['task']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CTP1: no task specified')
-			task = None
+	fvar = GetReqVars(request, [ 'task', 'clustername' ])
+
+	task = fvar['task']
+	if task is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('CTP0: no cluster task')
+		return 'No cluster task was given'
 
 	if not model:
-		try:
-			cluname = request['clustername']
-			if not cluname:
-				raise Exception, 'cluname is blank'
-		except:
-			try:
-				cluname = request.form['clustername']
-				if not cluname:
-					raise Exception, 'cluname is blank'
-			except:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('CTP0: no model/no cluster name')
-				return 'Unable to determine the cluster name'
+		cluname = fvar['clustername']
+		if cluname is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('CTP1: no cluster name')
+			return 'No cluster name was given'
+
 		try:
 			model = getModelForCluster(self, cluname)
+			if not model:
+				raise Exception, 'No cluster model'
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CPT1: GMFC failed for %s' % cluname)
-			model = None
-
-	if not model:
-		return 'Unable to get the model object for %s' % cluname
+				luci_log.debug_verbose('CTP2: GMFC failed for %s: %r %s' \
+					% (cluname, e, str(e)))
+			return 'Unable to get the model object for %s' % cluname
 
 	redirect_page = NODES
 	if task == CLUSTER_STOP:
@@ -3212,37 +3191,29 @@
 		% (request['URL'], redirect_page, model.getClusterName()))
 
 def nodeTaskProcess(self, model, request):
-	try:
-		clustername = request['clustername']
-	except:
-		try:
-			clustername = request.form['clustername']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('NTP0: missing cluster name')
-			return (False, {'errors': [ 'No cluster name was given' ]})
+	fvar = GetReqVars(request, [ 'task', 'clustername', 'nodename' ])
 
-	try:
-		nodename = request['nodename']
-	except:
-		try:
-			nodename = request.form['nodename']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('NTP1: missing node name')
-			return (False, {'errors': [ 'No node name was given' ]})
+	task = fvar['task']
+	clustername = fvar['clustername']
+	nodename = fvar['nodename']
 
-	try:
-		task = request['task']
-	except:
-		try:
-			task = request.form['task']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('NTP2: missing task')
-			return (False, {'errors': [ 'No node task was given' ]})
+	if clustername is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NTP0: missing cluster name')
+		return (False, { 'errors': [ 'No cluster name was given' ]})
+
+	if nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NTP1: missing node name')
+		return (False, { 'errors': [ 'No node name was given' ]})
+
+	if task is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NTP2: missing task')
+		return (False, { 'errors': [ 'No node task was given' ]})
 
 	nodename_resolved = resolve_nodename(self, clustername, nodename)
+	response = request.RESPONSE
 
 	if task != NODE_FENCE:
 		# Fencing is the only task for which we don't
@@ -3256,25 +3227,25 @@
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP3: ricci error from %s: %r %s' \
 					% (nodename_resolved, e, str(e)))
-			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
-		except:
+			return (False, { 'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
+		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP4: ricci error from %s: %r %s' \
 					% (nodename_resolved, e, str(e)))
-			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
+			return (False, { 'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
 
 		cluinfo = rc.cluster_info()
 		if not cluinfo[0] and not cluinfo[1]:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP5: node %s not in a cluster (expected %s)' \
 					% (nodename_resolved, clustername))
-			return (False, {'errors': [ 'Node "%s" reports it is not in a cluster' % nodename_resolved ]})
+			return (False, { 'errors': [ 'Node "%s" reports it is not in a cluster' % nodename_resolved ]})
 
 		cname = clustername.lower()
 		if cname != cluinfo[0].lower() and cname != cluinfo[1].lower():
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP6: node %s in unknown cluster %s:%s (expected %s)' % (nodename_resolved, cluinfo[0], cluinfo[1], clustername))
-			return (False, {'errors': [ 'Node "%s" reports it in cluster "%s." We expect it to be a member of cluster "%s"' % (nodename_resolved, cluinfo[0], clustername) ]})
+			return (False, { 'errors': [ 'Node "%s" reports it is in cluster "%s". We expected it to be a member of cluster "%s"' % (nodename_resolved, cluinfo[0], clustername) ]})
 
 		if not rc.authed():
 			rc = None
@@ -3296,7 +3267,7 @@
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug('NTP7: node %s is not authenticated' \
 					% nodename_resolved)
-			return (False, {'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
+			return (False, { 'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
 
 	if task == NODE_LEAVE_CLUSTER:
 		if nodeLeave(self, rc, clustername, nodename_resolved) is None:
@@ -3304,7 +3275,6 @@
 				luci_log.debug_verbose('NTP8: nodeLeave failed')
 			return (False, {'errors': [ 'Node "%s" failed to leave cluster "%s"' % (nodename_resolved, clustername) ]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_JOIN_CLUSTER:
@@ -3313,7 +3283,6 @@
 				luci_log.debug_verbose('NTP9: nodeJoin failed')
 			return (False, {'errors': [ 'Node "%s" failed to join cluster "%s"' % (nodename_resolved, clustername) ]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_REBOOT:
@@ -3323,7 +3292,6 @@
 			return (False, {'errors': [ 'Node "%s" failed to reboot' \
 				% nodename_resolved ]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_FENCE:
@@ -3333,7 +3301,6 @@
 			return (False, {'errors': [ 'Fencing of node "%s" failed' \
 				% nodename_resolved]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_DELETE:
@@ -3342,7 +3309,6 @@
 				luci_log.debug_verbose('NTP12: nodeDelete failed')
 			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
 
-		response = request.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 
@@ -3760,54 +3726,32 @@
 	return { 'msg': ''.join(msg_list), 'cluster_conf': cc }
 
 def getResourceInfo(model, request):
+	fvars = GetReqVars(request,
+				[ 'resourcename', 'type', 'value', 'clustername', 'URL' ])
+
 	if not model:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('GRI0: no model object in session')
 		return {}
 
-	name = None
-	try:
-		name = request['resourcename']
-	except:
-		try:
-			name = request.form['resourcename']
-		except:
-			pass
-
+	name = fvars['resourcename']
 	if name is None:
-		try:
-			res_type = request.form['type']
-			if res_type == 'ip':
-				name = request.form['value'].strip()
-		except:
-			pass
+		res_type = fvars['type']
+		if res_type == 'ip':
+			name = fvars['value']
 
 	if name is None:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('GRI1: missing res name')
 		return {}
 
-	try:
-		cluname = request['clustername']
-	except:
-		try:
-			cluname = request.form['clustername']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GRI2: missing cluster name')
-			return {}
-
-	try:
-		baseurl = request['URL']
-	except:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GRI3: missing URL')
-		return {}
+	cluname = fvars['clustername']
+	baseurl = fvars['URL']
 
 	#CALL
 	return {}
 
-def GetRequestVars(req, varlist):
+def GetReqVars(req, varlist):
 	ret = {}
 	for i in varlist:
 		pval = None
@@ -3823,19 +3767,12 @@
 		ret[i] = pval
 	return ret
 
-def GetSvcReqVars(request):
-	return GetRequestVars(request,
-			['clustername', 'servicename', 'nodename', 'URL' ])
-def GetResReqVars(request):
-	return GetRequestVars(request,
-			['clustername', 'resourcename', 'nodename', 'URL' ])
-
 # These are called from external methods.
 
 def serviceRestart(self, rc, req):
 	from LuciClusterActions import RestartCluSvc
 
-	fvars = GetSvcReqVars(req)
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
 	ret = RestartCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
@@ -3847,7 +3784,7 @@
 def serviceStop(self, rc, req):
 	from LuciClusterActions import StopCluSvc
 
-	fvars = GetSvcReqVars(req)
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
 	ret = StopCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
@@ -3859,7 +3796,7 @@
 def serviceStart(self, rc, req):
 	from LuciClusterActions import StartCluSvc
 
-	fvars = GetSvcReqVars(req)
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
 	ret = StartCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
@@ -3871,21 +3808,27 @@
 def serviceDelete(self, rc, req):
 	from LuciClusterActions import DeleteCluSvc
 
-	fvars = GetSvcReqVars(req)
-	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
+	try:
+		model = LuciExtractCluModel(self, req,
+					cluster_name=fvars['clustername'])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceDelete0: %r %s' % (e, str(e)))
+		return (False, { 'errors': [ 'Unable to determine the cluster model for this request' ]})
 
 	ret = DeleteCluSvc(self, rc, fvars, model)
 	if ret is None:
-		response = request.RESPONSE
+		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], SERVICES, clustername))
+			% (req['URL'], SERVICES, fvars['clustername']))
 	else:
 		return ret
 
 def serviceMigrate(self, rc, req):
 	from LuciClusterActions import MigrateCluSvc
 
-	fvars = GetSvcReqVars(req)
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'nodename' ])
 	ret = MigrateCluSvc(self, rc, fvars)
 	if ret is None:
 		response = req.RESPONSE
@@ -3897,37 +3840,56 @@
 def resourceDelete(self, rc, req):
 	from LuciClusterActions import DeleteResource
 
-	fvars = GetResReqVars(req)
-	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+	fvars = GetReqVars(req, [ 'clustername', 'resourcename', 'nodename' ])
+	try:
+		model = LuciExtractCluModel(self, req,
+					cluster_name=fvars['clustername'])
+		if not model:
+			raise Exception, 'no model'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('resourceDelete0: %r %s' % (e, str(e)))
+		return (False, { 'errors': [ 'Unable to determine the cluster model for this request' ] })
 
-	ret = DeleteResource(self, rc, fvars, model)
+	ret = DeleteResource(self, rc, model, fvars['resourcename'])
 	if ret is None:
-		response = request.RESPONSE
+		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], RESOURCES, clustername))
+			% (req['URL'], RESOURCES, fvars['clustername']))
 	else:
 		return ret
 
-def resourceAdd(self, rc, req):
-	from LuciClusterActions import AddResource
+def resourceAdd(self, req, model, res):
+	from LuciClusterActions import AddResource, EditResource
 
-	fvars = GetResReqVars(req)
-	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+	try:
+		cluname = model.getClusterName()
+		rc = getRicciAgent(self, cluname)
+		if not rc:
+			raise Exception, 'no rc'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('resourceAdd0: no ricci agent: %r %s' \
+				% (e, str(e)))
+		return (False, { 'errors': [ 'Unable to find a ricci agent for cluster "%s"' % cluname ]})
+
+	if req.form.has_key('edit'):
+		ret = EditResource(self, rc, model, res)
+	else:
+		ret = AddResource(self, rc, model, res)
 
-	# XXX pass in resource
-	ret = AddResource(self, rc, fvars, model, None)
 	if ret is None:
-		response = request.RESPONSE
+		response = req.RESPONSE
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-			% (request['URL'], RESOURCES, clustername))
+			% (req['URL'], RESOURCES, cluname))
 	else:
 		return ret
 
-def nodeJoin(self, rc, cluname, nodename_resolved):
+def nodeJoin(self, rc, cluname, nodename):
 	return None
-def nodeLeave(self, rc, cluname, nodename_resolved):
+def nodeLeave(self, rc, cluname, nodename):
 	return None
-def nodeDelete(self, rc, cluname, nodename_resolved):
+def nodeDelete(self, rc, cluname, nodename):
 	return None
 
 
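
The repeated try/except blocks for pulling variables out of the request are gone; everything now goes through GetReqVars (renamed from GetRequestVars), which checks the request, then the posted form data, and hands back None for anything missing, so callers need only a single "is None" test per variable. A self-contained sketch of that contract (FakeRequest and get_req_vars are illustrative stand-ins, not the Zope REQUEST object or the exact Luci implementation):

    class FakeRequest(dict):
        # stand-in for the Zope REQUEST object: mapping access plus a
        # separate .form dictionary for posted values
        def __init__(self, form=None):
            dict.__init__(self)
            self.form = form or {}

    def get_req_vars(req, varlist):
        # simplified stand-in: request first, form data second, and None
        # whenever the variable is missing or empty
        ret = {}
        for i in varlist:
            pval = req.get(i) or req.form.get(i)
            if not pval:
                pval = None
            ret[i] = pval
        return ret

    req = FakeRequest(form={ 'clustername': 'clu1' })
    fvars = get_req_vars(req, [ 'clustername', 'task' ])
    assert fvars['clustername'] == 'clu1'
    assert fvars['task'] is None
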
--- conga/luci/site/luci/Extensions/ClusterModel/Attic/Ip.py	2007/05/15 18:58:55	1.1.2.2
+++ conga/luci/site/luci/Extensions/ClusterModel/Attic/Ip.py	2007/05/23 21:21:35	1.1.2.3
@@ -22,4 +22,3 @@
       return self.attr_hash['address']
     except KeyError, e:
       return ''
- 



