[Cluster-devel] conga/luci/site/luci/Extensions conga_constant ...

rmccabe at sourceware.org
Mon May 14 16:02:22 UTC 2007


CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	EXPERIMENTAL
Changes by:	rmccabe at sourceware.org	2007-05-14 16:02:16

Modified files:
	luci/site/luci/Extensions: conga_constants.py LuciDB.py 

Log message:
	More cleanup..

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.39.2.2&r2=1.39.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.3&r2=1.1.2.4

--- conga/luci/site/luci/Extensions/conga_constants.py	2007/05/04 19:10:24	1.39.2.2
+++ conga/luci/site/luci/Extensions/conga_constants.py	2007/05/14 16:02:11	1.39.2.3
@@ -140,7 +140,8 @@
 CLUSTER_NODE_NOT_MEMBER	= 0x02
 CLUSTER_NODE_ADDED		= 0x04
 
-# Debugging parameters. Set LUCI_DEBUG_MODE to 1 and LUCI_DEBUG_VERBOSITY
+# Debugging parameters. Set LUCI_DEBUG_MODE to True and LUCI_DEBUG_VERBOSITY
 # to >= 2 to get full debugging output in syslog (LOG_DAEMON/LOG_DEBUG).
-LUCI_DEBUG_MODE			= 1
+
+LUCI_DEBUG_MODE			= True
 LUCI_DEBUG_VERBOSITY	= 2
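
The constants change above and the LuciDB.py patch below work together: the
flag becomes a real boolean, and every luci_log call is guarded at the call
site, so the '%' formatting of the log arguments is never evaluated when
debugging is off. A minimal sketch of the pattern (Python 2, as in the code;
the syslog-backed debug_verbose() here is just a stand-in for
homebase_adapters.luci_log):

import syslog

LUCI_DEBUG_MODE = True
LUCI_DEBUG_VERBOSITY = 2

def debug_verbose(msg):
	# Full debugging output goes to syslog (LOG_DAEMON/LOG_DEBUG)
	# when verbosity is >= 2, per the comment in conga_constants.py.
	if LUCI_DEBUG_VERBOSITY >= 2:
		syslog.syslog(syslog.LOG_DAEMON | syslog.LOG_DEBUG, msg)

def get_flags(node):
	try:
		return node.getProperty('flags')
	except Exception, e:
		# Guarding here, not inside the logger, is what skips the
		# cost of formatting '%r' when LUCI_DEBUG_MODE is False.
		if LUCI_DEBUG_MODE is True:
			debug_verbose('get_flags: %r' % e)
	return None

Guarding inside debug_verbose() alone would be too late, since the caller has
already built the message string by then.
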
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/08 22:19:35	1.1.2.3
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/14 16:02:11	1.1.2.4
@@ -1,43 +1,52 @@
 from AccessControl import getSecurityManager
-from conga_constants import CLUSTER_FOLDER_PATH, BATCH_ID, TASKTYPE, FLAG_DESC, CLUSTER_ADD, CLUSTER_NODE_ADDED, CLUSTER_NODE_NEED_AUTH, LAST_STATUS, PLONE_ROOT, STORAGE_FOLDER_PATH, CLUSTER_NODE_NOT_MEMBER
 import RicciQuery as rq
 from ricci_communicator import RicciCommunicator
 from LuciZope import isAdmin
 
+from conga_constants import CLUSTER_FOLDER_PATH, BATCH_ID, TASKTYPE, \
+	FLAG_DESC, CLUSTER_ADD, CLUSTER_NODE_ADDED, CLUSTER_NODE_NEED_AUTH, \
+	LAST_STATUS, PLONE_ROOT, STORAGE_FOLDER_PATH, CLUSTER_NODE_NOT_MEMBER, \
+	LUCI_DEBUG_MODE
+
 from homebase_adapters import luci_log
 
 def getClusterNode(self, nodename, clustername):
 	try:
-		cluster_node = self.restrictedTraverse('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename))
+		cluster_node = self.restrictedTraverse('%s%s/%s' \
+			% (CLUSTER_FOLDER_PATH, clustername, nodename))
 		if not cluster_node:
 			return None
 		return cluster_node
 	except Exception, e:
-		luci_log.debug_verbose('getClusterNode0: %s %s: %r' \
-			% (nodename, clustername, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getClusterNode0: %s %s: %r' \
+				% (nodename, clustername, e))
 	return None
 
 def getStorageNode(self, nodename):
 	try:
-		storage_node = self.restrictedTraverse('%s%s' % (STORAGE_FOLDER_PATH, nodename))
+		storage_node = self.restrictedTraverse('%s%s' \
+			% (STORAGE_FOLDER_PATH, nodename))
 		if not storage_node:
 			return None
 		return storage_node
 	except Exception, e:
-		luci_log.debug_verbose('getStorageNode0: %s: %r' % (nodename, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getStorageNode0: %s: %r' % (nodename, e))
 	return None
 
-def testNodeFlag(node, flag_mask):
+def testNodeStatus(node, flag_mask):
 	try:
 		flags = node.getProperty('flags')
 		if flags is None:
 			return False
 		return flags & flag_mask != 0
 	except Exception, e:
-		luci_log.debug_verbose('testNodeFlag0: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('testNodeStatus0: %r' % e)
 	return False
 
-def setNodeFlag(node, flag_mask):
+def setNodeStatus(node, flag_mask):
 	try:
 		flags = node.getProperty('flags')
 		if flags is None:
@@ -47,9 +56,10 @@
 		try:
 			node.manage_addProperty('flags', flag_mask, 'int')
 		except Exception, e:
-			luci_log.debug_verbose('setNodeFlag0: %r' % e)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('setNodeStatus0: %r' % e)
 
-def delNodeFlag(node, flag_mask):
+def clearNodeStatus(node, flag_mask):
 	try:
 		flags = node.getProperty('flags')
 		if flags is None:
@@ -57,7 +67,8 @@
 		if flags & flag_mask != 0:
 			node.manage_changeProperties({ 'flags': flags & ~flag_mask })
 	except Exception, e:
-		luci_log.debug_verbose('delNodeFlag0: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('clearNodeStatus0: %r' % e)
 
 def set_node_flag(self, cluname, agent, batchid, task, desc):
 	path = '%s%s' % (CLUSTER_FOLDER_PATH, cluname)
@@ -76,14 +87,17 @@
 	except Exception, e:
 		errmsg = 'SNF0: error creating flag (%s,%s,%s) at %s: %r' \
 					% (batch_id, task, desc, objpath, e)
-		luci_log.debug_verbose(errmsg)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose(errmsg)
 		raise Exception, errmsg
 
-def noNodeFlagsPresent(self, nodefolder, flagname, hostname):
+def noNodeStatusPresent(self, nodefolder, flagname, hostname):
 	try:
 		items = nodefolder.objectItems('ManagedSystem')
 	except Exception, e:
-		luci_log.debug('NNFP0: error getting flags for %s: %r' % (nodefolder[0], e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NNFP0: error getting flags for %s: %r' \
+				% (nodefolder[0], e))
 		return None
 
 	for item in items:
@@ -95,27 +109,32 @@
 			# hostname must be a FQDN
 			rc = RicciCommunicator(hostname)
 		except Exception, e:
-			luci_log.info('NNFP1: ricci error %s: %r' % (hostname, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.info('NNFP1: ricci error %s: %r' % (hostname, e))
 			return None
 
 		if not rc.authed():
 			try:
 				snode = getStorageNode(self, hostname)
-				setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
 			except:
 				pass
-			luci_log.info('NNFP2: %s not authenticated' % item[0])
+			if LUCI_DEBUG_MODE is True:
+				luci_log.info('NNFP2: %s not authenticated' % item[0])
 
 		batch_ret = rq.checkBatch(rc, item[1].getProperty(BATCH_ID))
 		finished = batch_ret[0]
 		if finished == True or finished == -1:
 			if finished == -1:
-				luci_log.debug_verbose('NNFP2: batch error: %s' % batch_ret[1])
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('NNFP2: batch error: %s' \
+						% batch_ret[1])
 			try:
 				nodefolder.manage_delObjects([item[0]])
 			except Exception, e:
-				luci_log.info('NNFP3: manage_delObjects for %s failed: %r' \
-					% (item[0], e))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('NNFP3: manage_delObjects for %s failed: %r' \
+						% (item[0], e))
 				return None
 			return True
 		else:
@@ -130,8 +149,9 @@
 		clusterfolder = self.restrictedTraverse(path)
 		objs = clusterfolder.objectItems('Folder')
 	except Exception, e:
-		luci_log.debug_verbose('RNN0: error for %s/%s: %r' \
-			% (nodename, clustername, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RNN0: error for %s/%s: %r' \
+				% (nodename, clustername, e))
 		return nodename
 
 	for obj in objs:
@@ -141,8 +161,9 @@
 		except:
 			continue
 
-	luci_log.debug_verbose('RNN1: failed for %s/%s: nothing found' \
-		% (nodename, clustername))
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('RNN1: failed for %s/%s: nothing found' \
+			% (nodename, clustername))
 	return nodename
 
 def resolveClusterChanges(self, clusterName, model):
@@ -151,17 +172,19 @@
 		if not mb_nodes or not len(mb_nodes):
 			raise Exception, 'node list is empty'
 	except Exception, e:
-		luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %r' \
-				% (clusterName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %r' % (clusterName, e))
 		return 'Unable to find cluster nodes for %s' % clusterName
 
 	try:
-		cluster_node = self.restrictedTraverse('%s/systems/cluster/%s' % (PLONE_ROOT, clusterName))
+		cluster_node = self.restrictedTraverse('%s/systems/cluster/%s' \
+			% (PLONE_ROOT, clusterName))
 		if not cluster_node:
 			raise Exception, 'cluster node is none'
 	except Exception, e:
-		luci_log.debug('RCC1: cant find cluster node for %s: %r'
-			% (clusterName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('RCC1: can\'t find cluster node for %s: %r' \
+				% (clusterName, e))
 		return 'Unable to find an entry for %s in the Luci database.' % clusterName
 
 	try:
@@ -170,7 +193,8 @@
 			raise Exception, 'no database nodes'
 	except Exception, e:
 		# Should we just create them all? Can this even happen?
-		luci_log.debug('RCC2: error: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('RCC2: error: %r' % e)
 		return 'Unable to find database entries for any nodes in %s' % clusterName
 
 	same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'
@@ -201,24 +225,27 @@
 		try:
 			# or alternately
 			# new_node = cluster_node.restrictedTraverse(i)
-			# #setNodeFlag(self, new_node, CLUSTER_NODE_NOT_MEMBER)
+			# #setNodeStatus(self, new_node, CLUSTER_NODE_NOT_MEMBER)
 			cluster_node.delObjects([i])
 			messages.append('Node "%s" is no longer a member of cluster "%s." It has been deleted from the management interface for this cluster.' % (i, clusterName))
-			luci_log.debug_verbose('VCC3: deleted node %s' % i)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC3: deleted node %s' % i)
 		except Exception, e:
-			luci_log.debug_verbose('VCC4: delObjects: %s: %r' % (i, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC4: delObjects: %s: %r' % (i, e))
 
 	new_flags = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED
 	for i in new_list:
 		try:
-			cluster_node.manage_addFolder(i, '__luci__:csystem:%s' % clusterName)
+			cluster_node.manage_addFolder(i)
 			new_node = cluster_node.restrictedTraverse(i)
-			setNodeFlag(self, new_node, new_flags)
+			setNodeStatus(new_node, new_flags)
 			messages.append('A new cluster node, "%s," is now a member of cluster "%s." It has been added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.' % (i, clusterName))
 		except Exception, e:
 			messages.append('A new cluster node, "%s," is now a member of cluster "%s," but it has not been added to the management interface for this cluster as a result of an error creating a database entry for it.' % (i, clusterName))
-			luci_log.debug_verbose('VCC5: addFolder: %s/%s: %r' \
-				% (clusterName, i, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC5: addFolder: %s/%s: %r' \
+					% (clusterName, i, e))
 
 	return messages
 
@@ -228,7 +255,9 @@
 	try:
 		clusterfolder = self.restrictedTraverse(path)
 	except Exception, e:
-		luci_log.debug_verbose('buildCCF0: no cluster folder at %s: %r' % (path, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('buildCCF0: no cluster folder at %s: %r' \
+				% (path, e))
 		return None
 
 	for key in batch_map.keys():
@@ -248,8 +277,8 @@
 			flag.manage_addProperty(FLAG_DESC, 'Creating node "%s" for cluster "%s"' % (key, clusterName), 'string')
 			flag.manage_addProperty(LAST_STATUS, 0, 'int')
 		except Exception, e:
-			luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r' \
-				% (key, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r' % (key, e))
 
 def manageCluster(self, clusterName, node_list, cluster_os):
 	clusterName = str(clusterName)
@@ -259,48 +288,55 @@
 		if not clusters:
 			raise Exception, 'cannot find the cluster entry in the DB'
 	except Exception, e:
-		luci_log.debug_verbose('MC0: %s: %r' % (clusterName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC0: %s: %r' % (clusterName, e))
 		return 'Unable to create cluster %s: the cluster directory is missing.' % clusterName
 
 	try:
 		newCluster = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, clusterName))
 		if newCluster:
-			luci_log.debug_verbose('MC1: cluster %s: already exists' % clusterName)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC1: cluster %s: already exists' \
+					% clusterName)
 			return 'A cluster named %s is already managed by Luci' % clusterName
 	except:
 		pass
 
 	try:
-		clusters.manage_addFolder(clusterName, '__luci__:cluster')
+		clusters.manage_addFolder(clusterName)
 		newCluster = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, clusterName))
 		if not newCluster:
 			raise Exception, 'unable to create the cluster DB entry for %s' % clusterName
 	except Exception, e:
-		luci_log.debug_verbose('MC2: %s: %r' % (clusterName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC2: %s: %r' % (clusterName, e))
 		return 'Unable to create cluster %s: %r' % (clusterName, e)
 
 	try:
 		newCluster.manage_acquiredPermissions([])
 		newCluster.manage_role('View', ['Access Contents Information', 'View'])
 	except Exception, e:
-		luci_log.debug_verbose('MC3: %s: %r' % (clusterName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC3: %s: %r' % (clusterName, e))
 		try:
 			clusters.manage_delObjects([clusterName])
 		except Exception, e:
-			luci_log.debug_verbose('MC4: %s: %r' % (clusterName, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC4: %s: %r' % (clusterName, e))
 		return 'Unable to set permissions on new cluster: %s: %r' % (clusterName, e)
 
 	try:
 		newCluster.manage_addProperty('cluster_os', cluster_os, 'string')
 	except Exception, e:
-		luci_log.debug_verbose('MC5: %s: %s: %r' \
-			% (clusterName, cluster_os, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC5: %s: %s: %r' \
+				% (clusterName, cluster_os, e))
 
 	for i in node_list:
 		host = node_list[i]['host']
 
 		try:
-			newCluster.manage_addFolder(host, '__luci__:csystem:%s' % clusterName)
+			newCluster.manage_addFolder(host)
 			newSystem = self.restrictedTraverse('%s%s/%s' % (CLUSTER_FOLDER_PATH, clusterName, host))
 			if not newSystem:
 				raise Exception, 'unable to create cluster system DB entry for node %s' % host
@@ -310,11 +346,13 @@
 			try:
 				clusters.manage_delObjects([clusterName])
 			except Exception, e:
-				luci_log.debug_verbose('MC6: %s: %s: %r' \
-					% (clusterName, host, e))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('MC6: %s: %s: %r' \
+						% (clusterName, host, e))
 
-			luci_log.debug_verbose('MC7: %s: %s: %r' \
-				% (clusterName, host, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC7: %s: %s: %r' \
+					% (clusterName, host, e))
 			return 'Unable to create cluster node %s for cluster %s: %r' \
 				% (host, clusterName, e)
 
@@ -323,7 +361,8 @@
 		if not ssystem:
 			raise Exception, 'The storage DB entry is missing'
 	except Exception, e:
-		luci_log.debug_verbose('MC8: %s: %s: %r' % (clusterName, host, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC8: %s: %s: %r' % (clusterName, host, e))
 		return 'Error adding storage node %s: %r' % (host, e)
 
 	# Only add storage systems if the cluster and cluster node DB
@@ -339,12 +378,14 @@
 			pass
 
 		try:
-			ssystem.manage_addFolder(host, '__luci__:system')
+			ssystem.manage_addFolder(host)
 			newSystem = self.restrictedTraverse('%s%s' % (STORAGE_FOLDER_PATH, host))
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
-			luci_log.debug_verbose('MC9: %s: %s: %r' % (clusterName, host, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC9: %s: %s: %r' \
+					% (clusterName, host, e))
 
 def createClusterSystems(self, clusterName, node_list):
 	try:
@@ -352,7 +393,8 @@
 		if not clusterObj:
 			raise Exception, 'cluster %s DB entry is missing' % clusterName
 	except Exception, e:
-		luci_log.debug_verbose('CCS0: %s: %r' % (clusterName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CCS0: %s: %r' % (clusterName, e))
 		return 'No cluster named "%s" is managed by Luci' % clusterName
 
 	for x in node_list:
@@ -360,9 +402,11 @@
 		host = str(i['host'])
 
 		try:
-			clusterObj.manage_addFolder(host, '__luci__:csystem:%s' % clusterName)
+			clusterObj.manage_addFolder(host)
 		except Exception, e:
-			luci_log.debug_verbose('CCS0a: %s: %s: %r' % (clusterName, host, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CCS0a: %s: %s: %r' \
+					% (clusterName, host, e))
 
 		try:
 			newSystem = self.restrictedTraverse('%s%s/%s' % (CLUSTER_FOLDER_PATH, clusterName, host))
@@ -371,7 +415,9 @@
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
-			luci_log.debug_verbose('CCS1: %s: %s: %r' % (clusterName, host, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CCS1: %s: %s: %r' \
+					% (clusterName, host, e))
 			return 'Unable to create cluster node %s for cluster %s: %r' \
 				% (host, clusterName, e)
 
@@ -381,7 +427,8 @@
 			raise Exception, 'storage DB entry is missing'
 	except Exception, e:
 		# This shouldn't fail, but if it does, it's harmless right now
-		luci_log.debug_verbose('CCS2: %s: %r' % (clusterName, host, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CCS2: %s: %s: %r' % (clusterName, host, e))
 		return None
 
 	# Only add storage systems if the cluster and cluster node DB
@@ -398,13 +445,13 @@
 			pass
 
 		try:
-			ssystem.manage_addFolder(host, '__luci__:system')
+			ssystem.manage_addFolder(host)
 			newSystem = self.restrictedTraverse('%s%s' % (STORAGE_FOLDER_PATH, host))
 			newSystem.manage_acquiredPermissions([])
 			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
 		except Exception, e:
-			luci_log.debug_verbose('CCS3: %s: %r' % (clusterName, host, e))
-
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CCS3: %s: %s: %r' % (clusterName, host, e))
 
 def delSystem(self, systemName):
 	try:
@@ -412,7 +459,8 @@
 		if not ssystem:
 			raise Exception, 'storage DB entry is missing'
 	except Exception, e:
-		luci_log.debug_verbose('delSystem0: %s: %r' % (systemName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delSystem0: %s: %r' % (systemName, e))
 		return 'Unable to find storage system %s: %r' % (systemName, e)
 
 	try:
@@ -423,9 +471,11 @@
 		try:
 			ssystem.manage_delObjects([ systemName ])
 		except Exception, e1:
-			luci_log.debug_verbose('delSystem1: %s: %r' % (systemName, e1))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delSystem1: %s: %r' % (systemName, e1))
 			return 'Unable to delete the storage system %s' % systemName
-		luci_log.debug_verbose('delSystem2: %s: %r' % (systemName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delSystem2: %s: %r' % (systemName, e))
 		return
 
 	# Only unauthenticate if the system isn't a member of
@@ -458,18 +508,19 @@
 	try:
 		ssystem.manage_delObjects([ systemName ])
 	except Exception, e:
-		luci_log.debug_verbose('delSystem3: %s: %r' % (systemName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delSystem3: %s: %r' % (systemName, e))
 		return 'Unable to delete storage system %s: %r' \
 			% (systemName, e)
 
-
 def delCluster(self, clusterName):
 	try:
 		clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
 		if not clusters:
 			raise Exception, 'clusters DB entry is missing'
 	except Exception, e:
-		luci_log.debug_verbose('delCluster0: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delCluster0: %r' % e)
 		return 'Unable to find cluster %s' % clusterName
 
 	err = delClusterSystems(self, clusterName)
@@ -479,7 +530,8 @@
 	try:
 		clusters.manage_delObjects([ clusterName ])
 	except Exception, e:
-		luci_log.debug_verbose('delCluster1: %s %r' % (clusterName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delCluster1: %s %r' % (clusterName, e))
 		return 'Unable to delete cluster %s' % clusterName
 
 def delClusterSystem(self, cluster, systemName):
@@ -491,14 +543,15 @@
 			rc = RicciCommunicator(systemName, enforce_trust=False)
 			rc.unauth()
 		except Exception, e:
-			luci_log.debug_verbose('delClusterSystem0: ricci error for %s: %r' \
-				% (systemName, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delClusterSystem0: ricci error for %s: %r' % (systemName, e))
 
 	try:
 		cluster.manage_delObjects([ systemName ])
 	except Exception, e:
 		err_str = 'Error deleting cluster object %s: %r' % (systemName, e)
-		luci_log.debug_verbose('delClusterSystem1: %s' % err_str)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delClusterSystem1: %s' % err_str)
 		return err_str
 
 def delClusterSystems(self, clusterName):
@@ -512,19 +565,23 @@
 			if not csystems or len(csystems) < 1:
 				return None
 		except Exception, e:
-			luci_log.debug_verbose('delCluSystems0: %r' % e)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delCluSystems0: %r' % e)
 			return None
 	except Exception, er:
-		luci_log.debug_verbose('delCluSystems1: error for %s: %r' \
-			% (clusterName, er))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delCluSystems1: error for %s: %r' \
+				% (clusterName, er))
 		return str(er)
 
 	error_list = list()
 	for i in csystems:
 		err = delClusterSystem(self, cluster, i[0])
 		if err:
-			error_list.append('Unable to delete the cluster system %s: %s\n' % (i[0], err))
-			luci_log.debug_verbose('delCluSystems2: %s' % err)
+			error_list.append('Unable to delete the cluster system %s: %s\n' \
+				% (i[0], err))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delCluSystems2: %s' % err)
 	return ''.join(error_list)
 
 # In case we want to give access to non-admin users in the future
@@ -562,7 +619,7 @@
 	need_auth_hash = {}
 	for i in storage:
 		storageList.append(i[0])
-		if testNodeFlag(i[1], CLUSTER_NODE_NEED_AUTH) != False:
+		if testNodeStatus(i[1], CLUSTER_NODE_NEED_AUTH) != False:
 			need_auth_hash[i[0]] = i[1]
 
 	chash = {}
@@ -570,7 +627,7 @@
 		csystems = getClusterSystems(self, i[0])
 		cslist = list()
 		for c in csystems:
-			if testNodeFlag(c[1], CLUSTER_NODE_NEED_AUTH) != False:
+			if testNodeStatus(c[1], CLUSTER_NODE_NEED_AUTH) != False:
 				need_auth_hash[c[0]] = c[1]
 			cslist.append(c[0])
 		chash[i[0]] = cslist
@@ -581,131 +638,67 @@
 	return ret
 
 def getClusterSystems(self, clusterName):
-	if isAdmin(self):
-		try:
-			return self.restrictedTraverse('%s%s/objectItems' % (CLUSTER_FOLDER_PATH, clusterName))('Folder')
-		except Exception, e:
-			luci_log.debug_verbose('GCSy0: %s: %r' % (clusterName, e))
-			return None
-
 	try:
-		i = getSecurityManager().getUser()
-		if not i:
-			raise Exception, 'security manager says no user'
+		cluster_nodes = self.restrictedTraverse('%s%s/objectItems' % (CLUSTER_FOLDER_PATH, clusterName))('Folder')
 	except Exception, e:
-		luci_log.debug_verbose('GCSy1: %s: %r' % (clusterName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSy0: %s: %r' % (clusterName, e))
 		return None
 
+	if isAdmin(self):
+		return cluster_nodes
+
 	try:
-		csystems = self.restrictedTraverse('%s%s/objectItems' % (CLUSTER_FOLDER_PATH, clusterName))('Folder')
-		if not csystems or len(csystems) < 1:
-			return None
+		cluster = self.restrictedTraverse('%s%s/objectItems' \
+			% (CLUSTER_FOLDER_PATH, clusterName))
 	except Exception, e:
-		luci_log.debug_verbose('GCSy2: %s: %r' % (clusterName, e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSy1: %s: %r' % (clusterName, e))
 		return None
 
-	allowedCSystems = list()
-	for c in csystems:
-		try:
-			if i.has_role('View', c[1]):
-				allowedCSystems.append(c)
-		except Exception, e:
-			luci_log.debug_verbose('GCSy3: %s: %s: %r' \
-				% (clusterName, c[0], e))
-
-	return allowedCSystems
+	if cluster_permission_check(cluster):
+		return cluster_nodes
+	return None
 
 def getClusters(self):
-	if isAdmin(self):
-		try:
-			return self.restrictedTraverse('%s/systems/cluster/objectItems' % PLONE_ROOT)('Folder')
-		except Exception, e:
-			luci_log.debug_verbose('GC0: %r' % e)
-			return None
-	try:
-		i = getSecurityManager().getUser()
-		if not i:
-			raise Exception, 'GSMGU failed'
-	except Exception, e:
-		luci_log.debug_verbose('GC1: %r' % e)
-		return None
-
 	try:
 		clusters = self.restrictedTraverse('%s/systems/cluster/objectItems' % PLONE_ROOT)('Folder')
-		if not clusters or len(clusters) < 1:
-			return None
 	except Exception, e:
-		luci_log.debug_verbose('GC2: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GC0: %r' % e)
 		return None
 
-	allowedClusters = list()
-	for c in clusters:
-		try:
-			if i.has_role('View', c[1]):
-				allowedClusters.append(c)
-		except Exception, e:
-			luci_log.debug_verbose('GC3: %s: %r' % (c[0], e))
-
-	return allowedClusters
-
-
-def getStorage(self):
 	if isAdmin(self):
-		try:
-			return self.restrictedTraverse('%s/systems/storage/objectItems' % PLONE_ROOT)('Folder')
-		except Exception, e:
-			luci_log.debug_verbose('GS0: %r' % e)
-			return None
-
-	try:
-		i = getSecurityManager().getUser()
-		if not i:
-			raise Exception, 'GSMGU failed'
-	except Exception, e:
-		luci_log.debug_verbose('GS1: %r' % e)
-		return None
+		return clusters
+	return check_clusters(self, clusters)
 
+def getStorage(self):
 	try:
 		storage = self.restrictedTraverse('%s/systems/storage/objectItems' % PLONE_ROOT)('Folder')
-		if not storage or len(storage) < 1:
-			return None
 	except Exception, e:
-		luci_log.debug_verbose('GS2: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GS0: %r' % e)
 		return None
-
-	allowedStorage = list()
-	for s in storage:
-		try:
-			if i.has_role('View', s[1]):
-				allowedStorage.append(s)
-		except Exception, e:
-			luci_log.debug_verbose('GS3: %r' % e)
-
-	return allowedStorage
+	if isAdmin(self):
+		return storage
+	return allowed_systems(self, storage)
 
 def check_clusters(self, clusters):
-	sm = getSecurityManager()
-	user = sm.getUser()
-
-	clist = list()
-	for cluster in clusters:
-		if user.has_permission('View', cluster):
-			clist.append(cluster)
-	return clist
+	user = getSecurityManager().getUser()
+	return filter(lambda x: user.has_role('View', x[1]), clusters)
 
 def cluster_permission_check(cluster):
 	try:
-		sm = getSecurityManager()
-		user = sm.getUser()
-		if user.has_permission('View', cluster):
+		user = getSecurityManager().getUser()
+		if user.has_role('View', cluster[1]):
 			return True
 	except:
 		pass
 	return False
 
-def allowed_systems(self, user, systems):
-	user = getSecurityManager().getUser().getUser()
-	return map(lambda x: user.has_permission('View', x[1]), systems)
+def allowed_systems(self, systems):
+	user = getSecurityManager().getUser()
+	return filter(lambda x: user.has_role('View', x[1]), systems)
 
 def access_to_host_allowed(self, hostname, allowed_systems_list):
 	allowed = dict(map(lambda x: [ x[0], None ], allowed_systems_list))
@@ -717,31 +710,37 @@
 		if not perm:
 			return None
 	except Exception, e:
-		luci_log.debug_verbose('GRA0: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GRA0: %r' % e)
 		return None
 
 	try:
 		path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
 		clusterfolder = self.restrictedTraverse(path)
 		if not clusterfolder:
-			luci_log.debug('GRA1: cluster folder %s for %s is missing' \
-				% (path, clustername))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('GRA1: cluster folder %s for %s is missing' \
+					% (path, clustername))
 			return None
 
 		nodes = clusterfolder.objectItems('Folder')
 		if len(nodes) < 1:
-			luci_log.debug('GRA2: no cluster nodes for %s found.' % clustername)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('GRA2: no cluster nodes for %s found' \
+					% clustername)
 			raise Exception, 'No cluster nodes were found at %s' % path
 	except Exception, e:
 		try:
-			luci_log.debug('GRA3: cluster folder %s for %s is missing: %r' \
-				% (path, clustername, e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('GRA3: cluster folder %s for %s is missing: %r' \
+					% (path, clustername, e))
 
 			if len(clusterfolder.objectItems()) < 1:
 				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
 				clusters.manage_delObjects([clustername])
 		except Exception, ein:
-			luci_log.debug_verbose('GRA4: %r' % ein)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA4: %r' % ein)
 		return None
 
 	cluname = clustername.lower()
@@ -757,12 +756,14 @@
 		try:
 			rc = RicciCommunicator(hostname)
 			if not rc:
-				luci_log.debug_verbose('GRA5: rc is None')
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA5: rc is None')
 				continue
 
 			ricci_hostname = rc.hostname()
 			if not ricci_hostname:
-				luci_log.debug_verbose('GRA6: ricci_hostname is blank')
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA6: ricci_hostname is blank')
 				continue
 
 			clu_info = rc.cluster_info()
@@ -770,25 +771,32 @@
 			cur_name = str(clu_info[0]).strip().lower()
 			if not cur_name:
 				cur_name = None
-				luci_log.debug_verbose('GRA7: cluster name is none for %s' % ricci_hostname)
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA7: cluster name is none for %s' \
+						% ricci_hostname)
 
 			cur_alias = str(clu_info[1]).strip().lower()
 			if not cur_alias:
 				cur_alias = None
-				luci_log.debug_verbose('GRA8: cluster alias is none for %s' % ricci_hostname)
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA8: no cluster alias for %s' \
+						% ricci_hostname)
 
 			if (cur_name is not None and cluname != cur_name) and (cur_alias is not None and cluname != cur_alias):
-				luci_log.debug('GRA9: node %s reports it\'s in cluster %s:%s; we expect %s' % (hostname, clu_info[0], clu_info[1], cluname))
-				setNodeFlag(self, node, CLUSTER_NODE_NOT_MEMBER)
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug('GRA9: node %s reports it\'s in cluster [%s:%s] we expect %s' % (hostname, clu_info[0], clu_info[1], cluname))
+				setNodeStatus(node[1], CLUSTER_NODE_NOT_MEMBER)
 				continue
 
 			if rc.authed():
 				return rc
 
-			setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+			setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
 		except Exception, eout:
-			luci_log.debug_verbose('GRA10: %r' % eout)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA10: %r' % eout)
 
-	luci_log.debug('GRA11: no ricci agent could be found for cluster %s' \
-		% cluname)
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug('GRA11: no ricci agent could be found for cluster %s' \
+			% cluname)
 	return None
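
All of the renamed helpers (testNodeStatus, setNodeStatus, clearNodeStatus)
treat the node's integer 'flags' property as a bitmask. A self-contained
sketch of the technique, with a plain dict standing in for the Zope node; the
NOT_MEMBER and ADDED values are the ones from conga_constants.py, while
NEED_AUTH = 0x01 is assumed here since its value doesn't appear in the patch:

# Bitmask node-status flags. NOT_MEMBER and ADDED come from the patch;
# NEED_AUTH = 0x01 is assumed.
CLUSTER_NODE_NEED_AUTH  = 0x01
CLUSTER_NODE_NOT_MEMBER = 0x02
CLUSTER_NODE_ADDED      = 0x04

# A dict stands in for the Zope node object and its 'flags' property.
def test_status(node, flag_mask):
	flags = node.get('flags')
	if flags is None:
		return False
	return flags & flag_mask != 0

def set_status(node, flag_mask):
	node['flags'] = node.get('flags', 0) | flag_mask

def clear_status(node, flag_mask):
	flags = node.get('flags')
	if flags is not None and flags & flag_mask != 0:
		node['flags'] = flags & ~flag_mask

node = {}
set_status(node, CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED)
assert test_status(node, CLUSTER_NODE_ADDED)
clear_status(node, CLUSTER_NODE_NEED_AUTH)
assert not test_status(node, CLUSTER_NODE_NEED_AUTH)
assert node['flags'] == CLUSTER_NODE_ADDED

Packing the states into one 'int' property keeps the Zope schema to a single
field while still letting several states be set at once, as
resolveClusterChanges does with CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED.
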
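The getClusters()/getStorage() rewrite also collapses the old copy-pasted
permission loops into a single filter over the (id, object) tuples returned
by objectItems('Folder'). A sketch of that shape, with a stub user standing
in for AccessControl's getSecurityManager().getUser():

# Sketch of the 'View'-role filter behind allowed_systems() and
# check_clusters(). StubUser replaces getSecurityManager().getUser().
class StubUser:
	def __init__(self, viewable):
		self.viewable = viewable

	def has_role(self, role, obj):
		return role == 'View' and obj in self.viewable

def allowed_systems(user, systems):
	# systems is a list of (id, object) tuples, like the result of
	# folder.objectItems('Folder') in the real code.
	return filter(lambda x: user.has_role('View', x[1]), systems)

node_a, node_b = object(), object()
user = StubUser([node_a])
systems = [('node-a', node_a), ('node-b', node_b)]
print [name for name, obj in allowed_systems(user, systems)]
# -> ['node-a']

In Python 2, filter() on a list returns a list, so callers can iterate and
index the result exactly as they did the raw objectItems() output.
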



