[Cluster-devel] conga/luci/site/luci/Extensions LuciDB.py Luci ...
rmccabe at sourceware.org
Tue May 8 22:19:38 UTC 2007
CVSROOT: /cvs/cluster
Module name: conga
Branch: EXPERIMENTAL
Changes by: rmccabe at sourceware.org 2007-05-08 22:19:35
Modified files:
luci/site/luci/Extensions: LuciDB.py LuciZope.py
cluster_adapters.py
Added files:
luci/site/luci/Extensions: LuciClusterActions.py
LuciClusterInfo.py
Removed files:
luci/site/luci/Extensions: clusterOS.py
Log message:
cleanup and refactor, part 3
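One recurring mechanical change in this patch: exception handlers that logged errors with str(e) now log with the %r conversion, which records the exception type and arguments even when the message is empty, and bare except clauses now bind the exception so it can be logged. A minimal sketch of the pattern (Python 2; the logger below is an illustrative stand-in for Luci's luci_log, not the real object):

import logging
logging.basicConfig(level=logging.DEBUG)
luci_log = logging.getLogger('luci')

def lookup(mapping, key):
	try:
		return mapping[key]
	except Exception, e:
		# Old style: str(e) is often '' and always hides the exception type.
		# New style: %r records the class and arguments, e.g. KeyError('x',).
		luci_log.debug('lookup failed for %s: %r' % (key, e))
		return None

lookup({}, 'missing')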
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=NONE&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=NONE&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.2&r2=1.1.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZope.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.1&r2=1.1.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.255.2.2&r2=1.255.2.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/clusterOS.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.3&r2=NONE
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py 2007/05/04 19:10:24 1.1.2.2
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py 2007/05/08 22:19:35 1.1.2.3
@@ -1,5 +1,5 @@
from AccessControl import getSecurityManager
-from conga_constants import CLUSTER_FOLDER_PATH, BATCH_ID, TASKTYPE, FLAG_DESC, CLUSTER_ADD, CLUSTER_NODE_ADDED, CLUSTER_NODE_NEED_AUTH, LAST_STATUS, PLONE_ROOT, STORAGE_FOLDER_PATH
+from conga_constants import CLUSTER_FOLDER_PATH, BATCH_ID, TASKTYPE, FLAG_DESC, CLUSTER_ADD, CLUSTER_NODE_ADDED, CLUSTER_NODE_NEED_AUTH, LAST_STATUS, PLONE_ROOT, STORAGE_FOLDER_PATH, CLUSTER_NODE_NOT_MEMBER
import RicciQuery as rq
from ricci_communicator import RicciCommunicator
from LuciZope import isAdmin
@@ -10,22 +10,22 @@
try:
cluster_node = self.restrictedTraverse('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename))
if not cluster_node:
- raise Exception, 'cluster node is none'
+ return None
return cluster_node
except Exception, e:
- luci_log.debug_verbose('getClusterNode0: %s %s: %s' \
- % (nodename, clustername, str(e)))
- return None
+ luci_log.debug_verbose('getClusterNode0: %s %s: %r' \
+ % (nodename, clustername, e))
+ return None
def getStorageNode(self, nodename):
try:
storage_node = self.restrictedTraverse('%s%s' % (STORAGE_FOLDER_PATH, nodename))
if not storage_node:
- raise Exception, 'storage node is none'
+ return None
return storage_node
except Exception, e:
- luci_log.debug_verbose('getStorageNode0: %s: %s' % (nodename, str(e)))
- return None
+ luci_log.debug_verbose('getStorageNode0: %s: %r' % (nodename, e))
+ return None
def testNodeFlag(node, flag_mask):
try:
@@ -34,7 +34,7 @@
return False
return flags & flag_mask != 0
except Exception, e:
- luci_log.debug_verbose('testNodeFlag0: %s' % str(e))
+ luci_log.debug_verbose('testNodeFlag0: %r' % e)
return False
def setNodeFlag(node, flag_mask):
@@ -47,7 +47,7 @@
try:
node.manage_addProperty('flags', flag_mask, 'int')
except Exception, e:
- luci_log.debug_verbose('setNodeFlag0: %s' % str(e))
+ luci_log.debug_verbose('setNodeFlag0: %r' % e)
def delNodeFlag(node, flag_mask):
try:
@@ -57,7 +57,7 @@
if flags & flag_mask != 0:
node.manage_changeProperties({ 'flags': flags & ~flag_mask })
except Exception, e:
- luci_log.debug_verbose('delNodeFlag0: %s' % str(e))
+ luci_log.debug_verbose('delNodeFlag0: %r' % e)
def set_node_flag(self, cluname, agent, batchid, task, desc):
path = '%s%s' % (CLUSTER_FOLDER_PATH, cluname)
@@ -74,16 +74,16 @@
flag.manage_addProperty(TASKTYPE, task, 'string')
flag.manage_addProperty(FLAG_DESC, desc, 'string')
except Exception, e:
- errmsg = 'SNF0: error creating flag (%s,%s,%s) at %s: %s' \
- % (batch_id, task, desc, objpath, str(e))
+ errmsg = 'SNF0: error creating flag (%s,%s,%s) at %s: %r' \
+ % (batch_id, task, desc, objpath, e)
luci_log.debug_verbose(errmsg)
raise Exception, errmsg
def noNodeFlagsPresent(self, nodefolder, flagname, hostname):
try:
items = nodefolder.objectItems('ManagedSystem')
- except:
- luci_log.debug('NNFP0: error getting flags for %s' % nodefolder[0])
+ except Exception, e:
+ luci_log.debug('NNFP0: error getting flags for %s: %r' % (nodefolder[0], e))
return None
for item in items:
@@ -95,7 +95,7 @@
# hostname must be a FQDN
rc = RicciCommunicator(hostname)
except Exception, e:
- luci_log.info('NNFP1: ricci error %s: %s' % (hostname, str(e)))
+ luci_log.info('NNFP1: ricci error %s: %r' % (hostname, e))
return None
if not rc.authed():
@@ -114,8 +114,8 @@
try:
nodefolder.manage_delObjects([item[0]])
except Exception, e:
- luci_log.info('NNFP3: manage_delObjects for %s failed: %s' \
- % (item[0], str(e)))
+ luci_log.info('NNFP3: manage_delObjects for %s failed: %r' \
+ % (item[0], e))
return None
return True
else:
@@ -130,8 +130,8 @@
clusterfolder = self.restrictedTraverse(path)
objs = clusterfolder.objectItems('Folder')
except Exception, e:
- luci_log.debug_verbose('RNN0: error for %s/%s: %s' \
- % (nodename, clustername, str(e)))
+ luci_log.debug_verbose('RNN0: error for %s/%s: %r' \
+ % (nodename, clustername, e))
return nodename
for obj in objs:
@@ -151,8 +151,8 @@
if not mb_nodes or not len(mb_nodes):
raise Exception, 'node list is empty'
except Exception, e:
- luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %s' \
- % (str(e), clusterName))
+ luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %r' \
+ % (clusterName, e))
return 'Unable to find cluster nodes for %s' % clusterName
try:
@@ -160,8 +160,8 @@
if not cluster_node:
raise Exception, 'cluster node is none'
except Exception, e:
- luci_log.debug('RCC1: cant find cluster node for %s: %s'
- % (clusterName, str(e)))
+ luci_log.debug('RCC1: cannot find cluster node for %s: %r'
+ % (clusterName, e))
return 'Unable to find an entry for %s in the Luci database.' % clusterName
try:
@@ -170,7 +170,7 @@
raise Exception, 'no database nodes'
except Exception, e:
# Should we just create them all? Can this even happen?
- luci_log.debug('RCC2: error: %s' % str(e))
+ luci_log.debug('RCC2: error: %r' % e)
return 'Unable to find database entries for any nodes in %s' % clusterName
same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'
@@ -206,7 +206,7 @@
messages.append('Node "%s" is no longer a member of cluster "%s." It has been deleted from the management interface for this cluster.' % (i, clusterName))
luci_log.debug_verbose('VCC3: deleted node %s' % i)
except Exception, e:
- luci_log.debug_verbose('VCC4: delObjects: %s: %s' % (i, str(e)))
+ luci_log.debug_verbose('VCC4: delObjects: %s: %r' % (i, e))
new_flags = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED
for i in new_list:
@@ -217,8 +217,8 @@
messages.append('A new cluster node, "%s," is now a member of cluster "%s." It has been added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.' % (i, clusterName))
except Exception, e:
messages.append('A new cluster node, "%s," is now a member of cluster "%s," but it has not been added to the management interface for this cluster as a result of an error creating a database entry for it.' % (i, clusterName))
- luci_log.debug_verbose('VCC5: addFolder: %s/%s: %s' \
- % (clusterName, i, str(e)))
+ luci_log.debug_verbose('VCC5: addFolder: %s/%s: %r' \
+ % (clusterName, i, e))
return messages
@@ -228,7 +228,7 @@
try:
clusterfolder = self.restrictedTraverse(path)
except Exception, e:
- luci_log.debug_verbose('buildCCF0: no cluster folder at %s' % path)
+ luci_log.debug_verbose('buildCCF0: no cluster folder at %s: %r' % (path, e))
return None
for key in batch_map.keys():
@@ -248,8 +248,8 @@
flag.manage_addProperty(FLAG_DESC, 'Creating node "%s" for cluster "%s"' % (key, clusterName), 'string')
flag.manage_addProperty(LAST_STATUS, 0, 'int')
except Exception, e:
- luci_log.debug_verbose('buildCCF1: error creating flag for %s: %s' \
- % (key, str(e)))
+ luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r' \
+ % (key, e))
def manageCluster(self, clusterName, node_list, cluster_os):
clusterName = str(clusterName)
@@ -259,7 +259,7 @@
if not clusters:
raise Exception, 'cannot find the cluster entry in the DB'
except Exception, e:
- luci_log.debug_verbose('MC0: %s: %s' % (clusterName, str(e)))
+ luci_log.debug_verbose('MC0: %s: %r' % (clusterName, e))
return 'Unable to create cluster %s: the cluster directory is missing.' % clusterName
try:
@@ -276,25 +276,25 @@
if not newCluster:
raise Exception, 'unable to create the cluster DB entry for %s' % clusterName
except Exception, e:
- luci_log.debug_verbose('MC2: %s: %s' % (clusterName, str(e)))
- return 'Unable to create cluster %s: %s' % (clusterName, str(e))
+ luci_log.debug_verbose('MC2: %s: %r' % (clusterName, e))
+ return 'Unable to create cluster %s: %r' % (clusterName, e)
try:
newCluster.manage_acquiredPermissions([])
newCluster.manage_role('View', ['Access Contents Information', 'View'])
except Exception, e:
- luci_log.debug_verbose('MC3: %s: %s' % (clusterName, str(e)))
+ luci_log.debug_verbose('MC3: %s: %r' % (clusterName, e))
try:
clusters.manage_delObjects([clusterName])
except Exception, e:
- luci_log.debug_verbose('MC4: %s: %s' % (clusterName, str(e)))
- return 'Unable to set permissions on new cluster: %s: %s' % (clusterName, str(e))
+ luci_log.debug_verbose('MC4: %s: %r' % (clusterName, e))
+ return 'Unable to set permissions on new cluster: %s: %r' % (clusterName, e)
try:
newCluster.manage_addProperty('cluster_os', cluster_os, 'string')
except Exception, e:
- luci_log.debug_verbose('MC5: %s: %s: %s' \
- % (clusterName, cluster_os, str(e)))
+ luci_log.debug_verbose('MC5: %s: %s: %r' \
+ % (clusterName, cluster_os, e))
for i in node_list:
host = node_list[i]['host']
@@ -310,21 +310,21 @@
try:
clusters.manage_delObjects([clusterName])
except Exception, e:
- luci_log.debug_verbose('MC6: %s: %s: %s' \
- % (clusterName, host, str(e)))
+ luci_log.debug_verbose('MC6: %s: %s: %r' \
+ % (clusterName, host, e))
- luci_log.debug_verbose('MC7: %s: %s: %s' \
- % (clusterName, host, str(e)))
- return 'Unable to create cluster node %s for cluster %s: %s' \
- % (host, clusterName, str(e))
+ luci_log.debug_verbose('MC7: %s: %s: %r' \
+ % (clusterName, host, e))
+ return 'Unable to create cluster node %s for cluster %s: %r' \
+ % (host, clusterName, e)
try:
ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
if not ssystem:
raise Exception, 'The storage DB entry is missing'
except Exception, e:
- luci_log.debug_verbose('MC8: %s: %s: %s' % (clusterName, host, str(e)))
- return 'Error adding storage node %s: %s' % (host, str(e))
+ luci_log.debug_verbose('MC8: %s: %s: %r' % (clusterName, host, e))
+ return 'Error adding storage node %s: %r' % (host, e)
# Only add storage systems if the cluster and cluster node DB
# objects were added successfully.
@@ -344,7 +344,7 @@
newSystem.manage_acquiredPermissions([])
newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
except Exception, e:
- luci_log.debug_verbose('MC9: %s: %s: %s' % (clusterName, host, str(e)))
+ luci_log.debug_verbose('MC9: %s: %s: %r' % (clusterName, host, e))
def createClusterSystems(self, clusterName, node_list):
try:
@@ -352,7 +352,7 @@
if not clusterObj:
raise Exception, 'cluster %s DB entry is missing' % clusterName
except Exception, e:
- luci_log.debug_verbose('CCS0: %s: %s' % (clusterName, str(e)))
+ luci_log.debug_verbose('CCS0: %s: %r' % (clusterName, e))
return 'No cluster named "%s" is managed by Luci' % clusterName
for x in node_list:
@@ -362,7 +362,8 @@
try:
clusterObj.manage_addFolder(host, '__luci__:csystem:%s' % clusterName)
except Exception, e:
- luci_log.debug_verbose('CCS0a: %s: %s: %s' % (clusterName, host, str(e)))
+ luci_log.debug_verbose('CCS0a: %s: %s: %r' % (clusterName, host, e))
+
try:
newSystem = self.restrictedTraverse('%s%s/%s' % (CLUSTER_FOLDER_PATH, clusterName, host))
if not newSystem:
@@ -370,9 +371,9 @@
newSystem.manage_acquiredPermissions([])
newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
except Exception, e:
- luci_log.debug_verbose('CCS1: %s: %s: %s' % (clusterName, host, str(e)))
- return 'Unable to create cluster node %s for cluster %s: %s' \
- % (host, clusterName, str(e))
+ luci_log.debug_verbose('CCS1: %s: %s: %r' % (clusterName, host, e))
+ return 'Unable to create cluster node %s for cluster %s: %r' \
+ % (host, clusterName, e)
try:
ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
@@ -380,7 +381,7 @@
raise Exception, 'storage DB entry is missing'
except Exception, e:
# This shouldn't fail, but if it does, it's harmless right now
- luci_log.debug_verbose('CCS2: %s: %s' % (clusterName, host, str(e)))
+ luci_log.debug_verbose('CCS2: %s: %s: %r' % (clusterName, host, e))
return None
# Only add storage systems if the cluster and cluster node DB
@@ -402,7 +403,7 @@
newSystem.manage_acquiredPermissions([])
newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
except Exception, e:
- luci_log.debug_verbose('CCS3: %s: %s' % (clusterName, host, str(e)))
+ luci_log.debug_verbose('CCS3: %s: %s: %r' % (clusterName, host, e))
def delSystem(self, systemName):
@@ -411,8 +412,8 @@
if not ssystem:
raise Exception, 'storage DB entry is missing'
except Exception, e:
- luci_log.debug_verbose('delSystem0: %s: %s' % (systemName, str(e)))
- return 'Unable to find storage system %s: %s' % (systemName, str(e))
+ luci_log.debug_verbose('delSystem0: %s: %r' % (systemName, e))
+ return 'Unable to find storage system %s: %r' % (systemName, e)
try:
rc = RicciCommunicator(systemName, enforce_trust=False)
@@ -421,10 +422,10 @@
except Exception, e:
try:
ssystem.manage_delObjects([ systemName ])
- except Exception, e:
- luci_log.debug_verbose('delSystem1: %s: %s' % (systemName, str(e)))
+ except Exception, e1:
+ luci_log.debug_verbose('delSystem1: %s: %r' % (systemName, e1))
return 'Unable to delete the storage system %s' % systemName
- luci_log.debug_verbose('delSystem2: %s: %s' % (systemName, str(e)))
+ luci_log.debug_verbose('delSystem2: %s: %r' % (systemName, e))
return
# Only unauthenticate if the system isn't a member of
@@ -457,9 +458,9 @@
try:
ssystem.manage_delObjects([ systemName ])
except Exception, e:
- luci_log.debug_verbose('delSystem3: %s: %s' % (systemName, str(e)))
- return 'Unable to delete storage system %s: %s' \
- % (systemName, str(e))
+ luci_log.debug_verbose('delSystem3: %s: %r' % (systemName, e))
+ return 'Unable to delete storage system %s: %r' \
+ % (systemName, e)
def delCluster(self, clusterName):
@@ -468,7 +469,7 @@
if not clusters:
raise Exception, 'clusters DB entry is missing'
except Exception, e:
- luci_log.debug_verbose('delCluster0: %s' % str(e))
+ luci_log.debug_verbose('delCluster0: %r' % e)
return 'Unable to find cluster %s' % clusterName
err = delClusterSystems(self, clusterName)
@@ -478,7 +479,7 @@
try:
clusters.manage_delObjects([ clusterName ])
except Exception, e:
- luci_log.debug_verbose('delCluster1: %s' % str(e))
+ luci_log.debug_verbose('delCluster1: %s %r' % (clusterName, e))
return 'Unable to delete cluster %s' % clusterName
def delClusterSystem(self, cluster, systemName):
@@ -490,13 +491,13 @@
rc = RicciCommunicator(systemName, enforce_trust=False)
rc.unauth()
except Exception, e:
- luci_log.debug_verbose('delClusterSystem0: ricci error for %s: %s' \
- % (systemName, str(e)))
+ luci_log.debug_verbose('delClusterSystem0: ricci error for %s: %r' \
+ % (systemName, e))
try:
cluster.manage_delObjects([ systemName ])
except Exception, e:
- err_str = 'Error deleting cluster object %s: %s' % (systemName, str(e))
+ err_str = 'Error deleting cluster object %s: %r' % (systemName, e)
luci_log.debug_verbose('delClusterSystem1: %s' % err_str)
return err_str
@@ -511,11 +512,11 @@
if not csystems or len(csystems) < 1:
return None
except Exception, e:
- luci_log.debug_verbose('delCluSystems0: %s' % str(e))
+ luci_log.debug_verbose('delCluSystems0: %r' % e)
return None
except Exception, er:
- luci_log.debug_verbose('delCluSystems1: error for %s: %s' \
- % (clusterName, str(er)))
+ luci_log.debug_verbose('delCluSystems1: error for %s: %r' \
+ % (clusterName, er))
return str(er)
error_list = list()
@@ -584,7 +585,7 @@
try:
return self.restrictedTraverse('%s%s/objectItems' % (CLUSTER_FOLDER_PATH, clusterName))('Folder')
except Exception, e:
- luci_log.debug_verbose('GCSy0: %s: %s' % (clusterName, str(e)))
+ luci_log.debug_verbose('GCSy0: %s: %r' % (clusterName, e))
return None
try:
@@ -592,7 +593,7 @@
if not i:
raise Exception, 'security manager says no user'
except Exception, e:
- luci_log.debug_verbose('GCSy1: %s: %s' % (clusterName, str(e)))
+ luci_log.debug_verbose('GCSy1: %s: %r' % (clusterName, e))
return None
try:
@@ -600,7 +601,7 @@
if not csystems or len(csystems) < 1:
return None
except Exception, e:
- luci_log.debug_verbose('GCSy2: %s: %s' % (clusterName, str(e)))
+ luci_log.debug_verbose('GCSy2: %s: %r' % (clusterName, e))
return None
allowedCSystems = list()
@@ -609,25 +610,24 @@
if i.has_role('View', c[1]):
allowedCSystems.append(c)
except Exception, e:
- luci_log.debug_verbose('GCSy3: %s: %s: %s' \
- % (clusterName, c[0], str(e)))
+ luci_log.debug_verbose('GCSy3: %s: %s: %r' \
+ % (clusterName, c[0], e))
return allowedCSystems
-
def getClusters(self):
if isAdmin(self):
try:
return self.restrictedTraverse('%s/systems/cluster/objectItems' % PLONE_ROOT)('Folder')
except Exception, e:
- luci_log.debug_verbose('GC0: %s' % str(e))
+ luci_log.debug_verbose('GC0: %r' % e)
return None
try:
i = getSecurityManager().getUser()
if not i:
raise Exception, 'GSMGU failed'
except Exception, e:
- luci_log.debug_verbose('GC1: %s' % str(e))
+ luci_log.debug_verbose('GC1: %r' % e)
return None
try:
@@ -635,7 +635,7 @@
if not clusters or len(clusters) < 1:
return None
except Exception, e:
- luci_log.debug_verbose('GC2: %s' % str(e))
+ luci_log.debug_verbose('GC2: %r' % e)
return None
allowedClusters = list()
@@ -644,7 +644,7 @@
if i.has_role('View', c[1]):
allowedClusters.append(c)
except Exception, e:
- luci_log.debug_verbose('GC3: %s: %s' % (c[0], str(e)))
+ luci_log.debug_verbose('GC3: %s: %r' % (c[0], e))
return allowedClusters
@@ -654,7 +654,7 @@
try:
return self.restrictedTraverse('%s/systems/storage/objectItems' % PLONE_ROOT)('Folder')
except Exception, e:
- luci_log.debug_verbose('GS0: %s' % str(e))
+ luci_log.debug_verbose('GS0: %r' % e)
return None
try:
@@ -662,7 +662,7 @@
if not i:
raise Exception, 'GSMGU failed'
except Exception, e:
- luci_log.debug_verbose('GS1: %s' % str(e))
+ luci_log.debug_verbose('GS1: %r' % e)
return None
try:
@@ -670,7 +670,7 @@
if not storage or len(storage) < 1:
return None
except Exception, e:
- luci_log.debug_verbose('GS2: %s' % str(e))
+ luci_log.debug_verbose('GS2: %r' % e)
return None
allowedStorage = list()
@@ -679,7 +679,116 @@
if i.has_role('View', s[1]):
allowedStorage.append(s)
except Exception, e:
- luci_log.debug_verbose('GS3: %s' % str(e))
+ luci_log.debug_verbose('GS3: %r' % e)
return allowedStorage
+def check_clusters(self, clusters):
+ sm = getSecurityManager()
+ user = sm.getUser()
+
+ clist = list()
+ for cluster in clusters:
+ if user.has_permission('View', cluster):
+ clist.append(cluster)
+ return clist
+
+def cluster_permission_check(cluster):
+ try:
+ sm = getSecurityManager()
+ user = sm.getUser()
+ if user.has_permission('View', cluster):
+ return True
+ except:
+ pass
+ return False
+
+def allowed_systems(self, user, systems):
+ user = getSecurityManager().getUser()
+ return filter(lambda x: user.has_permission('View', x[1]), systems)
+
+def access_to_host_allowed(self, hostname, allowed_systems_list):
+ allowed = dict(map(lambda x: [ x[0], None ], allowed_systems_list))
+ return allowed.has_key(hostname)
+
+def getRicciAgent(self, clustername):
+ try:
+ perm = cluster_permission_check(clustername)
+ if not perm:
+ return None
+ except Exception, e:
+ luci_log.debug_verbose('GRA0: %r' % e)
+ return None
+
+ try:
+ path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+ clusterfolder = self.restrictedTraverse(path)
+ if not clusterfolder:
+ luci_log.debug('GRA1: cluster folder %s for %s is missing' \
+ % (path, clustername))
+ return None
+
+ nodes = clusterfolder.objectItems('Folder')
+ if len(nodes) < 1:
+ luci_log.debug('GRA2: no cluster nodes for %s found.' % clustername)
+ raise Exception, 'No cluster nodes were found at %s' % path
+ except Exception, e:
+ try:
+ luci_log.debug('GRA3: cluster folder %s for %s is missing: %r' \
+ % (path, clustername, e))
+
+ if len(clusterfolder.objectItems()) < 1:
+ clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
+ clusters.manage_delObjects([clustername])
+ except Exception, ein:
+ luci_log.debug_verbose('GRA4: %r' % ein)
+ return None
+
+ cluname = clustername.lower()
+ for node in nodes:
+ try:
+ hostname = node[1].getId()
+ except:
+ try:
+ hostname = node[0]
+ except:
+ continue
+
+ try:
+ rc = RicciCommunicator(hostname)
+ if not rc:
+ luci_log.debug_verbose('GRA5: rc is None')
+ continue
+
+ ricci_hostname = rc.hostname()
+ if not ricci_hostname:
+ luci_log.debug_verbose('GRA6: ricci_hostname is blank')
+ continue
+
+ clu_info = rc.cluster_info()
+
+ cur_name = str(clu_info[0]).strip().lower()
+ if not cur_name:
+ cur_name = None
+ luci_log.debug_verbose('GRA7: cluster name is none for %s' % ricci_hostname)
+
+ cur_alias = str(clu_info[1]).strip().lower()
+ if not cur_alias:
+ cur_alias = None
+ luci_log.debug_verbose('GRA8: cluster alias is none for %s' % ricci_hostname)
+
+ if (cur_name is not None and cluname != cur_name) and (cur_alias is not None and cluname != cur_alias):
+ luci_log.debug('GRA9: node %s reports it\'s in cluster %s:%s; we expect %s' % (hostname, clu_info[0], clu_info[1], cluname))
+ setNodeFlag(node[1], CLUSTER_NODE_NOT_MEMBER)
+ continue
+
+ if rc.authed():
+ return rc
+
+ setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
+ except Exception, eout:
+ luci_log.debug_verbose('GRA10: %r' % eout)
+
+ luci_log.debug('GRA11: no ricci agent could be found for cluster %s' \
+ % cluname)
+ return None
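In outline, the new getRicciAgent walks the cluster's node folders and returns the first node that reports a matching cluster name or alias and has an authenticated ricci connection. A condensed sketch of that selection loop (Python 2; communicator_cls stands in for RicciCommunicator, and nodes is the list of (id, folder) pairs the real code gets from objectItems):

def pick_ricci_agent(nodes, cluname, communicator_cls):
	# Return the first authenticated agent whose reported cluster
	# name or alias matches 'cluname'; otherwise return None.
	cluname = cluname.lower()
	for node in nodes:
		try:
			hostname = node[1].getId()
		except Exception:
			hostname = node[0]
		try:
			rc = communicator_cls(hostname)
			info = rc.cluster_info()
			cur_name = str(info[0]).strip().lower() or None
			cur_alias = str(info[1]).strip().lower() or None
			# Skip nodes that claim membership in some other cluster.
			if ((cur_name is not None and cluname != cur_name) and
					(cur_alias is not None and cluname != cur_alias)):
				continue
			if rc.authed():
				return rc
		except Exception:
			continue
	return None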
--- conga/luci/site/luci/Extensions/Attic/LuciZope.py 2007/05/04 19:10:24 1.1.2.1
+++ conga/luci/site/luci/Extensions/Attic/LuciZope.py 2007/05/08 22:19:35 1.1.2.2
@@ -1,6 +1,8 @@
-from ricci_communicator import CERTS_DIR_PATH
from AccessControl import getSecurityManager
+from ricci_communicator import CERTS_DIR_PATH
from conga_constants import PLONE_ROOT
+from LuciDB import allowed_systems
+
def siteIsSetup(self):
import os
@@ -36,77 +38,52 @@
pass
return False
-def allowed_systems(self, user, systems):
- allowed = []
- sm = getSecurityManager()
- user = sm.getUser()
- for system in systems:
- #Does this take too long?
- if user.has_permission('View', system[1]):
- allowed.append(system)
- return allowed
-
-def access_to_host_allowed(self, hostname, allowed_systems_list):
- for system in allowed_systems_list:
- if system[0] == hostname:
- if len(self.allowed_systems(None, [system])) == 1:
- return True
- else:
- return False
- return False
-
# removes systems that user is not authorized access to
def get_systems_statuses(self, systems, from_cache=False):
- from HelperFunctions import get_system_info
-
- CACHED_INDEX = '_get_systems_statuses()_cached_result_'
- session = self.REQUEST.SESSION
- if session.has_key(CACHED_INDEX):
- res = session[CACHED_INDEX]
- if res != None:
- session.set(CACHED_INDEX, None)
- if from_cache:
- return res
-
- allowed_sys_list = self.allowed_systems(self, None, systems)
- ss_list = get_system_info(self, allowed_sys_list)
- session.set(CACHED_INDEX, ss_list)
-
- return ss_list
-
-def set_persistent_var(self,
- var_name,
- default_value):
- request = self.REQUEST
- response = request.RESPONSE
- session = request.SESSION
-
- # TODO: add username into cookie_prefix, so users don't overwrite each other
- cookie_prefix = '__luci_storage_cookie_'
-
- value = default_value
- if request.has_key(var_name):
- value = request[var_name]
- elif session.has_key(var_name):
- value = session[var_name]
- elif request.cookies.has_key(cookie_prefix + var_name):
- value = request.cookies[cookie_prefix + var_name]
-
- session.set(var_name,
- value)
- response.setCookie(cookie_prefix + var_name,
- value,
- expires='Tue, 30 Jun 2060 12:00:00 GMT')
- return value
+ from HelperFunctions import get_system_info
+ CACHED_INDEX = '_get_systems_statuses()_cached_result_'
+ session = self.REQUEST.SESSION
+ if session.has_key(CACHED_INDEX):
+ res = session[CACHED_INDEX]
+ if res != None:
+ session.set(CACHED_INDEX, None)
+ if from_cache:
+ return res
+
+ allowed_sys_list = allowed_systems(self, None, systems)
+ ss_list = get_system_info(self, allowed_sys_list)
+ session.set(CACHED_INDEX, ss_list)
+ return ss_list
+
+def set_persistent_var(self, var_name, default_value):
+ request = self.REQUEST
+ response = request.RESPONSE
+ session = request.SESSION
+
+ # TODO: add username into cookie_prefix, so users don't overwrite each other
+ cookie_prefix = '__luci_storage_cookie_'
+
+ value = default_value
+ if request.has_key(var_name):
+ value = request[var_name]
+ elif session.has_key(var_name):
+ value = session[var_name]
+ elif request.cookies.has_key(cookie_prefix + var_name):
+ value = request.cookies[cookie_prefix + var_name]
+
+ session.set(var_name, value)
+ response.setCookie(cookie_prefix + var_name, value,
+ expires='Tue, 30 Jun 2060 12:00:00 GMT')
+ return value
# returns (str(float), units) that fits best,
# takes preferred units into account
def bytes_to_value_prefunits(self, bytes):
- from HelperFunctions import bytes_to_value_units, convert_bytes, get_units_multiplier
+ from HelperFunctions import bytes_to_value_units, convert_bytes, get_units_multiplier
- p_units = self.REQUEST.SESSION.get('preferred_size_units')
- dummy, units = bytes_to_value_units(bytes)
- if get_units_multiplier(units) > get_units_multiplier(p_units):
- units = p_units
- return (convert_bytes(bytes, units), units)
+ p_units = self.REQUEST.SESSION.get('preferred_size_units')
+ dummy, units = bytes_to_value_units(bytes)
+ if get_units_multiplier(units) > get_units_multiplier(p_units):
+ units = p_units
+ return (convert_bytes(bytes, units), units)
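The reindented set_persistent_var above resolves a value in a fixed precedence order: the current request's form variables win, then the session, then the persistent cookie, then the supplied default; whatever wins is written back to both the session and the cookie. A self-contained sketch of just the resolution step (plain dicts stand in for Zope's REQUEST, SESSION, and cookie objects):

def resolve_persistent_var(form, session, cookies, name, default):
	# Precedence: request form, then session, then cookie, then default.
	cookie_key = '__luci_storage_cookie_' + name
	if name in form:
		return form[name]
	if name in session:
		return session[name]
	return cookies.get(cookie_key, default)

print resolve_persistent_var({}, {'units': 'GB'}, {}, 'units', 'MB')  # -> GB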
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2007/05/04 19:10:24 1.255.2.2
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2007/05/08 22:19:35 1.255.2.3
@@ -1449,51 +1449,71 @@
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
% (request['URL'], CLUSTER_CONFIG, clustername))
-def validateFenceAdd(self, request):
- errors = list()
- messages = list()
- rc = None
+def LuciExtractCluName(self, request):
+ cluname = None
+ try:
+ cluname = request.form['clustername']
+ except Exception, e:
+ try:
+ cluname = request['clustername']
+ except Exception, e:
+ cluname = None
- try:
- model = request.SESSION.get('model')
- if not model:
- raise Exception, 'model is none'
- except Exception, e:
- model = None
- try:
- cluname = request.form['clustername']
- except:
- try:
- cluname = request['clustername']
- except:
- luci_log.debug_verbose('VFE: no model, no cluster name')
- return (False, {'errors': ['No cluster model was found.']})
+ if not cluname:
+ luci_log.debug_verbose('LECN0: no cluster name')
+ return cluname
+
+def LuciExtractCluModel(self, request, cluster_name=None):
+ model = None
+ if not cluster_name:
+ cluster_name = LuciExtractCluName(self, request)
+ if not cluster_name:
+ luci_log.debug_verbose('LECM0: no cluster name')
- try:
- model = getModelForCluster(self, cluname)
- except:
- model = None
+ try:
+ model = request.SESSION.get('model')
+ if not model:
+ model = None
+ except Exception, e:
+ model = None
- if model is None:
- luci_log.debug_verbose('VFE: unable to get model from session')
- return (False, {'errors': ['No cluster model was found.']})
+ try:
+ model = getModelForCluster(self, cluster_name)
+ if not model:
+ luci_log.debug_verbose('LECM1: empty model')
+ model = None
+ except Exception, e:
+ luci_log.debug_verbose('LECM2: no model: %s' % str(e))
+ model = None
+ return model
- form = None
- try:
- response = request.response
- form = request.form
- if not form:
- form = None
- raise Exception, 'no form was submitted'
- except:
- pass
+def LuciFenceAddReq(self, request):
+ model = LuciExtractCluModel(self, request)
+ if not model:
+ luci_log.debug_verbose('VFE0: no model')
+ return (False, [ 'No cluster model was found.' ])
- if form is None:
- luci_log.debug_verbose('VFE: no form was submitted')
- return (False, {'errors': ['No form was submitted']})
+ ret = validateFenceAdd(request, model)
+ if ret[0] is not True:
+ return (False, { 'errors': ret[1] })
+
+def validateFenceAdd(request, model):
+ errors = list()
+ messages = list()
+
+ if not request.form:
+ luci_log.debug_verbose('VFE: no form was submitted')
+ return (False, [ 'No form was submitted' ])
+
+ ret_code, ret_msgs = validateNewFenceDevice(request.form, model)
+ if ret_code != FD_VAL_SUCCESS:
+ if ret_msgs:
+ errors.extend(ret_msgs)
+ return (False, errors)
+ if ret_msgs:
+ messages.extend(ret_msgs)
+ return (True, ret_msgs)
- error_code, retobj = validateNewFenceDevice(form, model)
- if error_code == FD_VAL_SUCCESS:
try:
conf_str = model.exportModelAsString()
if not conf_str:
@@ -1503,38 +1523,7 @@
% str(e))
errors.append('Unable to store the new cluster configuration')
- try:
- clustername = model.getClusterName()
- if not clustername:
- raise Exception, 'cluster name from model.getClusterName() is blank'
- except Exception, e:
- luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
- errors.append('Unable to determine cluster name from model')
-
- if not rc:
- rc = getRicciAgent(self, clustername)
- if not rc:
- luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
- errors.append('Unable to contact a ricci agent for cluster %s' \
- % clustername)
-
- if rc:
- batch_id, result = rq.setClusterConf(rc, str(conf_str))
- if batch_id is None or result is None:
- luci_log.debug_verbose('VFA: setCluserConf: batchid or result is None')
- errors.append('Unable to propagate the new cluster configuration for %s' \
- % clustername)
- else:
- try:
- set_node_flag(self, clustername, rc.hostname(), batch_id,
- CLUSTER_CONFIG, 'Adding new fence device "%s"' % retobj)
- except:
- pass
-
response.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (request['URL'], FENCEDEV, clustername, retobj))
- else:
- errors.extend(retobj)
- return (False, {'errors': errors, 'messages': messages})
def validateFenceEdit(self, request):
@@ -3060,125 +3049,6 @@
return portaltabs
-def check_clusters(self, clusters):
- sm = AccessControl.getSecurityManager()
- user = sm.getUser()
-
- clist = list()
- for cluster in clusters:
- if user.has_permission('View', cluster):
- clist.append(cluster)
- return clist
-
-def cluster_permission_check(cluster):
- try:
- sm = AccessControl.getSecurityManager()
- user = sm.getUser()
- if user.has_permission('View', cluster):
- return True
- except:
- pass
- return False
-
-def getRicciAgent(self, clustername):
- #Check cluster permission here! return none if false
- path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-
- try:
- clusterfolder = self.restrictedTraverse(path)
- if not clusterfolder:
- luci_log.debug('GRA0: cluster folder %s for %s is missing.' \
- % (path, clustername))
- raise Exception, 'no cluster folder at %s' % path
- nodes = clusterfolder.objectItems('Folder')
- if len(nodes) < 1:
- luci_log.debug('GRA1: no cluster nodes for %s found.' % clustername)
- raise Exception, 'no cluster nodes were found at %s' % path
- except Exception, e:
- try:
- luci_log.debug('GRA2: cluster folder %s for %s is missing: %s.' \
- % (path, clustername, str(e)))
-
- if len(clusterfolder.objectItems()) < 1:
- clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
- clusters.manage_delObjects([clustername])
- except Exception, e:
- luci_log.debug_verbose('GRA3: %s' % str(e))
-
- return None
-
- cluname = clustername.lower()
-
- for node in nodes:
- try:
- hostname = node[1].getId()
- except:
- try:
- hostname = node[0]
- except Exception, e:
- luci_log.debug_verbose('GRA2a: %s' % str(e))
- continue
-
- try:
- rc = RicciCommunicator(hostname)
- if not rc:
- raise Exception, 'rc is None'
- ricci_hostname = rc.hostname()
- if not ricci_hostname:
- raise Exception, 'ricci_hostname is blank'
- except Exception, e:
- luci_log.debug('GRA3: ricci error: %s' % str(e))
- continue
-
- try:
- clu_info = rc.cluster_info()
- except Exception, e:
- luci_log.debug('GRA4: cluster_info error for %s: %s' \
- % (ricci_hostname, str(e)))
- continue
-
- try:
- cur_name = str(clu_info[0]).strip().lower()
- if not cur_name:
- raise Exception, 'cluster name is none for %s' % ricci_hostname
- except Exception, e:
- luci_log.debug_verbose('GRA4a: %s' % str(e))
- cur_name = None
-
- try:
- cur_alias = str(clu_info[1]).strip().lower()
- if not cur_alias:
- raise Exception, 'cluster alias is none'
- except Exception, e:
- luci_log.debug_verbose('GRA4b: %s' % str(e))
- cur_alias = None
-
- if (cur_name is not None and cluname != cur_name) and (cur_alias is not None and cluname != cur_alias):
- try:
- luci_log.debug('GRA5: %s reports it\'s in cluster %s:%s; we expect %s' \
- % (hostname, clu_info[0], clu_info[1], cluname))
- setNodeFlag(self, node, CLUSTER_NODE_NOT_MEMBER)
- except:
- pass
- continue
-
- try:
- if rc.authed():
- return rc
-
- try:
- setNodeFlag(node[1], CLUSTER_NODE_NEED_AUTH)
- except:
- pass
- raise Exception, '%s not authed' % rc.hostname()
- except Exception, e:
- luci_log.debug_verbose('GRA6: %s' % str(e))
- continue
-
- luci_log.debug('GRA7: no ricci agent could be found for cluster %s' \
- % cluname)
- return None
-
def getRicciAgentForCluster(self, req):
clustername = None
try:
@@ -3293,338 +3163,6 @@
return results
-def getClusterStatus(self, request, rc, cluname=None):
- try:
- doc = rq.getClusterStatusBatch(rc)
- if not doc:
- raise Exception, 'doc is None'
- except Exception, e:
- luci_log.debug_verbose('GCS0: error: %s' % str(e))
- doc = None
-
- if doc is None and not cluname:
- try:
- model = request.SESSION.get('model')
- cinfo = getClusterStatusModel(model)
- if not cinfo or len(cinfo) < 1:
- raise Exception, 'cinfo is None'
- return cinfo
- except Exception, e:
- luci_log.debug_verbose('GCS1: %s' % str(e))
- doc = None
-
- if not doc:
- try:
- clustername = cluname
- if clustername is None:
- try:
- clustername = request['clustername']
- except:
- try:
- clustername = request.form['clustername']
- except:
- pass
-
- if not clustername:
- raise Exception, 'unable to determine cluster name'
-
- cinfo = getClusterStatusDB(self, clustername)
- if not cinfo or len(cinfo) < 1:
- raise Exception, 'cinfo is None'
- return cinfo
- except Exception, e:
- luci_log.debug_verbose('GCS1a: unable to get cluster info from DB: %s' % str(e))
- return []
-
- results = list()
- vals = {}
- vals['type'] = "cluster"
-
- try:
- vals['alias'] = doc.firstChild.getAttribute('alias')
- except AttributeError, e:
- vals['alias'] = doc.firstChild.getAttribute('name')
-
- vals['votes'] = doc.firstChild.getAttribute('votes')
- vals['name'] = doc.firstChild.getAttribute('name')
- vals['minQuorum'] = doc.firstChild.getAttribute('minQuorum')
- vals['quorate'] = doc.firstChild.getAttribute('quorate')
- results.append(vals)
-
- for node in doc.firstChild.childNodes:
- if node.nodeName == 'node':
- vals = {}
- vals['type'] = "node"
- vals['clustered'] = node.getAttribute('clustered')
- vals['name'] = node.getAttribute('name')
- vals['online'] = node.getAttribute('online')
- vals['uptime'] = node.getAttribute('uptime')
- vals['votes'] = node.getAttribute('votes')
- results.append(vals)
- elif node.nodeName == 'service':
- vals = {}
- vals['type'] = 'service'
- vals['name'] = node.getAttribute('name')
- vals['nodename'] = node.getAttribute('nodename')
- vals['running'] = node.getAttribute('running')
- try:
- vals['is_vm'] = node.getAttribute('vm').lower() == 'true'
- except:
- vals['is_vm'] = False
- vals['failed'] = node.getAttribute('failed')
- vals['autostart'] = node.getAttribute('autostart')
- results.append(vals)
- return results
-
-def getServicesInfo(self, status, model, req):
- svc_map = {}
- maplist = list()
-
- try:
- baseurl = req['URL']
- if not baseurl:
- raise KeyError, 'is blank'
- except:
- baseurl = '/luci/cluster/index_html'
-
- try:
- nodes = model.getNodes()
- cluname = req['clustername']
- if not cluname:
- raise KeyError, 'is blank'
- except:
- try:
- cluname = req.form['clusterName']
- if not cluname:
- raise KeyError, 'is blank'
- except:
- cluname = '[error retrieving cluster name]'
-
- for item in status:
- if item['type'] == "service":
- itemmap = {}
- itemmap['name'] = item['name']
-
- cur_node = None
- if item['running'] == "true":
- cur_node = item['nodename']
- itemmap['running'] = "true"
- itemmap['nodename'] = cur_node
- itemmap['disableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_STOP)
- itemmap['restarturl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_RESTART)
- else:
- itemmap['enableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_START)
-
- itemmap['autostart'] = item['autostart']
-
- try:
- svc = model.retrieveServiceByName(item['name'])
- itemmap['cfgurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE)
- itemmap['cfgurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_DELETE)
- except:
- try:
- svc = model.retrieveVMsByName(item['name'])
- itemmap['is_vm'] = True
- itemmap['cfgurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], VM_CONFIG)
- itemmap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], VM_CONFIG)
- except:
- continue
-
- starturls = list()
- for node in nodes:
- cur_nodename = node.getName()
- if node.getName() != cur_node:
- starturl = {}
- starturl['nodename'] = cur_nodename
- starturl['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, item['name'], SERVICE_START, cur_nodename)
- starturls.append(starturl)
-
- if itemmap.has_key('is_vm') and itemmap['is_vm'] is True:
- migrate_url = { 'nodename': cur_nodename }
- migrate_url['migrate'] = True
- migrate_url['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, item['name'], SERVICE_MIGRATE, cur_nodename)
- starturls.append(migrate_url)
-
- itemmap['links'] = starturls
-
- dom = svc.getAttribute("domain")
- if dom is not None:
- itemmap['faildom'] = dom
- else:
- itemmap['faildom'] = "No Failover Domain"
- maplist.append(itemmap)
-
- svc_map['services'] = maplist
- return svc_map
-
-def get_fdom_names(model):
- return map(lambda x: x.getName(), model.getFailoverDomains())
-
-def getServiceInfo(self, status, model, req):
- from Products.Archetypes.utils import make_uuid
- #set up struct for service config page
- hmap = {}
- root_uuid = 'toplevel'
-
- try:
- baseurl = req['URL']
- if not baseurl:
- raise KeyError, 'is blank'
- except:
- baseurl = '/luci/cluster/index_html'
-
- try:
- hmap['fdoms'] = get_fdom_names(model)
- except:
- hmap['fdoms'] = list()
-
- try:
- cluname = req['clustername']
- if not cluname:
- raise KeyError, 'is blank'
- except KeyError, e:
- try:
- cluname = req.form['clusterName']
- if not cluname:
- raise
- except:
- cluname = '[error retrieving cluster name]'
-
- hmap['root_uuid'] = root_uuid
- # uuids for the service page needed when new resources are created
- hmap['uuid_list'] = map(lambda x: make_uuid('resource'), xrange(30))
-
- try:
- servicename = req['servicename']
- except KeyError, e:
- hmap['resource_list'] = {}
- return hmap
-
- for item in status:
- innermap = {}
- if item['type'] == 'service':
- if item['name'] == servicename:
- hmap['name'] = servicename
- hmap['autostart'] = item['autostart']
-
- starturls = list()
- if item['running'] == 'true':
- hmap['running'] = 'true'
- nodename = item['nodename']
- innermap['current'] = 'Running on %s' % nodename
-
- innermap['disableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_STOP)
- innermap['restarturl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_RESTART)
- innermap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_DELETE)
-
- #In this case, determine where it can run...
- nodes = model.getNodes()
- for node in nodes:
- if node.getName() != nodename:
- starturl = {}
- cur_nodename = node.getName()
- starturl['nodename'] = cur_nodename
- starturl['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_START, cur_nodename)
- starturls.append(starturl)
-
- if item.has_key('is_vm') and item['is_vm'] is True:
- migrate_url = { 'nodename': cur_nodename }
- migrate_url['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_MIGRATE, cur_nodename)
- migrate_url['migrate'] = True
- starturls.append(migrate_url)
- innermap['links'] = starturls
- else:
- #Do not set ['running'] in this case...ZPT will detect it is missing
- innermap['current'] = "Stopped"
- innermap['enableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_START)
- innermap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_DELETE)
-
- nodes = model.getNodes()
- starturls = list()
- for node in nodes:
- starturl = {}
- cur_nodename = node.getName()
-
- starturl['nodename'] = cur_nodename
- starturl['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_START, cur_nodename)
- starturls.append(starturl)
-
- if item.has_key('is_vm') and item['is_vm'] is True:
- migrate_url = { 'nodename': cur_nodename }
- migrate_url['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_MIGRATE, cur_nodename)
- migrate_url['migrate'] = True
- starturls.append(migrate_url)
- innermap['links'] = starturls
- hmap['innermap'] = innermap
-
- #Now build hashes for resources under service.
- #first get service by name from model
- svc = model.getService(servicename)
- try:
- hmap['domain'] = svc.getAttribute('domain')
- except:
- hmap['domain'] = None
-
- try:
- hmap['recovery'] = svc.getAttribute('recovery')
- except:
- hmap['recovery'] = None
-
- try:
- if int(svc.getAttribute('exclusive')):
- hmap['exclusive'] = 'true'
- else:
- hmap['exclusive'] = 'false'
- except:
- hmap['exclusive'] = 'false'
-
- resource_list = list()
- if svc is not None:
- indent_ctr = 0
- children = svc.getChildren()
- for child in children:
- recurse_resources(root_uuid, child, resource_list, indent_ctr)
-
- hmap['resource_list'] = resource_list
- return hmap
-
-def recurse_resources(parent_uuid, child, resource_list, indent_ctr, parent=None):
- #First, add the incoming child as a resource
- #Next, check for children of it
- #Call yourself on every children
- #then return
- rc_map = {}
- if parent is not None:
- rc_map['parent'] = parent
- rc_map['name'] = child.getName()
-
- #Note: Final version needs all resource attrs
- if child.isRefObject() == True:
- rc_map['ref_object'] = True
- rc_map['tag_name'] = child.getObj().TAG_NAME
- rc_map['type'] = child.getObj().getResourceType()
- rc_map['attrs'] = child.getObj().getAttributes()
- else:
- rc_map['tag_name'] = child.TAG_NAME
- rc_map['type'] = child.getResourceType()
- rc_map['attrs'] = child.getAttributes()
-
- rc_map['indent_ctr'] = indent_ctr
-
- rc_map['uuid'] = make_uuid('resource')
- rc_map['parent_uuid'] = parent_uuid
-
- resource_list.append(rc_map)
- kids = child.getChildren()
- child_depth = 0
- new_indent_ctr = indent_ctr + 1
- for kid in kids:
- cdepth = recurse_resources(rc_map['uuid'], kid, resource_list, new_indent_ctr, child)
- child_depth = max(cdepth, child_depth)
-
- rc_map['max_depth'] = child_depth
- return child_depth + 1
-
def serviceStart(self, rc, req):
svcname = None
try:
@@ -3820,111 +3358,6 @@
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
% (req['URL'], SERVICE_LIST, cluname))
-def getFdomInfo(self, model, request):
- fhash = {}
- fhash['members'] = {}
-
- try:
- fdom = model.getFailoverDomainByName(request['fdomname'])
- except Exception, e:
- luci_log.debug_verbose('getFdomInfo0: %s' % str(e))
- return fhash
-
- fhash['name'] = fdom.getName()
-
- ordered_attr = fdom.getAttribute('ordered')
- if ordered_attr is not None and (ordered_attr == "true" or ordered_attr == "1"):
- fhash['prioritized'] = '1'
- else:
- fhash['prioritized'] = '0'
-
- restricted_attr = fdom.getAttribute('restricted')
- if restricted_attr is not None and (restricted_attr == "true" or restricted_attr == "1"):
- fhash['restricted'] = '1'
- else:
- fhash['restricted'] = '0'
-
- nodes = fdom.getChildren()
- for node in nodes:
- try:
- priority = node.getAttribute('priority')
- except:
- priority = '1'
- fhash['members'][node.getName()] = { 'priority': priority }
- return fhash
-
-def getFdomsInfo(self, model, request, clustatus):
- slist = list()
- nlist = list()
- for item in clustatus:
- if item['type'] == "node":
- nlist.append(item)
- elif item['type'] == "service":
- slist.append(item)
- fdomlist = list()
- clustername = request['clustername']
- baseurl = request['URL']
- fdoms = model.getFailoverDomains()
- svcs = model.getServices()
- for fdom in fdoms:
- fdom_map = {}
- fdom_map['name'] = fdom.getName()
- fdom_map['cfgurl'] = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
- % (baseurl, FDOM, clustername, fdom.getName())
- ordered_attr = fdom.getAttribute('ordered')
- restricted_attr = fdom.getAttribute('restricted')
- if ordered_attr is not None and (ordered_attr == "true" or ordered_attr == "1"):
- fdom_map['ordered'] = True
- else:
- fdom_map['ordered'] = False
- if restricted_attr is not None and (restricted_attr == "true" or restricted_attr == "1"):
- fdom_map['restricted'] = True
- else:
- fdom_map['restricted'] = False
- nodes = fdom.getChildren()
- nodelist = list()
- for node in nodes:
- nodesmap = {}
- ndname = node.getName()
- for nitem in nlist:
- if nitem['name'] == ndname:
- break
- nodesmap['nodename'] = ndname
- nodesmap['nodecfgurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
- % (baseurl, clustername, ndname, NODE)
- if nitem['clustered'] == "true":
- nodesmap['status'] = NODE_ACTIVE
- elif nitem['online'] == "false":
- nodesmap['status'] = NODE_UNKNOWN
- else:
- nodesmap['status'] = NODE_INACTIVE
- priority_attr = node.getAttribute('priority')
- if priority_attr is not None:
- nodesmap['priority'] = "0"
- nodelist.append(nodesmap)
- fdom_map['nodeslist'] = nodelist
-
- svclist = list()
- for svc in svcs:
- svcname = svc.getName()
- for sitem in slist:
- if sitem['name'] == svcname:
- break #found more info about service...
-
- domain = svc.getAttribute("domain")
- if domain is not None:
- if domain == fdom.getName():
- svcmap = {}
- svcmap['name'] = svcname
- svcmap['status'] = sitem['running']
- svcmap['svcurl'] = '%s?pagetype=%s&clustername=%s&servicename=%s' \
- % (baseurl, SERVICE, clustername, svcname)
- svcmap['location'] = sitem['nodename']
- svclist.append(svcmap)
- fdom_map['svclist'] = svclist
- fdomlist.append(fdom_map)
- return fdomlist
-
def clusterTaskProcess(self, model, request):
try:
task = request['task']
@@ -3975,248 +3408,6 @@
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
% (request['URL'], redirect_page, model.getClusterName()))
-def getClusterInfo(self, model, req):
- try:
- cluname = req[CLUNAME]
- except:
- try:
- cluname = req.form['clustername']
- except:
- try:
- cluname = req.form['clusterName']
- except:
- luci_log.debug_verbose('GCI0: unable to determine cluster name')
- return {}
-
- clumap = {}
- if model is None:
- try:
- model = getModelForCluster(self, cluname)
- if not model:
- raise Exception, 'model is none'
- req.SESSION.set('model', model)
- except Exception, e:
- luci_log.debug_verbose('GCI1: unable to get model for cluster %s: %s' % (cluname, str(e)))
- return {}
- else:
- totem = model.getTotemPtr()
- if totem:
- clumap['totem'] = totem.getAttributes()
-
- prop_baseurl = '%s?pagetype=%s&clustername=%s&' \
- % (req['URL'], CLUSTER_CONFIG, cluname)
- basecluster_url = '%stab=%s' % (prop_baseurl, PROP_GENERAL_TAB)
- #needed:
- clumap['basecluster_url'] = basecluster_url
- #name field
- clumap['clustername'] = model.getClusterAlias()
- #config version
- cp = model.getClusterPtr()
- clumap['config_version'] = cp.getConfigVersion()
- #-------------
- #new cluster params - if rhel5
- #-------------
-
- clumap['fence_xvmd'] = model.hasFenceXVM()
- gulm_ptr = model.getGULMPtr()
- if not gulm_ptr:
- #Fence Daemon Props
- fencedaemon_url = '%stab=%s' % (prop_baseurl, PROP_FENCE_TAB)
- clumap['fencedaemon_url'] = fencedaemon_url
- fdp = model.getFenceDaemonPtr()
- pjd = fdp.getAttribute('post_join_delay')
- if pjd is None:
- pjd = "6"
- pfd = fdp.getAttribute('post_fail_delay')
- if pfd is None:
- pfd = "0"
- #post join delay
- clumap['pjd'] = pjd
- #post fail delay
- clumap['pfd'] = pfd
-
- #-------------
- #if multicast
- multicast_url = '%stab=%s' % (prop_baseurl, PROP_MCAST_TAB)
- clumap['multicast_url'] = multicast_url
- #mcast addr
- is_mcast = model.isMulticast()
- if is_mcast:
- clumap['mcast_addr'] = model.getMcastAddr()
- clumap['is_mcast'] = "True"
- else:
- clumap['is_mcast'] = "False"
- clumap['mcast_addr'] = "1.2.3.4"
- clumap['gulm'] = False
- else:
- #-------------
- #GULM params (rhel4 only)
- lockserv_list = list()
- clunodes = model.getNodes()
- gulm_lockservs = map(lambda x: x.getName(), gulm_ptr.getChildren())
- lockserv_list = map(lambda x: (x, True), gulm_lockservs)
- for node in clunodes:
- n = node.getName()
- if not n in gulm_lockservs:
- lockserv_list.append((n, False))
- clumap['gulm'] = True
- clumap['gulm_url'] = '%stab=%s' % (prop_baseurl, PROP_GULM_TAB)
- clumap['gulm_lockservers'] = lockserv_list
-
- #-------------
- #quorum disk params
- quorumd_url = '%stab=%s' % (prop_baseurl, PROP_QDISK_TAB)
- clumap['quorumd_url'] = quorumd_url
- is_quorumd = model.isQuorumd()
- clumap['is_quorumd'] = is_quorumd
- clumap['interval'] = ""
- clumap['tko'] = ""
- clumap['votes'] = ""
- clumap['min_score'] = ""
- clumap['device'] = ""
- clumap['label'] = ""
-
- #list struct for heuristics...
- hlist = list()
-
- if is_quorumd:
- qdp = model.getQuorumdPtr()
- interval = qdp.getAttribute('interval')
- if interval is not None:
- clumap['interval'] = interval
-
- tko = qdp.getAttribute('tko')
- if tko is not None:
- clumap['tko'] = tko
-
- votes = qdp.getAttribute('votes')
- if votes is not None:
- clumap['votes'] = votes
-
- min_score = qdp.getAttribute('min_score')
- if min_score is not None:
- clumap['min_score'] = min_score
-
- device = qdp.getAttribute('device')
- if device is not None:
- clumap['device'] = device
-
- label = qdp.getAttribute('label')
- if label is not None:
- clumap['label'] = label
-
- heuristic_kids = qdp.getChildren()
-
- for kid in heuristic_kids:
- hmap = {}
- hprog = kid.getAttribute('program')
- if hprog is None:
- continue
-
- hscore = kid.getAttribute('score')
- hmap['hprog'] = hprog
- if hscore is not None:
- hmap['hscore'] = hscore
- else:
- hmap['hscore'] = ""
-
- hinterval = kid.getAttribute('interval')
- if hinterval is not None:
- hmap['hinterval'] = hinterval
- else:
- hmap['hinterval'] = ""
- hlist.append(hmap)
- clumap['hlist'] = hlist
-
- return clumap
-
-def getClustersInfo(self, status, req):
- clu_map = {}
- nodelist = list()
- svclist = list()
- clulist = list()
- baseurl = req['URL']
-
- for item in status:
- if item['type'] == "node":
- nodelist.append(item)
- elif item['type'] == "service":
- svclist.append(item)
- elif item['type'] == "cluster":
- clulist.append(item)
- else:
- continue
- if len(clulist) < 1:
- return {}
- clu = clulist[0]
- if 'error' in clu:
- clu_map['error'] = True
- clustername = clu['name']
- if clu['alias'] != "":
- clu_map['clusteralias'] = clu['alias']
- else:
- clu_map['clusteralias'] = clustername
- clu_map['clustername'] = clustername
- if clu['quorate'] == "true":
- clu_map['status'] = "Quorate"
- clu_map['running'] = "true"
- else:
- clu_map['status'] = "Not Quorate"
- clu_map['running'] = "false"
- clu_map['votes'] = clu['votes']
- clu_map['minquorum'] = clu['minQuorum']
-
- clu_map['clucfg'] = '%s?pagetype=%s&clustername=%s' \
- % (baseurl, CLUSTER_CONFIG, clustername)
-
- clu_map['restart_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
- % (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_RESTART)
- clu_map['stop_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
- % (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_STOP)
- clu_map['start_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
- % (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_START)
- clu_map['delete_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
- % (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_DELETE)
-
- svc_dict_list = list()
- for svc in svclist:
- svc_dict = {}
- svc_dict['nodename'] = svc['nodename']
- svcname = svc['name']
- svc_dict['name'] = svcname
- svc_dict['srunning'] = svc['running']
- svc_dict['servicename'] = svcname
-
- if svc.has_key('is_vm') and svc['is_vm'] is True:
- target_page = VM_CONFIG
- else:
- target_page = SERVICE
-
- svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
- % (baseurl, target_page, clustername, svcname)
- svc_dict['svcurl'] = svcurl
- svc_dict_list.append(svc_dict)
- clu_map['currentservices'] = svc_dict_list
- node_dict_list = list()
-
- for item in nodelist:
- nmap = {}
- name = item['name']
- nmap['nodename'] = name
- cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
- % (baseurl, NODE, clustername, name)
- nmap['configurl'] = cfgurl
- if item['clustered'] == "true":
- nmap['status'] = NODE_ACTIVE
- elif item['online'] == "false":
- nmap['status'] = NODE_UNKNOWN
- else:
- nmap['status'] = NODE_INACTIVE
- node_dict_list.append(nmap)
-
- clu_map['currentnodes'] = node_dict_list
- return clu_map
-
def nodeLeave(self, rc, clustername, nodename_resolved):
path = '%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename_resolved)
@@ -4707,657 +3898,6 @@
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
% (request['URL'], NODES, clustername))
-def getNodeInfo(self, model, status, request):
- infohash = {}
- item = None
- baseurl = request['URL']
- nodestate = NODE_ACTIVE
- svclist = list()
- for thing in status:
- if thing['type'] == "service":
- svclist.append(thing)
-
- #Get cluster name and node name from request
- try:
- clustername = request['clustername']
- nodename = request['nodename']
- except Exception, e:
- luci_log.debug_verbose('getNodeInfo0: %s' % str(e))
- return {}
-
- #extract correct node line from cluster status
- found = False
- for item in status:
- if (item['type'] == "node") and (item['name'] == nodename):
- found = True
- break
- if found == False:
- luci_log.debug_verbose('getNodeInfo1: Unable to resolve node name in cluster status')
- return {}
-
- #Now determine state of node...
- if item['online'] == "false":
- nodestate = NODE_UNKNOWN
- elif item['clustered'] == "true":
- nodestate = NODE_ACTIVE
- else:
- nodestate = NODE_INACTIVE
-
- infohash['nodestate'] = nodestate
- infohash['nodename'] = nodename
-
- #set up drop down links
- if nodestate == NODE_ACTIVE:
- infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, nodename, clustername)
- infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
- infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
- infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
- elif nodestate == NODE_INACTIVE:
- infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, nodename, clustername)
- infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
- infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
- infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
- else:
- infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
-
- #figure out current services running on this node
- svc_dict_list = list()
- for svc in svclist:
- if svc['nodename'] == nodename:
- svc_dict = {}
- svcname = svc['name']
- svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
- % (baseurl, SERVICE, clustername, svcname)
- svc_dict['servicename'] = svcname
- svc_dict['svcurl'] = svcurl
- svc_dict_list.append(svc_dict)
-
- infohash['currentservices'] = svc_dict_list
-
- fdom_dict_list = list()
- gulm_cluster = False
- if model:
- gulm_cluster = model.getGULMPtr() is not None
- try:
- infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
- except:
- infohash['gulm_lockserver'] = False
- #next is faildoms
- fdoms = model.getFailoverDomainsForNode(nodename)
- for fdom in fdoms:
- fdom_dict = {}
- fdom_dict['name'] = fdom.getName()
- fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
- % (baseurl, FDOM_CONFIG, clustername, fdom.getName())
- fdom_dict['fdomurl'] = fdomurl
- fdom_dict_list.append(fdom_dict)
- else:
- infohash['gulm_lockserver'] = False
-
- infohash['fdoms'] = fdom_dict_list
-
- #return infohash
- infohash['d_states'] = None
-
- nodename_resolved = resolve_nodename(self, clustername, nodename)
-
- if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
- #call service module on node and find out which daemons are running
- try:
- rc = RicciCommunicator(nodename_resolved)
- if not rc:
- raise Exception, 'rc is none'
- except Exception, e:
- rc = None
- infohash['ricci_error'] = True
- luci_log.info('Error connecting to %s: %s' \
- % (nodename_resolved, str(e)))
-
- if rc is not None:
- dlist = list()
- dlist.append("ccsd")
- if not gulm_cluster:
- dlist.append("cman")
- dlist.append("fenced")
- else:
- dlist.append("lock_gulmd")
- dlist.append("rgmanager")
- states = rq.getDaemonStates(rc, dlist)
- infohash['d_states'] = states
- else:
- infohash['ricci_error'] = True
-
- infohash['logurl'] = '/luci/logs/?nodename=%s&clustername=%s' \
- % (nodename_resolved, clustername)
- return infohash
-
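The four task URLs above are rebuilt with identical formatting in both the active and inactive branches, and again per node (under slightly different key names) in getNodesInfo below. A minimal sketch of a helper both could share (hypothetical name, not part of this patch):

    def node_task_urls(baseurl, clustername, nodename, tasks):
        # Build NODE_PROCESS dropdown URLs for the given (key, task) pairs.
        urls = {}
        for key, task in tasks:
            urls[key] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
                % (baseurl, NODE_PROCESS, task, nodename, clustername)
        return urls

so the active branch reduces to, e.g.:

    infohash.update(node_task_urls(baseurl, clustername, nodename,
        [('jl_url', NODE_LEAVE_CLUSTER), ('reboot_url', NODE_REBOOT),
         ('fence_url', NODE_FENCE), ('delete_url', NODE_DELETE)]))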
-def getNodesInfo(self, model, status, req):
- resultlist = list()
- nodelist = list()
- svclist = list()
-
- #Sort into lists...
- for item in status:
- if item['type'] == "node":
- nodelist.append(item)
- elif item['type'] == "service":
- svclist.append(item)
- else:
- continue
-
- try:
- clustername = req['clustername']
- if not clustername:
- raise KeyError, 'clustername is blank'
- except:
- try:
- clustername = req.form['clustername']
- if not clustername:
- raise KeyError, 'clustername is blank'
- except:
- try:
- clustername = req.form['clusterName']
- except:
- try:
- clustername = model.getClusterName()
- except:
- luci_log.debug_verbose('GNI0: unable to determine cluster name')
- return {}
-
- for item in nodelist:
- nl_map = {}
- name = item['name']
- nl_map['nodename'] = name
- try:
- nl_map['gulm_lockserver'] = model.isNodeLockserver(name)
- except:
- nl_map['gulm_lockserver'] = False
-
- try:
- baseurl = req['URL']
- except:
- baseurl = '/luci/cluster/index_html'
-
- cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
- % (baseurl, NODE, clustername, name)
- nl_map['configurl'] = cfgurl
- nl_map['fenceurl'] = '%s#fence' % cfgurl
- if item['clustered'] == "true":
- nl_map['status'] = NODE_ACTIVE
- nl_map['status_str'] = NODE_ACTIVE_STR
- elif item['online'] == "false":
- nl_map['status'] = NODE_UNKNOWN
- nl_map['status_str'] = NODE_UNKNOWN_STR
- else:
- nl_map['status'] = NODE_INACTIVE
- nl_map['status_str'] = NODE_INACTIVE_STR
-
- nodename_resolved = resolve_nodename(self, clustername, name)
-
- nl_map['logurl'] = '/luci/logs?nodename=%s&clustername=%s' \
- % (nodename_resolved, clustername)
-
- #set up URLs for dropdown menu...
- if nl_map['status'] == NODE_ACTIVE:
- nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, name, clustername)
- nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
- nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
- nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
- elif nl_map['status'] == NODE_INACTIVE:
- nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, name, clustername)
- nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
- nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
- nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
- else:
- nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
- % (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
-
- #figure out current services running on this node
- svc_dict_list = list()
- for svc in svclist:
- if svc['nodename'] == name:
- svc_dict = {}
- svcname = svc['name']
- svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
- % (baseurl, SERVICE, clustername, svcname)
- svc_dict['servicename'] = svcname
- svc_dict['svcurl'] = svcurl
- svc_dict_list.append(svc_dict)
-
- nl_map['currentservices'] = svc_dict_list
- #next is faildoms
-
- if model:
- fdoms = model.getFailoverDomainsForNode(name)
- else:
- nl_map['ricci_error'] = True
- fdoms = list()
- fdom_dict_list = list()
- for fdom in fdoms:
- fdom_dict = {}
- fdom_dict['name'] = fdom.getName()
- fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
- % (baseurl, FDOM_CONFIG, clustername, fdom.getName())
- fdom_dict['fdomurl'] = fdomurl
- fdom_dict_list.append(fdom_dict)
-
- nl_map['fdoms'] = fdom_dict_list
- resultlist.append(nl_map)
-
- return resultlist
-
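The nested clustername fallbacks at the top of getNodesInfo (request, then form under both key spellings, then the model) recur throughout this file. A minimal sketch of a consolidated lookup (hypothetical helper, not part of this patch; it assumes missing REQUEST keys raise KeyError, as the existing handlers expect):

    def get_cluster_name(req, model):
        # Try the request, then the form, then fall back to the model.
        try:
            if req['clustername']:
                return req['clustername']
        except KeyError:
            pass
        for key in ('clustername', 'clusterName'):
            try:
                if req.form[key]:
                    return req.form[key]
            except KeyError:
                pass
        try:
            return model.getClusterName()
        except Exception:
            return None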
-def getFence(self, model, request):
- if not model:
- luci_log.debug_verbose('getFence0: model is None')
- return {}
-
- fence_map = {}
- fencename = request['fencename']
- fencedevs = model.getFenceDevices()
- for fencedev in fencedevs:
- if fencedev.getName().strip() == fencename:
- fence_map = fencedev.getAttributes()
- try:
- fence_map['pretty_name'] = FENCE_OPTS[fencedev.getAgentType()]
- except:
- fence_map['unknown'] = True
- fence_map['pretty_name'] = fencedev.getAgentType()
-
- nodes_used = list()
- nodes = model.getNodes()
- for node in nodes:
- flevels = node.getFenceLevels()
- for flevel in flevels: #These are the method blocks...
- kids = flevel.getChildren()
- for kid in kids: #These are actual devices in each level
- if kid.getName().strip() == fencedev.getName().strip():
- #See if this fd already has an entry for this node
- found_duplicate = False
- for item in nodes_used:
- if item['nodename'] == node.getName().strip():
- found_duplicate = True
- if found_duplicate == True:
- continue
- baseurl = request['URL']
- clustername = model.getClusterName()
- node_hash = {}
- cur_nodename = node.getName().strip()
- node_hash['nodename'] = cur_nodename
- node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
- % (baseurl, clustername, cur_nodename, NODE)
- nodes_used.append(node_hash)
-
- fence_map['nodesused'] = nodes_used
- return fence_map
-
- return fence_map
-
-def getFDForInstance(fds, name):
- for fd in fds:
- if fd.getName().strip() == name:
- return fd
-
- raise KeyError, name
-
-def getFenceInfo(self, model, request):
- if not model:
- luci_log.debug_verbose('getFenceInfo00: model is None')
- return {}
-
- try:
- clustername = request['clustername']
- except:
- try:
- clustername = request.form['clustername']
- except:
- luci_log.debug_verbose('getFenceInfo0: unable to determine cluster name')
- return {}
-
- try:
- baseurl = request['URL']
- except Exception, e:
- luci_log.debug_verbose('getFenceInfo1: no request.URL')
- return {}
-
- fence_map = {}
- level1 = list() #First level fence devices
- level2 = list() #Second level fence devices
- shared1 = list() #List of available sharable fence devs not used in level1
- shared2 = list() #List of available sharable fence devs not used in level2
- fence_map['level1'] = level1
- fence_map['level2'] = level2
- fence_map['shared1'] = shared1
- fence_map['shared2'] = shared2
-
- major_num = 1
- minor_num = 100
-
- try:
- nodename = request['nodename']
- except:
- try:
- nodename = request.form['nodename']
- except Exception, e:
- luci_log.debug_verbose('getFenceInfo2: unable to extract nodename: %s' \
- % str(e))
- return {}
-
- #Here we need to get fences for a node - just the first two levels
- #Each level has its own list of fence devs used in that level
- #For each fence dev, a list of instance structs is appended
- #In addition, for each level, a list of available but unused fence devs
- #is returned.
- try:
- node = model.retrieveNodeByName(nodename)
- except GeneralError, e:
- luci_log.debug_verbose('getFenceInfo3: unable to find node name %s in current node list: %s' % (str(nodename), str(e)))
- return {}
-
- fds = model.getFenceDevices()
-
- levels = node.getFenceLevels()
- len_levels = len(levels)
-
- if len_levels == 0:
- return fence_map
-
- if len_levels >= 1:
- first_level = levels[0]
- kids = first_level.getChildren()
- last_kid_fd = None #This is a marker for allowing multiple instances
- #beneath a fencedev
- for kid in kids:
- instance_name = kid.getName().strip()
- try:
- fd = getFDForInstance(fds, instance_name)
- except:
- fd = None #Set to None in case last time thru loop
- continue
-
- if fd is not None:
- if fd.isShared() == False: #Not a shared dev...build struct and add
- fencedev = {}
- try:
- fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
- except:
- fencedev['unknown'] = True
- fencedev['prettyname'] = fd.getAgentType()
- fencedev['isShared'] = False
- fencedev['id'] = str(major_num)
- major_num = major_num + 1
- devattrs = fd.getAttributes()
- kees = devattrs.keys()
- for kee in kees:
- fencedev[kee] = devattrs[kee]
- kidattrs = kid.getAttributes()
- kees = kidattrs.keys()
- for kee in kees:
- if kee == "name":
- continue #Don't duplicate name attr
- fencedev[kee] = kidattrs[kee]
- #This fencedev struct is complete, and needs to be placed on the
- #level1 Q. Because it is non-shared, we should set last_kid_fd
- #to none.
- last_kid_fd = None
- level1.append(fencedev)
- else: #This dev is shared
- if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()): #just append a new instance struct to last_kid_fd
- instance_struct = {}
- instance_struct['id'] = str(minor_num)
- minor_num = minor_num + 1
- kidattrs = kid.getAttributes()
- kees = kidattrs.keys()
- for kee in kees:
- if kee == "name":
- continue
- instance_struct[kee] = kidattrs[kee]
- #Now just add this struct to last_kid_fd and reset last_kid_fd
- ilist = last_kid_fd['instance_list']
- ilist.append(instance_struct)
- #last_kid_fd = fd
- continue
- else: #Shared, but not used above...so we need a new fencedev struct
- fencedev = {}
- try:
- fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
- except:
- fencedev['unknown'] = True
- fencedev['prettyname'] = fd.getAgentType()
- fencedev['isShared'] = True
- fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
- % (baseurl, clustername, fd.getName().strip(), FENCEDEV)
- fencedev['id'] = str(major_num)
- major_num = major_num + 1
- inlist = list()
- fencedev['instance_list'] = inlist
- devattrs = fd.getAttributes()
- kees = devattrs.keys()
- for kee in kees:
- fencedev[kee] = devattrs[kee]
- instance_struct = {}
- kidattrs = kid.getAttributes()
- kees = kidattrs.keys()
- for kee in kees:
- if kee == "name":
- continue
- instance_struct[kee] = kidattrs[kee]
- inlist.append(instance_struct)
- level1.append(fencedev)
- last_kid_fd = fencedev
- continue
- fence_map['level1'] = level1
-
- #level1 list is complete now, but it is still necessary to build shared1
- for fd in fds:
- isUnique = True
- if fd.isShared() == False:
- continue
- for fdev in level1:
- if fd.getName().strip() == fdev['name']:
- isUnique = False
- break
- if isUnique == True:
- shared_struct = {}
- shared_struct['name'] = fd.getName().strip()
- agentname = fd.getAgentType()
- shared_struct['agent'] = agentname
- try:
- shared_struct['prettyname'] = FENCE_OPTS[agentname]
- except:
- shared_struct['unknown'] = True
- shared_struct['prettyname'] = agentname
- shared1.append(shared_struct)
- fence_map['shared1'] = shared1
-
- #YUK: This next section violates the DRY rule, :-(
- if len_levels >= 2:
- second_level = levels[1]
- kids = second_level.getChildren()
- last_kid_fd = None #This is a marker for allowing multiple instances
- #beneath a fencedev
- for kid in kids:
- instance_name = kid.getName().strip()
- try:
- fd = getFDForInstance(fds, instance_name)
- except:
- fd = None #Set to None in case last time thru loop
- continue
- if fd is not None:
- if fd.isShared() == False: #Not a shared dev...build struct and add
- fencedev = {}
- try:
- fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
- except:
- fencedev['unknown'] = True
- fencedev['prettyname'] = fd.getAgentType()
- fencedev['isShared'] = False
- fencedev['id'] = str(major_num)
- major_num = major_num + 1
- devattrs = fd.getAttributes()
- kees = devattrs.keys()
- for kee in kees:
- fencedev[kee] = devattrs[kee]
- kidattrs = kid.getAttributes()
- kees = kidattrs.keys()
- for kee in kees:
- if kee == "name":
- continue #Don't duplicate name attr
- fencedev[kee] = kidattrs[kee]
- #This fencedev struct is complete, and needs to be placed on the
- #level2 Q. Because it is non-shared, we should set last_kid_fd
- #to none.
- last_kid_fd = None
- level2.append(fencedev)
- else: #This dev is shared
- if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()): #just append a new instance struct to last_kid_fd
- instance_struct = {}
- instance_struct['id'] = str(minor_num)
- minor_num = minor_num + 1
- kidattrs = kid.getAttributes()
- kees = kidattrs.keys()
- for kee in kees:
- if kee == "name":
- continue
- instance_struct[kee] = kidattrs[kee]
- #Now just add this struct to last_kid_fd and reset last_kid_fd
- ilist = last_kid_fd['instance_list']
- ilist.append(instance_struct)
- #last_kid_fd = fd
- continue
- else: #Shared, but not used above...so we need a new fencedev struct
- fencedev = {}
- try:
- fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
- except:
- fencedev['unknown'] = True
- fencedev['prettyname'] = fd.getAgentType()
- fencedev['isShared'] = True
- fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
- % (baseurl, clustername, fd.getName().strip(), FENCEDEV)
- fencedev['id'] = str(major_num)
- major_num = major_num + 1
- inlist = list()
- fencedev['instance_list'] = inlist
- devattrs = fd.getAttributes()
- kees = devattrs.keys()
- for kee in kees:
- fencedev[kee] = devattrs[kee]
- instance_struct = {}
- kidattrs = kid.getAttributes()
- kees = kidattrs.keys()
- for kee in kees:
- if kee == "name":
- continue
- instance_struct[kee] = kidattrs[kee]
- inlist.append(instance_struct)
- level2.append(fencedev)
- last_kid_fd = fencedev
- continue
- fence_map['level2'] = level2
-
- #level2 list is complete but like above, we need to build shared2
- for fd in fds:
- isUnique = True
- if fd.isShared() == False:
- continue
- for fdev in level2:
- if fd.getName().strip() == fdev['name']:
- isUnique = False
- break
- if isUnique == True:
- shared_struct = {}
- shared_struct['name'] = fd.getName().strip()
- agentname = fd.getAgentType()
- shared_struct['agent'] = agentname
- try:
- shared_struct['prettyname'] = FENCE_OPTS[agentname]
- except:
- shared_struct['unknown'] = True
- shared_struct['prettyname'] = agentname
- shared2.append(shared_struct)
- fence_map['shared2'] = shared2
-
- return fence_map
-
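As the "YUK" comment above concedes, the level-1 and level-2 loops are near-copies. A minimal sketch of the shared loop they could be factored into (hypothetical; it assumes getAttributes() returns a plain dict, as the .keys() calls above suggest, and omits the cfgurl wiring for shared devices):

    def build_fence_level(kids, fds, counters):
        # counters is a mutable [major, minor] pair so ids stay unique
        # across both levels, matching the original numbering scheme.
        level = []
        last_fd = None
        for kid in kids:
            try:
                fd = getFDForInstance(fds, kid.getName().strip())
            except:
                last_fd = None
                continue
            # Instance attributes, minus the duplicated name attr.
            inst = {}
            for kee, val in kid.getAttributes().items():
                if kee != 'name':
                    inst[kee] = val
            shared = fd.isShared()
            if shared and last_fd is not None and \
                    fd.getName().strip() == last_fd['name'].strip():
                # Another instance of the same shared device: group it
                # under the previous fencedev struct.
                inst['id'] = str(counters[1])
                counters[1] += 1
                last_fd['instance_list'].append(inst)
                continue
            fencedev = {}
            for kee, val in fd.getAttributes().items():
                fencedev[kee] = val
            try:
                fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
            except KeyError:
                fencedev['unknown'] = True
                fencedev['prettyname'] = fd.getAgentType()
            fencedev['isShared'] = shared
            fencedev['id'] = str(counters[0])
            counters[0] += 1
            if shared:
                fencedev['instance_list'] = [inst]
                last_fd = fencedev
            else:
                for kee, val in inst.items():
                    fencedev[kee] = val
                last_fd = None
            level.append(fencedev)
        return level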
-def getFencesInfo(self, model, request):
- fences_map = {}
- if not model:
- luci_log.debug_verbose('getFencesInfo0: model is None')
- fences_map['fencedevs'] = list()
- return fences_map
-
- clustername = request['clustername']
- baseurl = request['URL']
- fencedevs = list() #This is for the fencedev list page
-
- #Get list of fence devices
- fds = model.getFenceDevices()
- for fd in fds:
- #This section determines which nodes use the dev
- #create fencedev hashmap
- nodes_used = list()
-
- if fd.isShared() == True:
- fencedev = {}
- attr_hash = fd.getAttributes()
- kees = attr_hash.keys()
-
- for kee in kees:
- fencedev[kee] = attr_hash[kee] #copy attrs over
- try:
- fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
- except:
- fencedev['unknown'] = True
- fencedev['pretty_name'] = fd.getAgentType()
-
- fencedev['agent'] = fd.getAgentType()
- #Add config url for this fencedev
- fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
- % (baseurl, clustername, fd.getName().strip(), FENCEDEV)
-
- nodes = model.getNodes()
- for node in nodes:
- flevels = node.getFenceLevels()
- for flevel in flevels: #These are the method blocks...
- kids = flevel.getChildren()
- for kid in kids: #These are actual devices in each level
- if kid.getName().strip() == fd.getName().strip():
- #See if this fd already has an entry for this node
- found_duplicate = False
- for item in nodes_used:
- if item['nodename'] == node.getName().strip():
- found_duplicate = True
- if found_duplicate == True:
- continue
- node_hash = {}
- cur_nodename = node.getName().strip()
- node_hash['nodename'] = cur_nodename
- node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
- % (baseurl, clustername, cur_nodename, NODE)
- nodes_used.append(node_hash)
-
- fencedev['nodesused'] = nodes_used
- fencedevs.append(fencedev)
-
- fences_map['fencedevs'] = fencedevs
- return fences_map
-
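The nodes_used duplicate scan above (and the identical one in getFence) rewalks the list once per method-block child; a set makes the membership test constant-time. A minimal sketch of the inner section, per shared device (assumes Python 2.4's built-in set; otherwise the sets module would do):

    seen = set()
    nodes_used = list()
    for node in model.getNodes():
        cur_nodename = node.getName().strip()
        for flevel in node.getFenceLevels():
            for kid in flevel.getChildren():
                if kid.getName().strip() != fd.getName().strip():
                    continue
                if cur_nodename in seen:
                    continue
                seen.add(cur_nodename)
                nodes_used.append({
                    'nodename': cur_nodename,
                    'nodeurl': '%s?clustername=%s&nodename=%s&pagetype=%s'
                        % (baseurl, clustername, cur_nodename, NODE),
                })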
def getLogsForNode(self, request):
try:
nodename = request['nodename']
@@ -5416,43 +3956,6 @@
return rq.getNodeLogs(rc)
-def getVMInfo(self, model, request):
- vm_map = {}
-
- try:
- clustername = request['clustername']
- except Exception, e:
- try:
- clustername = model.getClusterName()
- except:
- return vm_map
-
- svcname = None
- try:
- svcname = request['servicename']
- except Exception, e:
- try:
- svcname = request.form['servicename']
- except Exception, e:
- return vm_map
-
- vm_map['formurl'] = '%s?clustername=%s&pagetype=29&servicename=%s' \
- % (request['URL'], clustername, svcname)
-
- try:
- vm = model.retrieveVMsByName(svcname)
- except:
- luci_log.debug('An error occurred while attempting to get VM %s' \
- % svcname)
- return vm_map
-
- attrs = vm.getAttributes()
- keys = attrs.keys()
- for key in keys:
- vm_map[key] = attrs[key]
-
- return vm_map
-
def isClusterBusy(self, req):
items = None
busy_map = {}
@@ -5784,92 +4287,6 @@
clu_map['isVirtualized'] = False
return clu_map
-def getResourcesInfo(model, request):
- resList = list()
- baseurl = request['URL']
-
- try:
- cluname = request['clustername']
- except:
- try:
- cluname = request.form['clustername']
- except:
- luci_log.debug_verbose('getResourcesInfo missing cluster name')
- return resList
-
- for item in model.getResources():
- itemmap = {}
- cur_itemname = item.getName().strip()
- itemmap['name'] = cur_itemname
- itemmap['attrs'] = item.attr_hash
- itemmap['type'] = item.resource_type
- itemmap['tag_name'] = item.TAG_NAME
- itemmap['cfgurl'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
- % (baseurl, cluname, cur_itemname, RESOURCE_CONFIG)
- itemmap['url'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
- % (baseurl, cluname, cur_itemname, RESOURCE)
- itemmap['delurl'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
- % (baseurl, cluname, cur_itemname, RESOURCE_REMOVE)
- resList.append(itemmap)
- return resList
-
-def getResourceInfo(model, request):
- if not model:
- luci_log.debug_verbose('GRI0: no model object in session')
- return {}
-
- name = None
- try:
- name = request['resourcename']
- except:
- try:
- name = request.form['resourcename']
- except:
- pass
-
- if name is None:
- try:
- res_type = request.form['type']
- if res_type == 'ip':
- name = request.form['value'].strip()
- except:
- pass
-
- if name is None:
- luci_log.debug_verbose('getResourceInfo missing res name')
- return {}
-
- try:
- cluname = request['clustername']
- except:
- try:
- cluname = request.form['clustername']
- except:
- luci_log.debug_verbose('getResourceInfo missing cluster name')
- return {}
-
- try:
- baseurl = request['URL']
- except:
- luci_log.debug_verbose('getResourceInfo missing URL')
- return {}
-
- for res in model.getResources():
- if res.getName() == name:
- try:
- resMap = {}
- cur_resname = res.getName().strip()
- resMap['name'] = cur_resname
- resMap['type'] = res.resource_type
- resMap['tag_name'] = res.TAG_NAME
- resMap['attrs'] = res.attr_hash
- resMap['cfgurl'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
- % (baseurl, cluname, cur_resname, RESOURCE_CONFIG)
- return resMap
- except:
- continue
- return {}
-
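getResourceInfo tries the request, then the form, for both the resource and cluster names, just as getResourcesInfo does above it. A minimal sketch of the lookup they repeat (hypothetical helper, not part of this patch):

    def get_req_var(request, key, default=None):
        # Look in the request, then its form, for a non-empty value.
        try:
            if request[key]:
                return request[key]
        except KeyError:
            pass
        try:
            if request.form[key]:
                return request.form[key]
        except (AttributeError, KeyError):
            pass
        return default

Used as, e.g., cluname = get_req_var(request, 'clustername'), with a None result handled the way the except blocks above handle a missing name.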
def delService(self, request):
errstr = 'An error occurred while attempting to set the new cluster.conf'
@@ -6089,22 +4506,12 @@
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true'
% (request['URL'], RESOURCES, clustername))
-def getResource(model, name):
- resPtr = model.getResourcesPtr()
- resources = resPtr.getChildren()
-
- for res in resources:
- if res.getName() == name:
- return res
-
- luci_log.debug_verbose('getResource: unable to find resource "%s"' % name)
- raise KeyError, name
def appendModel(request, model):
try:
request.SESSION.set('model', model)
- except:
- luci_log.debug_verbose('Appending model to request failed')
+ except Exception, e:
+ luci_log.debug_verbose('Appending model to request failed: %r' % e)
return 'An error occurred while storing the cluster model.'
def getModelBuilder(self, rc, isVirtualized):
@@ -6113,21 +4520,21 @@
if not cluster_conf_node:
raise Exception, 'getClusterConf returned None'
except Exception, e:
- luci_log.debug_verbose('GMB0: unable to get cluster_conf_node in getModelBuilder: %s' % str(e))
+ luci_log.debug_verbose('GMB0: unable to get cluster_conf_node in getModelBuilder: %r' % e)
return None
try:
model = ModelBuilder(0, None, None, cluster_conf_node)
if not model:
- raise Exception, 'ModelBuilder returned None'
+ raise Exception, 'ModelBuilder() returned None'
except Exception, e:
try:
- luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf "%s": %s' % (cluster_conf_node.toxml(), str(e)))
+ luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf "%r": %r' % (cluster_conf_node.toxml(), e))
except:
luci_log.debug_verbose('GMB1: ModelBuilder failed')
+ return None
- if model:
- model.setIsVirtualized(isVirtualized)
+ model.setIsVirtualized(isVirtualized)
return model
def getModelForCluster(self, clustername):
@@ -6142,8 +4549,8 @@
if not model:
raise Exception, 'model is none'
except Exception, e:
- luci_log.debug_verbose('GMFC1: unable to get model builder for %s: %s' \
- % (clustername, str(e)))
+ luci_log.debug_verbose('GMFC1: unable to get model builder for %s: %r' \
+ % (clustername, e))
return None
return model