[Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
rmccabe at sourceware.org
Mon Sep 25 22:59:16 UTC 2006
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2006-09-25 22:59:15
Modified files:
luci/site/luci/Extensions: cluster_adapters.py
homebase_adapters.py ricci_bridge.py
Log message:
add node stuff
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.69&r2=1.70
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&r1=1.20&r2=1.21
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.18&r2=1.19
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2006/09/25 21:00:14 1.69
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2006/09/25 22:59:15 1.70
@@ -23,7 +23,7 @@
#then only display chooser if the current user has
#permissions on at least one. If the user is admin, show ALL clusters
-from homebase_adapters import nodeAuth, nodeUnauth, manageCluster
+from homebase_adapters import nodeAuth, nodeUnauth, manageCluster, createClusterSystems
CLUSTER_FOLDER_PATH = '/luci/systems/cluster/'
@@ -151,6 +151,7 @@
cluster_properties['isComplete'] = False
errors.append(error)
return (False, {'errors': errors, 'requestResults':cluster_properties })
+
batch_id_map = {}
for i in nodeList:
try:
@@ -189,7 +190,8 @@
def validateAddClusterNode(self, request):
errors = list()
- messages = list()
+ messages = list()
+ requestResults = {}
try:
sessionData = request.SESSION.get('checkRet')
@@ -199,7 +201,7 @@
if 'clusterName' in request.form:
clusterName = request.form['clusterName']
else:
- return (False, {'errors': [ 'Cluster name is missing'] })
+ return (False, {'errors': [ 'Cluster name is missing'], 'requestResults': requestResults })
try:
numStorage = int(request.form['numStorage'])
@@ -207,7 +209,7 @@
raise
except:
errors.append('You must specify at least one node to add to the cluster')
- return (False, {'errors': [ errors ] })
+ return (False, {'errors': [ errors ], 'requestResults': requestResults })
ret = validateClusterNodes(request, sessionData, clusterName, numStorage)
errors.extend(ret[0])
@@ -225,18 +227,43 @@
i = 0
while i < len(nodeList):
+ clunode = nodeList[i]
try:
- x = 0 # ricci call succeeds
- messages.append('Cluster join initiated for host \"' + i['ricci_host'] + '\"')
+ batchNode = addClusterNodeBatch(clusterName, True, False, False)
+ if not batchNode:
+ raise
del nodeList[i]
except:
- i['errors'] = True
- errors.append('Unable to initiate node creation for host \"' + i['ricci_host'] + '\"')
- cluster_properties['isComplete'] = 0
+ clunode['errors'] = True
+ nodeUnauth(nodeList)
+ cluster_properties['isComplete'] = False
+ errors.append('Unable to initiate node creation for host \"' + clunode['ricci_host'] + '\"')
if not cluster_properties['isComplete']:
return (False, {'errors': errors, 'requestResults': cluster_properties})
+ error = createClusterSystems(self, clusterName, nodeList)
+ if error:
+ nodeUnauth(nodeList)
+ cluster_properties['isComplete'] = False
+ errors.append(error)
+ return (False, {'errors': errors, 'requestResults': cluster_properties})
+
+ batch_id_map = {}
+ for clunode in nodeList:
+ try:
+ rc = RicciCommunicator(clunode['ricci_host'])
+ resultNode = rc.process_batch(batchNode, async=True)
+ batch_id_map[clunode['ricci_host']] = resultNode.getAttribute('batch_id')
+ messages.append('Cluster join initiated for host \"' + clunode['ricci_host'] + '\"')
+ except:
+ nodeUnauth(nodeList)
+ cluster_properties['isComplete'] = False
+ errors.append('An error occurred while attempting to add cluster node \"' + clunode['ricci_host'] + '\"')
+ return (False, {'errors': errors, 'requestResults': cluster_properties})
+
+ buildClusterCreateFlags(self, batch_id_map, clusterName)
return (True, {'errors': errors, 'messages': messages})
formValidators = {
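
The new validateAddClusterNode() flow above is submit-then-track: build one
join batch for the cluster, post it to each node's ricci agent
asynchronously, and record the batch_id each agent returns so the join can
be polled later (buildClusterCreateFlags() stores these ids). A minimal
sketch of that pattern, assuming RicciCommunicator, addClusterNodeBatch and
process_batch behave as used in the diff; submit_join_batches() itself is a
hypothetical helper, not part of the commit:

    def submit_join_batches(clusterName, nodeList):
        # Build the install/reboot/set_cluster.conf/start_node batch once;
        # the same document is sent to every node being added.
        batchNode = addClusterNodeBatch(clusterName, True, False, False)
        if not batchNode:
            return None, 'Unable to build the cluster join batch'

        batch_id_map = {}
        for clunode in nodeList:
            try:
                rc = RicciCommunicator(clunode['ricci_host'])
                # async=True queues the batch on the agent and returns
                # immediately with a node carrying the batch_id attribute.
                resultNode = rc.process_batch(batchNode, async=True)
                batch_id_map[clunode['ricci_host']] = resultNode.getAttribute('batch_id')
            except:
                return None, 'Unable to contact the ricci agent on "' + clunode['ricci_host'] + '"'
        return batch_id_map, None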
--- conga/luci/site/luci/Extensions/homebase_adapters.py 2006/08/01 16:27:53 1.20
+++ conga/luci/site/luci/Extensions/homebase_adapters.py 2006/09/25 22:59:15 1.21
@@ -986,6 +986,92 @@
newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
except: pass
+def createClusterSystems(self, clusterName, nodeList):
+ try:
+ clusterObj = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
+ if not clusterObj:
+ raise
+ except:
+ nodeUnauth(nodeList)
+ return 'No cluster named \"' + clusterName + '\" is managed by Luci'
+
+ for i in nodeList:
+ if 'ricci_host' in i:
+ host = str(i['ricci_host'])
+ else:
+ host = str(i['host'])
+
+ try:
+ clusterObj.manage_addFolder(host, '__luci__:csystem:' + clusterName)
+ newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName + '/' + host)
+ if not newSystem:
+ raise
+ newSystem.manage_acquiredPermissions([])
+ newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+ except:
+ nodeUnauth(nodeList)
+ return 'Unable to create cluster node \"' + host + '\" for cluster \"' + clusterName + '\"'
+
+ try:
+ ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+ if not ssystem:
+ raise
+ except:
+ return
+
+ # Only add storage systems if the cluster and cluster node DB
+ # objects were added successfully.
+ for i in nodeList:
+ if 'ricci_host' in i:
+ host = str(i['ricci_host'])
+ else:
+ host = str(i['host'])
+
+ try:
+ # The host already exists as a storage system; nothing to add.
+ exists = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+ continue
+ except: pass
+
+ try:
+ ssystem.manage_addFolder(host, '__luci__:system')
+ newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/' + host)
+ newSystem.manage_acquiredPermissions([])
+ newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+ except: pass
+
+def delSystem(self, systemName):
+ try:
+ ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
+ except:
+ return 'Unable to find storage system \"' + systemName + '\"'
+
+ try:
+ rc = RicciCommunicator(systemName)
+ if not rc:
+ raise
+ except:
+ return 'Unable to connect to the ricci agent on \"' + systemName + '\" to unauthenticate'
+
+ # Only unauthenticate if the system isn't a member of
+ # a managed cluster.
+ cluster_info = rc.cluster_info()
+ if not cluster_info[0]:
+ try: rc.unauth()
+ except: pass
+ else:
+ try:
+ newSystem = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + cluster_info[0] + '/' + rc.system_name())
+ except:
+ try: rc.unauth()
+ except: pass
+
+ try:
+ ssystem.manage_delObjects([systemName])
+ except:
+ return 'Unable to delete storage system \"' + systemName + '\"'
+
+
def delSystem(self, systemName):
try:
ssystem = self.restrictedTraverse(PLONE_ROOT + '/systems/storage/')
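
createClusterSystems() and the new delSystem() both lean on the same Zope
idiom: create a folder under the systems tree, re-traverse to confirm it
exists, cut off acquired permissions, and grant only View. A minimal sketch
of that idiom; make_restricted_folder() is a hypothetical helper, while
PLONE_ROOT, restrictedTraverse() and the manage_* calls are as used in the
diff:

    def make_restricted_folder(self, base_path, name, title):
        parent = self.restrictedTraverse(PLONE_ROOT + base_path)
        parent.manage_addFolder(name, title)

        # Re-traverse to verify the folder was actually created.
        folder = self.restrictedTraverse(PLONE_ROOT + base_path + '/' + name)
        if not folder:
            raise KeyError('folder "' + name + '" was not created')

        # Drop all acquired permissions, then grant only what viewing needs.
        folder.manage_acquiredPermissions([])
        folder.manage_role('View', [ 'Access contents information', 'View' ])
        return folder

With that helper, the per-node body of createClusterSystems() reduces to
make_restricted_folder(self, '/systems/cluster/' + clusterName, host,
'__luci__:csystem:' + clusterName).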
--- conga/luci/site/luci/Extensions/ricci_bridge.py 2006/09/23 04:04:08 1.18
+++ conga/luci/site/luci/Extensions/ricci_bridge.py 2006/09/25 22:59:15 1.19
@@ -594,6 +594,57 @@
#parse out log entry
return payload
+def addClusterNodeBatch(cluster_name, services, shared_storage, LVS):
+ batch = '<?xml version="1.0" ?>'
+ batch += '<batch>'
+ batch += '<module name="rpm">'
+ batch += '<request API_version="1.0">'
+ batch += '<function_call name="install">'
+ batch += '<var name="sets" type="list_xml">'
+ batch += '<set name="Cluster Base"/>'
+ if services:
+ batch += '<set name="Cluster Service Manager"/>'
+ if shared_storage:
+ batch += '<set name="Clustered Storage"/>'
+ if LVS:
+ batch += '<set name="Linux Virtual Server"/>'
+ batch += '</var>'
+ batch += '</function_call>'
+ batch += '</request>'
+ batch += '</module>'
+
+ batch += '<module name="reboot">'
+ batch += '<request API_version="1.0">'
+ batch += '<function_call name="reboot_now"/>'
+ batch += '</request>'
+ batch += '</module>'
+
+ batch += '<module name="cluster">'
+ batch += '<request API_version="1.0">'
+ batch += '<function_call name="set_cluster.conf">'
+ batch += '<var mutable="false" name="propagate" type="boolean" value="false"/>'
+ batch += '<var mutable="false" name="cluster.conf" type="xml">'
+ batch += '<cluster config_version="1" name="' + cluster_name + '">'
+ batch += '<fence_daemon post_fail_delay="0" post_join_delay="3"/>'
+ batch += '<clusternodes/>'
+ batch += '<cman/>'
+ batch += '<fencedevices/>'
+ batch += '<rm/>'
+ batch += '</cluster>'
+ batch += '</var>'
+ batch += '</function_call>'
+ batch += '</request>'
+ batch += '</module>'
+
+ batch += '<module name="cluster">'
+ batch += '<request API_version="1.0">'
+ batch += '<function_call name="start_node"/>'
+ batch += '</request>'
+ batch += '</module>'
+ batch += '</batch>'
+
+ return minidom.parseString(batch).firstChild
+
def createClusterBatch(cluster_name, cluster_alias, nodeList, services, shared_storage, LVS):
batch = '<?xml version="1.0" ?>'
batch += '<batch>'
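
One caveat with addClusterNodeBatch() as written: the batch is assembled by
string concatenation, so a cluster_name containing '&', '<' or '"' would
yield invalid XML. A hedged alternative sketch of just the set_cluster.conf
module built with xml.dom.minidom, which escapes attribute values
automatically (the propagate var and the fence_daemon attributes are elided
for brevity; build_conf_module() is hypothetical):

    from xml.dom import minidom

    def build_conf_module(doc, cluster_name):
        module = doc.createElement('module')
        module.setAttribute('name', 'cluster')
        request = doc.createElement('request')
        request.setAttribute('API_version', '1.0')
        call = doc.createElement('function_call')
        call.setAttribute('name', 'set_cluster.conf')

        var = doc.createElement('var')
        var.setAttribute('mutable', 'false')
        var.setAttribute('name', 'cluster.conf')
        var.setAttribute('type', 'xml')

        # cluster_name is escaped by minidom when the tree is serialized.
        cluster = doc.createElement('cluster')
        cluster.setAttribute('config_version', '1')
        cluster.setAttribute('name', cluster_name)
        for tag in ('fence_daemon', 'clusternodes', 'cman', 'fencedevices', 'rm'):
            cluster.appendChild(doc.createElement(tag))

        var.appendChild(cluster)
        call.appendChild(var)
        request.appendChild(call)
        module.appendChild(request)
        return module

The other modules would be built the same way and appended to a top-level
<batch> element created from minidom.Document(), with doc.toxml() taking
the place of the concatenated string.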