[Cluster-devel] conga/luci cluster/form-macros site/luci/Exten ...

rmccabe at sourceware.org
Sun Nov 12 02:10:54 UTC 2006


CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-11-12 02:10:53

Modified files:
	luci/cluster   : form-macros 
	luci/site/luci/Extensions: LuciSyslog.py cluster_adapters.py 
	                           conga_constants.py ricci_bridge.py 
	                           ricci_communicator.py 

Log message:
	fix for bz# 213266

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.103&r2=1.104
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciSyslog.py.diff?cvsroot=cluster&r1=1.9&r2=1.10
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.161&r2=1.162
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.24&r2=1.25
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_bridge.py.diff?cvsroot=cluster&r1=1.41&r2=1.42
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ricci_communicator.py.diff?cvsroot=cluster&r1=1.18&r2=1.19

--- conga/luci/cluster/form-macros	2006/11/10 19:44:57	1.103
+++ conga/luci/cluster/form-macros	2006/11/12 02:10:52	1.104
@@ -25,26 +25,33 @@
       </span>
       <span tal:condition="python: 'isnodecreation' in nodereport and nodereport['isnodecreation'] == True">
        <span tal:condition="python: nodereport['iserror'] == True">
-			  <h2><span tal:content="nodereport/desc" /></h2>
-         <font color="red"><span tal:content="nodereport/errormessage"/></font>
+		<h2><span tal:content="nodereport/desc" /></h2>
+		<span class="errmsg" tal:content="nodereport/errormessage"/>
        </span>
+
        <span tal:condition="python: nodereport['iserror'] == False">
-			  <h2><span tal:content="nodereport/desc" /></h2>
-         <i><span tal:content="nodereport/statusmessage"/></i><br/>
-          <span tal:condition="python: nodereport['statusindex'] == 0">
+		<h2><span tal:content="nodereport/desc" /></h2>
+		<em tal:content="nodereport/statusmessage | nothing"/><br/>
+          <span tal:condition="python: nodereport['statusindex'] < 1">
            <img src="notstarted.png"/>
           </span>
-          <span tal:condition="python: nodereport['statusindex'] == 1">
-           <img src="installed.png"/>
-          </span>
-          <span tal:condition="python: nodereport['statusindex'] == 2">
-           <img src="rebooted.png"/>
+
+          <span tal:condition="
+			python: nodereport['statusindex'] == 1 or nodereport['statusindex'] == 2">
+           <img src="installed.png" alt="[cluster software installed]" />
           </span>
+
           <span tal:condition="python: nodereport['statusindex'] == 3">
-           <img src="configured.png"/>
+           <img src="rebooted.png" alt="[cluster node rebooted]" />
+          </span>
+
+          <span tal:condition="
+				python: nodereport['statusindex'] == 4 or nodereport['statusindex'] == 5">
+           <img src="configured.png" alt="[cluster node configured]" />
           </span>
-          <span tal:condition="python: nodereport['statusindex'] == 4">
-           <img src="joined.png"/>
+
+          <span tal:condition="python: nodereport['statusindex'] == 6">
+           <img src="joined.png" alt="[cluster node joined cluster]" />
           </span>
        </span>
       </span>
@@ -378,6 +385,7 @@
 	<tal:block
 		tal:define="global clusterinfo python: here.getClusterInfo(modelb, request)" />
 
+<tal:block tal:condition="clusterinfo">
 	<span tal:omit-tag="" tal:define="global configTabNum python: 'tab' in request and int(request['tab']) or 1" />
 
 	<ul class="configTab">
@@ -439,7 +447,7 @@
 					<td class="systemsTable">Cluster Name</td>
 					<td class="systemsTable">
 						<input type="text" name="cluname"
-							tal:attributes="value clusterinfo/clustername"/>
+							tal:attributes="value clusterinfo/clustername" />
 					</td>
 				</tr>
 				<tr class="systemsTable">
@@ -1082,6 +1090,7 @@
 		</script>
 		</form>
 	</div>
+</tal:block>
 </div>
 
 <div metal:define-macro="clusterprocess-form">
@@ -2117,7 +2126,10 @@
 <div metal:define-macro="nodeprocess-form">
 	<tal:block
 		tal:define="result python: here.nodeTaskProcess(modelb, request)"/>
-	<h2>Node Process Form</h2>
+
+	<div>
+		<span tal:replace="result | nothing" />
+	</div>
 </div>
 
 <div metal:define-macro="services-form">
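
The form-macros changes above remap the node-creation progress images to
the new task numbering introduced in conga_constants.py further down:
index 0 (or anything below 1) is "not started", 1 and 2 show the
installed image, 3 the rebooted image, 4 and 5 the configured image, and
6 the joined image. A minimal Python sketch of the mapping the TAL
conditions now encode (the helper name is illustrative only):

    def status_image(statusindex):
        # < 1: node creation has not started yet
        if statusindex < 1:
            return 'notstarted.png', None
        # 1-2: packages installed / cluster services disabled
        if statusindex in (1, 2):
            return 'installed.png', '[cluster software installed]'
        # 3: node has been rebooted
        if statusindex == 3:
            return 'rebooted.png', '[cluster node rebooted]'
        # 4-5: configuration sent / cluster services re-enabled
        if statusindex in (4, 5):
            return 'configured.png', '[cluster node configured]'
        # 6: node has joined the cluster
        if statusindex == 6:
            return 'joined.png', '[cluster node joined cluster]'
        return None, None
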
--- conga/luci/site/luci/Extensions/LuciSyslog.py	2006/11/06 20:21:04	1.9
+++ conga/luci/site/luci/Extensions/LuciSyslog.py	2006/11/12 02:10:53	1.10
@@ -3,14 +3,12 @@
 		LOG_DAEMON, LOG_PID, LOG_NDELAY, LOG_INFO, \
 		LOG_WARNING, LOG_AUTH, LOG_DEBUG
 
-"""Exception class for the LuciSyslog facility
-"""
+# Exception class for the LuciSyslog facility
 class LuciSyslogError(Exception):
 	def __init__(self, msg):
 		Exception.__init__(self, msg)
 
-"""Facility that provides centralized syslog(3) functionality for luci
-"""
+# Facility that provides centralized syslog(3) functionality for luci
 class LuciSyslog:
 	def __init__(self):
 		self.__init = 0
@@ -50,11 +48,24 @@
 	def debug_verbose(self, msg):
 		if not LUCI_DEBUG_MODE or LUCI_DEBUG_VERBOSITY < 2 or not self.__init:
 			return
-		try:
-			syslog(LOG_DEBUG, msg)
-		except:
-			pass
-			#raise LuciSyslogError, 'syslog debug call failed'
+
+		msg_len = len(msg)
+		if msg_len < 1:
+			return
+
+		while True:
+			cur_len = min(msg_len, 800)
+			cur_msg = msg[:cur_len]
+			try:
+				syslog(LOG_DEBUG, cur_msg)
+			except:
+				pass
+
+			msg_len -= cur_len
+			if msg_len > 0:
+				msg = msg[cur_len:]
+			else:
+				break
 
 	def debug(self, msg):
 		if not LUCI_DEBUG_MODE or not self.__init:
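
The LuciSyslog change replaces the single syslog(3) call in
debug_verbose with a loop that writes the message in slices of at most
800 characters, presumably to keep each entry under syslog's message
length limit so long debug output is not silently dropped. A standalone
sketch of the same behaviour (the function name is illustrative; the
bare except mirrors the patch, which never lets logging raise into the
caller):

    from syslog import syslog, LOG_DEBUG

    def debug_verbose_chunked(msg, chunk_size=800):
        while msg:
            chunk, msg = msg[:chunk_size], msg[chunk_size:]
            try:
                syslog(LOG_DEBUG, chunk)
            except:
                pass
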
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/10 19:44:57	1.161
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/11/12 02:10:53	1.162
@@ -339,7 +339,8 @@
 	while i < len(nodeList):
 		clunode = nodeList[i]
 		try:
-			batchNode = addClusterNodeBatch(clusterName,
+			batchNode = addClusterNodeBatch(clunode['os'],
+							clusterName,
 							True,
 							True,
 							enable_storage,
@@ -370,8 +371,8 @@
 		success = True
 		try:
 			rc = RicciCommunicator(clunode['host'])
-		except:
-			luci_log.info('Unable to connect to the ricci daemon on host ' + clunode['host'])
+		except Exception, e:
+			luci_log.info('Unable to connect to the ricci daemon on host %s: %s'% (clunode['host'], str(e)))
 			success = False
 
 		if success:
@@ -995,6 +996,9 @@
 def createCluConfigTree(self, request, model):
   dummynode = {}
 
+  if not model:
+    return {}
+
   #There should be a positive page type
   try:
     pagetype = request[PAGETYPE]
@@ -1418,6 +1422,8 @@
   return model.getClusterName()
 
 def getClusterAlias(self, model):
+  if not model:
+    return ''
   alias = model.getClusterAlias()
   if alias is None:
     return model.getClusterName()
@@ -1539,7 +1545,21 @@
 		except Exception, e:
 			luci_log.debug('GRA4: cluster_info error: %s' % str(e))
 
-		if cluname != lower(clu_info[0]) and cluname != lower(clu_info[1]):
+		try:
+			cur_name = str(clu_info[0]).strip().lower()
+			if not cur_name:
+				raise
+		except:
+			cur_name = None
+
+		try:
+			cur_alias = str(clu_info[1]).strip().lower()
+			if not cur_alias:
+				raise
+		except:
+			cur_alias = None
+			
+		if (cur_name is not None and cluname != cur_name) and (cur_alias is not None and cluname != cur_alias):
 			try:
 				luci_log.debug('GRA5: %s reports it\'s in cluster %s:%s; we expect %s' \
 					 % (hostname, clu_info[0], clu_info[1], cluname))
@@ -1580,12 +1600,18 @@
 	return getRicciAgent(self, clustername)
 
 def getClusterStatus(self, rc):
-	doc = getClusterStatusBatch(rc)
+	try:
+		doc = getClusterStatusBatch(rc)
+	except Exception, e:
+		luci_log.debug_verbose('GCS0: error: %s' % str(e))
+		doc = None
+
 	if not doc:
 		try:
-			luci_log.debug_verbose('getClusterStatusBatch returned None for %s/%s' % rc.cluster_info())
+			luci_log.debug_verbose('GCS1: returned None for %s/%s' % rc.cluster_info())
 		except:
 			pass
+
 		return {}
 
 	results = list()
@@ -2031,7 +2057,7 @@
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		 % (request['URL'], NODES, model.getClusterName()))
+		% (request['URL'], NODES, model.getClusterName()))
 
 def getClusterInfo(self, model, req):
   try:
@@ -2061,7 +2087,7 @@
       except Exception, e2:
         luci_log.debug_verbose('GCI2 unable to set model in session: %s' % str(e2))
     except Exception, e:
-      luci_log.debug_verbose('GCI3: unable to get model for cluster %s: %s' % cluname, str(e))
+      luci_log.debug_verbose('GCI3: unable to get model for cluster %s: %s' % (cluname, str(e)))
       return {}
 
   prop_baseurl = req['URL'] + '?' + PAGETYPE + '=' + CLUSTER_CONFIG + '&' + CLUNAME + '=' + cluname + '&'
@@ -2639,34 +2665,34 @@
 			return None
 
 		response = request.RESPONSE
-		response.redirect(request['URL'] + "?pagetype=" + NODE_LIST + "&clustername=" + clustername + '&busyfirst=true')
+		response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
 	elif task == NODE_JOIN_CLUSTER:
 		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
 			luci_log.debug_verbose('NTP: nodeJoin failed')
 			return None
 
 		response = request.RESPONSE
-		response.redirect(request['URL'] + "?pagetype=" + NODE_LIST + "&clustername=" + clustername + '&busyfirst=true')
+		response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
 	elif task == NODE_REBOOT:
 		if forceNodeReboot(self, rc, clustername, nodename_resolved) is None:
 			luci_log.debug_verbose('NTP: nodeReboot failed')
 			return None
 
 		response = request.RESPONSE
-		response.redirect(request['URL'] + "?pagetype=" + NODE_LIST + "&clustername=" + clustername + '&busyfirst=true')
+		response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
 	elif task == NODE_FENCE:
 		if forceNodeFence(self, clustername, nodename, nodename_resolved) is None:
 			luci_log.debug_verbose('NTP: nodeFencefailed')
 			return None
 
 		response = request.RESPONSE
-		response.redirect(request['URL'] + "?pagetype=" + NODE_LIST + "&clustername=" + clustername + '&busyfirst=true')
+		response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
 	elif task == NODE_DELETE:
 		if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved) is None:
 			luci_log.debug_verbose('NTP: nodeDelete failed')
 			return None
 		response = request.RESPONSE
-		response.redirect(request['URL'] + "?pagetype=" + NODE_LIST + "&clustername=" + clustername + '&busyfirst=true')
+		response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
 
 def getNodeInfo(self, model, status, request):
   infohash = {}
@@ -3396,7 +3422,7 @@
           luci_log.debug_verbose('ICB6b: rc is none')
       except Exception, e:
         rc = None
-        luci_log.debug_verbose('ICB7: ricci returned error in iCB for %s: %s' \
+        luci_log.debug_verbose('ICB7: RC: %s: %s' \
           % (cluname, str(e)))
 
       batch_id = None
@@ -3410,7 +3436,8 @@
             luci_log.debug_verbose('ICB8B: failed to get batch_id from %s: %s' \
                 % (item[0], str(e)))
           except:
-            luci_log.debug_verbose('ICB8C: failed to get batch_id from %s' % item[0])
+            luci_log.debug_verbose('ICB8C: failed to get batch_id from %s' \
+              % item[0])
 
         if batch_id is not None:
           try:
@@ -3458,18 +3485,31 @@
           elif laststatus == 0:
             node_report['statusindex'] = 0
             node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_INSTALL
+          elif laststatus == DISABLE_SVC_TASK:
+            node_report['statusindex'] = DISABLE_SVC_TASK
+            node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_CFG
           elif laststatus == REBOOT_TASK:
             node_report['statusindex'] = REBOOT_TASK
             node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_CFG
           elif laststatus == SEND_CONF:
             node_report['statusindex'] = SEND_CONF
             node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_JOIN
+          elif laststatus == ENABLE_SVC_TASK:
+            node_report['statusindex'] = ENABLE_SVC_TASK
+            node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + PRE_JOIN
+          else:
+            node_report['statusindex'] = 0
+            node_report['statusmessage'] = RICCI_CONNECT_FAILURE_MSG + ' Install is in an unknown state.'
           nodereports.append(node_report)
           continue
         elif creation_status == -(INSTALL_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, INSTALL_TASK)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[INSTALL_TASK] + err_msg
+        elif creation_status == -(DISABLE_SVC_TASK):
+          node_report['iserror'] = True
+          (err_code, err_msg) = extract_module_status(batch_xml, DISABLE_SVC_TASK)
+          node_report['errormessage'] = CLUNODE_CREATE_ERRORS[DISABLE_SVC_TASK] + err_msg
         elif creation_status == -(REBOOT_TASK):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, REBOOT_TASK)
@@ -3478,6 +3518,10 @@
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, SEND_CONF)
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[SEND_CONF] + err_msg
+        elif creation_status == -(ENABLE_SVC_TASK):
+          node_report['iserror'] = True
+          (err_code, err_msg) = extract_module_status(batch_xml, DISABLE_SVC_TASK)
+          node_report['errormessage'] = CLUNODE_CREATE_ERRORS[ENABLE_SVC_TASK] + err_msg
         elif creation_status == -(START_NODE):
           node_report['iserror'] = True
           (err_code, err_msg) = extract_module_status(batch_xml, START_NODE)
@@ -3485,7 +3529,13 @@
         else:
           node_report['iserror'] = True
           node_report['errormessage'] = CLUNODE_CREATE_ERRORS[0]
-        clusterfolder.manage_delObjects(item[0])
+
+        try:
+          clusterfolder.manage_delObjects(item[0])
+        except Exception, e:
+          luci_log.debug_verbose('ICB14: delObjects: %s: %s' \
+            % (item[0], str(e)))
+
         nodereports.append(node_report)
         continue
       else:  #either batch completed successfully, or still running
@@ -3497,7 +3547,7 @@
           try:
               clusterfolder.manage_delObjects(item[0])
           except Exception, e:
-              luci_log.info('ICB14: Unable to delete %s: %s' % (item[0], str(e)))
+              luci_log.info('ICB15: Unable to delete %s: %s' % (item[0], str(e)))
           continue
         else:
           map['busy'] = "true"
@@ -3507,8 +3557,12 @@
           nodereports.append(node_report)
           propslist = list()
           propslist.append(LAST_STATUS)
-          item[1].manage_delProperties(propslist)
-          item[1].manage_addProperty(LAST_STATUS, creation_status, "int")
+          try:
+            item[1].manage_delProperties(propslist)
+            item[1].manage_addProperty(LAST_STATUS, creation_status, "int")
+          except Exception, e:
+            luci_log.debug_verbose('ICB16: last_status err: %s %d: %s' \
+              % (item[0], creation_status, str(e)))
           continue
           
     else:
@@ -3548,6 +3602,7 @@
   if isBusy:
     part1 = req['ACTUAL_URL']
     part2 = req['QUERY_STRING']
+
     dex = part2.find("&busyfirst")
     if dex != (-1):
       tmpstr = part2[:dex] #This strips off busyfirst var
@@ -3555,7 +3610,6 @@
       ###FIXME - The above assumes that the 'busyfirst' query var is at the
       ###end of the URL...
     wholeurl = part1 + "?" + part2
-    #map['url'] = "5, url=" + req['ACTUAL_URL'] + "?" + req['QUERY_STRING']
     map['refreshurl'] = "5; url=" + wholeurl
     req['specialpagetype'] = "1"
   else:
@@ -3564,7 +3618,6 @@
       map['refreshurl'] = '5; url=' + req['ACTUAL_URL'] + '?' + query
     except:
       map['refreshurl'] = '5; url=/luci/cluster?pagetype=3'
-  luci_log.debug_verbose('ICB17: refreshurl is \"%s\"' % map['refreshurl'])
   return map
 
 def getClusterOS(self, rc):
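
Several of the cluster_adapters hunks above follow the same convention:
a creation_status equal to the negative of a task index means that
batch task failed, and the matching message is looked up by the same
index in CLUNODE_CREATE_ERRORS. A condensed sketch of the pattern (the
helper name is illustrative; extract_module_status and
CLUNODE_CREATE_ERRORS are the same names the module already uses):

    def node_error_report(creation_status, batch_xml):
        task = -creation_status   # e.g. -(DISABLE_SVC_TASK) -> DISABLE_SVC_TASK
        err_code, err_msg = extract_module_status(batch_xml, task)
        return {
            'iserror': True,
            'errormessage': CLUNODE_CREATE_ERRORS[task] + err_msg,
        }

Note that the -(ENABLE_SVC_TASK) branch in the patch still passes
DISABLE_SVC_TASK to extract_module_status, which looks like a
copy-and-paste leftover.
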
--- conga/luci/site/luci/Extensions/conga_constants.py	2006/11/09 20:32:02	1.24
+++ conga/luci/site/luci/Extensions/conga_constants.py	2006/11/12 02:10:53	1.25
@@ -91,26 +91,36 @@
 NODE_UNKNOWN_STR="Unknown State"
 
 #cluster/node create batch task index
-INSTALL_TASK=1
-REBOOT_TASK=2
-SEND_CONF=3
-START_NODE=4
-RICCI_CONNECT_FAILURE=(-1000)
+INSTALL_TASK = 1
+DISABLE_SVC_TASK = 2
+REBOOT_TASK = 3
+SEND_CONF = 4
+ENABLE_SVC_TASK = 5
+START_NODE = 6
+RICCI_CONNECT_FAILURE = (-1000)
 
-RICCI_CONNECT_FAILURE_MSG="A problem was encountered connecting with this node.  "
+RICCI_CONNECT_FAILURE_MSG = "A problem was encountered connecting with this node.  "
 #cluster/node create error messages
-CLUNODE_CREATE_ERRORS = ["An unknown error occurred when creating this node: ", "A problem occurred when installing packages: ","A problem occurred when rebooting this node: ", "A problem occurred when propagating the configuration to this node: ", "A problem occurred when starting this node: "]
+CLUNODE_CREATE_ERRORS = [
+	"An unknown error occurred when creating this node: ",
+	"A problem occurred when installing packages: ",
+	"A problem occurred when disabling cluster services on this node: ",
+	"A problem occurred when rebooting this node: ",
+	"A problem occurred when propagating the configuration to this node: ",
+	"A problem occurred when enabling cluster services on this node: ",
+	"A problem occurred when starting this node: "
+]
 
 #cluster/node create error status messages
-PRE_INSTALL="The install state is not yet complete"
-PRE_REBOOT="Installation complete, but reboot not yet complete"
-PRE_CFG="Reboot stage successful, but configuration for the cluster is not yet distributed"
-PRE_JOIN="Packages are installed and configuration has been distributed, but the node has not yet joined the cluster."
+PRE_INSTALL = "The install state is not yet complete"
+PRE_REBOOT = "Installation complete, but reboot not yet complete"
+PRE_CFG = "Reboot stage successful, but configuration for the cluster is not yet distributed"
+PRE_JOIN = "Packages are installed and configuration has been distributed, but the node has not yet joined the cluster."
 
 
-POSSIBLE_REBOOT_MESSAGE="This node is not currently responding and is probably<br/>rebooting as planned. This state should persist for 5 minutes or so..."
+POSSIBLE_REBOOT_MESSAGE = "This node is not currently responding and is probably<br/>rebooting as planned. This state should persist for 5 minutes or so..."
 
-REDIRECT_MSG="  You will be redirected in 5 seconds. Please fasten your safety restraints."
+REDIRECT_MSG = " You will be redirected in 5 seconds. Please fasten your safety restraints."
 
 
 # Homebase-specific constants
@@ -128,7 +138,7 @@
 CLUSTER_NODE_NOT_MEMBER = 0x02
 CLUSTER_NODE_ADDED = 0x04
 
-PLONE_ROOT='luci'
+PLONE_ROOT = 'luci'
 
 LUCI_DEBUG_MODE = 1
 LUCI_DEBUG_VERBOSITY = 2
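
The renumbered task constants above are chosen so that each value
doubles as an index into the expanded CLUNODE_CREATE_ERRORS list
(index 0 holds the generic unknown-error message). A quick, purely
illustrative check:

    for task in (INSTALL_TASK, DISABLE_SVC_TASK, REBOOT_TASK,
                 SEND_CONF, ENABLE_SVC_TASK, START_NODE):
        assert 0 < task < len(CLUNODE_CREATE_ERRORS)
        print task, CLUNODE_CREATE_ERRORS[task]
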
--- conga/luci/site/luci/Extensions/ricci_bridge.py	2006/11/06 23:55:23	1.41
+++ conga/luci/site/luci/Extensions/ricci_bridge.py	2006/11/12 02:10:53	1.42
@@ -28,7 +28,8 @@
 
 	return False
 
-def addClusterNodeBatch(cluster_name,
+def addClusterNodeBatch(os_str,
+						cluster_name,
 						install_base,
 						install_services,
 						install_shared_storage,
@@ -65,13 +66,31 @@
 		
 	need_reboot = install_base or install_services or install_shared_storage or install_LVS
 	if need_reboot:
+		batch += '<module name="service">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="disable">'
+		batch += '<var mutable="false" name="services" type="list_xml">'
+		if os_str == 'rhel4':
+			batch += '<service name="ccsd"/>'
+		batch += '<service name="cman"/>'
+		batch += '</var>'
+		batch += '</function_call>'
+		batch += '</request>'
+		batch += '</module>'
+
 		batch += '<module name="reboot">'
 		batch += '<request API_version="1.0">'
 		batch += '<function_call name="reboot_now"/>'
 		batch += '</request>'
 		batch += '</module>'
 	else:
-		# need placeholder instead of reboot
+		# need 2 placeholders instead of disable services / reboot
+		batch += '<module name="rpm">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="install"/>'
+		batch += '</request>'
+		batch += '</module>'
+
 		batch += '<module name="rpm">'
 		batch += '<request API_version="1.0">'
 		batch += '<function_call name="install"/>'
@@ -95,6 +114,26 @@
 	batch += '</request>'
 	batch += '</module>'
 
+	if need_reboot:
+		batch += '<module name="service">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="enable">'
+		batch += '<var mutable="false" name="services" type="list_xml">'
+		if os_str == 'rhel4':
+			batch += '<service name="ccsd"/>'
+		batch += '<service name="cman"/>'
+		batch += '</var>'
+		batch += '</function_call>'
+		batch += '</request>'
+		batch += '</module>'
+	else:
+		# placeholder instead of enable services
+		batch += '<module name="rpm">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="install"/>'
+		batch += '</request>'
+		batch += '</module>'
+
 	batch += '<module name="cluster">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="start_node"/>'
@@ -142,13 +181,31 @@
 
 	need_reboot = install_base or install_services or install_shared_storage or install_LVS
 	if need_reboot:
+		batch += '<module name="service">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="disable">'
+		batch += '<var mutable="false" name="services" type="list_xml">'
+		if os_str == 'rhel4':
+			batch += '<service name="ccsd"/>'
+		batch += '<service name="cman"/>'
+		batch += '</var>'
+		batch += '</function_call>'
+		batch += '</request>'
+		batch += '</module>'
+
 		batch += '<module name="reboot">'
 		batch += '<request API_version="1.0">'
 		batch += '<function_call name="reboot_now"/>'
 		batch += '</request>'
 		batch += '</module>'
 	else:
-		# need placeholder instead of reboot
+		# need 2 placeholders instead of disable services / reboot
+		batch += '<module name="rpm">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="install"/>'
+		batch += '</request>'
+		batch += '</module>'
+
 		batch += '<module name="rpm">'
 		batch += '<request API_version="1.0">'
 		batch += '<function_call name="install"/>'
@@ -188,6 +245,26 @@
 	batch += '</request>'
 	batch += '</module>'
 
+	if need_reboot:
+		batch += '<module name="service">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="enable">'
+		batch += '<var mutable="false" name="services" type="list_xml">'
+		if os_str == 'rhel4':
+			batch += '<service name="ccsd"/>'
+		batch += '<service name="cman"/>'
+		batch += '</var>'
+		batch += '</function_call>'
+		batch += '</request>'
+		batch += '</module>'
+	else:
+		# placeholder instead of enable services
+		batch += '<module name="rpm">'
+		batch += '<request API_version="1.0">'
+		batch += '<function_call name="install"/>'
+		batch += '</request>'
+		batch += '</module>'
+
 	batch += '<module name="cluster">'
 	batch += '<request API_version="1.0">'
 	batch += '<function_call name="start_node">'
@@ -301,7 +378,7 @@
 def getNodeLogs(rc):
 	errstr = 'log not accessible'
 
-	batch_str = '<module name="log"><request sequence="1254" API_version="1.0"><function_call name="get"><var mutable="false" name="age" type="int" value="18000"/><var mutable="false" name="tags" type="list_str"></var></function_call></request></module>'
+	batch_str = '<module name="log"><request API_version="1.0"><function_call name="get"><var mutable="false" name="age" type="int" value="18000"/><var mutable="false" name="tags" type="list_str"></var></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str, async=False)
 	if not ricci_xml:
@@ -350,7 +427,7 @@
 	return entry
 
 def nodeReboot(rc):
-	batch_str = '<module name="reboot"><request sequence="111" API_version="1.0"><function_call name="reboot_now"/></request></module>'
+	batch_str = '<module name="reboot"><request API_version="1.0"><function_call name="reboot_now"/></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
@@ -364,13 +441,13 @@
 	if purge == False:
 		purge_conf = 'false'
 
-	batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="' + cshutdown + '"/><var mutable="false" name="purge_conf" type="boolean" value="' + purge_conf + '"/></function_call></request></module>'
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="' + cshutdown + '"/><var mutable="false" name="purge_conf" type="boolean" value="' + purge_conf + '"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
 
 def nodeFence(rc, nodename):
-	batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="fence_node"><var mutable="false" name="nodename" type="string" value="' + nodename + '"/></function_call></request></module>'
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="fence_node"><var mutable="false" name="nodename" type="string" value="' + nodename + '"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
@@ -380,28 +457,28 @@
 	if cluster_startup == True:
 		cstartup = 'true'
 
-	batch_str = '<module name="cluster"><request sequence="111" API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="' + cstartup + '"/></function_call></request></module>'
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="' + cstartup + '"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
 
 def startService(rc, servicename, preferrednode=None):
 	if preferrednode != None:
-		batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/><var mutable="false" name="nodename" type="string" value=\"' + preferrednode + '\" /></function_call></request></module>'
+		batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/><var mutable="false" name="nodename" type="string" value=\"' + preferrednode + '\" /></function_call></request></module>'
 	else:
-		batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
+		batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
 
 def restartService(rc, servicename):
-	batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
 
 def stopService(rc, servicename):
-	batch_str = '<module name="cluster"><request sequence="1254" API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"' + servicename + '\"/></function_call></request></module>'
 
 	ricci_xml = rc.batch_run(batch_str)
 	return batchAttemptResult(ricci_xml)
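
The two <module name="service"> fragments added to each batch above
differ only in the function name ('disable' before the reboot step,
'enable' once the configuration has been sent), and the standalone ccsd
service is included only when os_str is 'rhel4'. A sketch of a helper
that builds the same fragment (not part of the patch, purely
illustrative):

    def service_batch_fragment(op, os_str):
        # op is 'disable' or 'enable'
        services = ''
        if os_str == 'rhel4':
            services += '<service name="ccsd"/>'
        services += '<service name="cman"/>'
        return ('<module name="service">'
                '<request API_version="1.0">'
                '<function_call name="%s">'
                '<var mutable="false" name="services" type="list_xml">%s</var>'
                '</function_call>'
                '</request>'
                '</module>') % (op, services)

The placeholder <module name="rpm"> blocks in the else branches keep
the number of modules in the batch constant whether or not a reboot is
needed, so the fixed task indices (INSTALL_TASK through START_NODE)
still line up with module positions.
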
--- conga/luci/site/luci/Extensions/ricci_communicator.py	2006/11/06 23:55:23	1.18
+++ conga/luci/site/luci/Extensions/ricci_communicator.py	2006/11/12 02:10:53	1.19
@@ -34,7 +34,7 @@
             raise RicciError, 'Error connecting to %s:%d: unknown error' \
                     % (self.__hostname, self.__port)
 
-        luci_log.debug_verbose('Connected to %s:%d' \
+        luci_log.debug_verbose('RC:init0: Connected to %s:%d' \
             % (self.__hostname, self.__port))
         try:
             self.ss = ssl(sock, self.__privkey_file, self.__cert_file)
@@ -51,7 +51,7 @@
         # receive ricci header
         hello = self.__receive()
         try:
-            luci_log.debug_verbose('Received header from %s: \"%s\"' \
+            luci_log.debug_verbose('RC:init1: Received header from %s: \"%s\"' \
                 % (self.__hostname, hello.toxml()))
         except:
             pass
@@ -67,34 +67,34 @@
     
     
     def hostname(self):
-        luci_log.debug_verbose('[auth %d] reported hostname = %s' \
+        luci_log.debug_verbose('RC:hostname: [auth %d] reported hostname = %s' \
             % (self.__authed, self.__hostname))
         return self.__hostname
     def authed(self):
-        luci_log.debug_verbose('reported authed = %d for %s' \
+        luci_log.debug_verbose('RC:authed: reported authed = %d for %s' \
             % (self.__authed, self.__hostname))
         return self.__authed
     def system_name(self):
-        luci_log.debug_verbose('[auth %d] reported system_name = %s for %s' \
+        luci_log.debug_verbose('RC:system_name: [auth %d] reported system_name = %s for %s' \
             % (self.__authed, self.__reported_hostname, self.__hostname))
         return self.__reported_hostname
     def cluster_info(self):
-        luci_log.debug_verbose('[auth %d] reported cluster_info = (%s,%s) for %s' \
+        luci_log.debug_verbose('RC:cluster_info: [auth %d] reported cluster_info = (%s,%s) for %s' \
             % (self.__authed, self.__cluname, self.__clualias, self.__hostname))
         return (self.__cluname, self.__clualias)
     def os(self):
-        luci_log.debug_verbose('[auth %d] reported system_name = %s for %s' \
+        luci_log.debug_verbose('RC:os: [auth %d] reported system_name = %s for %s' \
             % (self.__authed, self.__os, self.__hostname))
         return self.__os
     def dom0(self):
-        luci_log.debug_verbose('[auth %d] reported system_name = %s for %s' \
+        luci_log.debug_verbose('RC:dom0: [auth %d] reported system_name = %s for %s' \
             % (self.__authed, self.__dom0, self.__hostname))
         return self.__dom0
     
     
     def auth(self, password):
         if self.authed():
-            luci_log.debug_verbose('already authenticated to %s' \
+            luci_log.debug_verbose('RC:auth0: already authenticated to %s' \
                 % self.__hostname)
             return True
         
@@ -111,7 +111,8 @@
         resp = self.__receive()
         self.__authed = resp.firstChild.getAttribute('authenticated') == 'true'
 
-        luci_log.debug_verbose('auth call returning %d' % self.__authed)
+        luci_log.debug_verbose('RC:auth1: auth call returning %d' \
+			% self.__authed)
         return self.__authed
 
 
@@ -124,26 +125,26 @@
         self.__send(doc)
         resp = self.__receive()
 
-        luci_log.debug_verbose('trying to unauthenticate to %s' \
+        luci_log.debug_verbose('RC:unauth0: trying to unauthenticate to %s' \
             % self.__hostname)
 
         try:
             ret = resp.firstChild.getAttribute('success')
-            luci_log.debug_verbose('unauthenticate returned %s for %s' \
+            luci_log.debug_verbose('RC:unauth1: unauthenticate returned %s for %s' \
                 % (ret, self.__hostname))
             if ret != '0':
                 raise Exception, 'Invalid response'
         except:
             errstr = 'Error authenticating to host %s: %s' \
                         % (self.__hostname, str(ret))
-            luci_log.debug(errstr)
+            luci_log.debug_verbose('RC:unauth2:' + errstr)
             raise RicciError, errstr
         return True
 
 
     def process_batch(self, batch_xml, async=False):
         try:
-            luci_log.debug_verbose('auth=%d to %s for batch %s [async=%d]' \
+            luci_log.debug_verbose('RC:PB0: [auth=%d] to %s for batch %s [async=%d]' \
                 % (self.__authed, self.__hostname, batch_xml.toxml(), async))
         except:
             pass
@@ -169,7 +170,7 @@
         try:
             self.__send(doc)
         except Exception, e:
-            luci_log.debug('Error sending XML \"%s\" to host %s' \
+            luci_log.debug_verbose('RC:PB1: Error sending XML \"%s\" to host %s' \
                 % (doc.toxml(), self.__hostname))
             raise RicciError, 'Error sending XML to host %s: %s' \
                     % (self.__hostname, str(e))
@@ -179,13 +180,13 @@
         # receive response
         doc = self.__receive()
         try:
-            luci_log.debug_verbose('received from %s XML \"%s\"' \
+            luci_log.debug_verbose('RC:PB2: received from %s XML \"%s\"' \
                 % (self.__hostname, doc.toxml()))
         except:
             pass
  
         if doc.firstChild.getAttribute('success') != '0':
-            luci_log.debug_verbose('batch command failed')
+            luci_log.debug_verbose('RC:PB3: batch command failed')
             raise RicciError, 'The last ricci command to host %s failed' \
                     % self.__hostname
         
@@ -195,7 +196,7 @@
                 if node.nodeName == 'batch':
                     batch_node = node.cloneNode(True)
         if batch_node == None:
-            luci_log.debug_verbose('batch node missing <batch/>')
+            luci_log.debug_verbose('RC:PB4: batch node missing <batch/>')
             raise RicciError, 'missing <batch/> in ricci\'s response from %s' \
                     % self.__hostname
 
@@ -204,23 +205,23 @@
     def batch_run(self, batch_str, async=True):
         try:
             batch_xml_str = '<?xml version="1.0" ?><batch>' + batch_str + '</batch>'
-            luci_log.debug_verbose('attempting batch \"%s\" for host %s' \
+            luci_log.debug_verbose('RC:BRun0: attempting batch \"%s\" for host %s' \
                 % (batch_xml_str, self.__hostname))
             batch_xml = minidom.parseString(batch_xml_str).firstChild
         except Exception, e:
-            luci_log.debug('received invalid batch XML for %s: \"%s\": %s' \
+            luci_log.debug_verbose('RC:BRun1: received invalid batch XML for %s: \"%s\": %s' \
                 % (self.__hostname, batch_xml_str, str(e)))
             raise RicciError, 'batch XML is malformed'
 
         try:
             ricci_xml = self.process_batch(batch_xml, async)
             try:
-                luci_log.debug_verbose('received XML \"%s\" from host %s in response to batch command.' \
+                luci_log.debug_verbose('RC:BRun2: received XML \"%s\" from host %s in response to batch command.' \
                     % (ricci_xml.toxml(), self.__hostname))
             except:
                 pass
         except:
-            luci_log.debug('An error occurred while trying to process the batch job: %s' % batch_xml_str)
+            luci_log.debug_verbose('RC:BRun3: An error occurred while trying to process the batch job: \"%s\"' % batch_xml_str)
             return None
 
         doc = minidom.Document()
@@ -228,7 +229,7 @@
         return doc
 
     def batch_report(self, batch_id):
-        luci_log.debug_verbose('[auth=%d] asking for batchid# %s for host %s' \
+        luci_log.debug_verbose('RC:BRep0: [auth=%d] asking for batchid# %s for host %s' \
             % (self.__authed, batch_id, self.__hostname))
 
         if not self.authed():
@@ -271,7 +272,7 @@
             try:
                 pos = self.ss.write(buff)
             except Exception, e:
-                luci_log.debug('Error sending XML \"%s\" to %s: %s' \
+                luci_log.debug_verbose('RC:send0: Error sending XML \"%s\" to %s: %s' \
                     % (buff, self.__hostname, str(e)))
                 raise RicciError, 'write error while sending XML to host %s' \
                         % self.__hostname
@@ -280,7 +281,7 @@
                         % self.__hostname
             buff = buff[pos:]
         try:
-            luci_log.debug_verbose('Sent XML \"%s\" to host %s' \
+            luci_log.debug_verbose('RC:send1: Sent XML \"%s\" to host %s' \
                 % (xml_doc.toxml(), self.__hostname))
         except:
             pass
@@ -302,19 +303,19 @@
                     # we haven't received all of the XML data yet.
                     continue
         except Exception, e:
-            luci_log.debug('Error reading data from %s: %s' \
+            luci_log.debug_verbose('RC:recv0: Error reading data from %s: %s' \
                 % (self.__hostname, str(e)))
             raise RicciError, 'Error reading data from host %s' % self.__hostname
         except:
             raise RicciError, 'Error reading data from host %s' % self.__hostname
-        luci_log.debug_verbose('Received XML \"%s\" from host %s' \
+        luci_log.debug_verbose('RC:recv1: Received XML \"%s\" from host %s' \
             % (xml_in, self.__hostname))
 
         try:
             if doc == None:
                 doc = minidom.parseString(xml_in)
         except Exception, e:
-            luci_log.debug('Error parsing XML \"%s" from %s' \
+            luci_log.debug_verbose('RC:recv2: Error parsing XML \"%s" from %s' \
                 % (xml_in, str(e)))
             raise RicciError, 'Error parsing XML from host %s: %s' \
                     % (self.__hostname, str(e))
@@ -326,7 +327,7 @@
         
         try:        
             if doc.firstChild.nodeName != 'ricci':
-                luci_log.debug('Expecting \"ricci\" got XML \"%s\" from %s' %
+                luci_log.debug_verbose('RC:recv3: Expecting \"ricci\" got XML \"%s\" from %s' %
                     (xml_in, self.__hostname))
                 raise Exception, 'Expecting first XML child node to be \"ricci\"'
         except Exception, e:
@@ -344,7 +345,7 @@
     try:
         return RicciCommunicator(hostname)
     except Exception, e:
-        luci_log.debug('Error creating a ricci connection to %s: %s' \
+        luci_log.debug_verbose('RC:GRC0: Error creating a ricci connection to %s: %s' \
             % (hostname, str(e)))
         return None
     pass
@@ -394,7 +395,7 @@
 def batch_status(batch_xml):
     if batch_xml.nodeName != 'batch':
         try:
-            luci_log.debug('Expecting an XML batch node. Got \"%s\"' \
+            luci_log.debug_verbose('RC:BS0: Expecting an XML batch node. Got \"%s\"' \
                 % batch_xml.toxml())
         except:
             pass
@@ -414,10 +415,10 @@
                     last = last + 1
                     last = last - 2 * last
     try:
-        luci_log.debug_verbose('Returning (%d, %d) for batch_status(\"%s\")' \
+        luci_log.debug_verbose('RC:BS1: Returning (%d, %d) for batch_status(\"%s\")' \
             % (last, total, batch_xml.toxml()))
     except:
-        luci_log.debug_verbose('Returning last, total')
+        luci_log.debug_verbose('RC:BS2: Returning last, total')
 
     return (last, total)
 
@@ -443,7 +444,7 @@
 # * error_msg:  error message
 def extract_module_status(batch_xml, module_num=1):
     if batch_xml.nodeName != 'batch':
-        luci_log.debug('Expecting \"batch\" got \"%s\"' % batch_xml.toxml())
+        luci_log.debug_verbose('RC:EMS0: Expecting \"batch\" got \"%s\"' % batch_xml.toxml())
         raise RicciError, 'Invalid XML node; expecting a batch node'
 
     c = 0
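
One aside on the batch_status context visible above: the two
assignments that adjust last are equivalent to the single expression
below, flipping it to a negative value, which appears to be the same
-(task index) failure convention that cluster_adapters checks against.
Purely an illustrative restatement, not part of the patch:

    def encode_failure(last):
        # equivalent to: last = last + 1; last = last - 2 * last
        return -(last + 1)
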



