[Cluster-devel] conga/luci cluster/form-macros site/luci/Exten ...

rmccabe at sourceware.org rmccabe at sourceware.org
Wed Dec 6 21:16:36 UTC 2006


CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-12-06 21:16:35

Modified files:
	luci/cluster   : form-macros 
	luci/site/luci/Extensions: ClusterNode.py cluster_adapters.py 

Log message:
	Related: #218040

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.122&r2=1.123
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterNode.py.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.173&r2=1.174

--- conga/luci/cluster/form-macros	2006/12/06 18:38:54	1.122
+++ conga/luci/cluster/form-macros	2006/12/06 21:16:35	1.123
@@ -77,7 +77,7 @@
 
 	<tal:block tal:condition="python: ricci_agent">
 		<tal:block tal:define="
-			global stat python: here.getClusterStatus(ricci_agent);
+			global stat python: here.getClusterStatus(request, ricci_agent);
 			global cstatus python: here.getClustersInfo(stat, request);
 			global cluster_status python: 'cluster ' + (('running' in cstatus and cstatus['running'] == 'true') and 'running' or 'stopped');"
 	 	/>
@@ -122,14 +122,22 @@
 		</td>
 	</tr>
 
+	<tr class="cluster">
+		<td tal:condition="exists: cstatus/error" class="cluster">
+			<span class="errmsgs">
+				An error occurred while attempting to get status information for this cluster. The information shown may be out of date.
+			</span>
+		</td>
+	</tr>
+
 	<tr class="cluster info_middle">
 		<td colspan="2" class="cluster cluster_quorum">
 			<ul class="cluster_quorum"
 				tal:condition="exists: cstatus/status">
 
-				<li><strong class="cluster">Status</strong>: <span tal:replace="cstatus/status"/></li>
-				<li><strong class="cluster">Total Cluster Votes</strong>: <span tal:replace="cstatus/votes"/></li>
-				<li><strong class="cluster">Minimum Required Quorum</strong>: <span tal:replace="cstatus/minquorum"/></li>
+				<li><strong class="cluster">Status</strong>: <span tal:replace="cstatus/status | string:[unknown]"/></li>
+				<li><strong class="cluster">Total Cluster Votes</strong>: <span tal:replace="cstatus/votes | string:[unknown]"/></li>
+				<li><strong class="cluster">Minimum Required Quorum</strong>: <span tal:replace="cstatus/minquorum | string:[unknown]"/></li>
 			</ul>
 		</td>
 	</tr>
@@ -2288,7 +2296,7 @@
 		global ricci_agent ri_agent | python: here.getRicciAgentForCluster(request)" />
 
 	<tal:block tal:define="
-		global nodestatus python: here.getClusterStatus(ricci_agent);
+		global nodestatus python: here.getClusterStatus(request, ricci_agent);
 		global nodeinfo python: here.getNodeInfo(modelb, nodestatus, request);
 		global status_class python: 'node_' + (nodeinfo['nodestate'] == '0' and 'active' or (nodeinfo['nodestate'] == '1' and 'inactive' or 'unknown'));
 		global cluster_node_status_str python: (nodeinfo['nodestate'] == '0' and 'Cluster member' or (nodeinfo['nodestate'] == '1' and 'Currently not a cluster participant' or 'This node is not responding'));
@@ -2531,7 +2539,7 @@
 		global ricci_agent ri_agent | python: here.getRicciAgentForCluster(request)" />
 
 	<tal:block tal:define="
-		global status python: here.getClusterStatus(ricci_agent);
+		global status python: here.getClusterStatus(request, ricci_agent);
 		global nds python: here.getNodesInfo(modelb, status, request)" />
 
 	<div tal:repeat="nd nds">
@@ -2752,7 +2760,7 @@
 		global ricci_agent ri_agent | python: here.getRicciAgentForCluster(request)" />
 
 	<tal:block tal:define="
-		global svcstatus python: here.getClusterStatus(ricci_agent);
+		global svcstatus python: here.getClusterStatus(request, ricci_agent);
 		global svcinf python: here.getServicesInfo(svcstatus,modelb,request);
 		global svcs svcinf/services" />
 
@@ -3027,7 +3035,7 @@
 
 	<tal:block tal:define="
 		global global_resources python: here.getResourcesInfo(modelb, request);
-		global sstat python: here.getClusterStatus(ricci_agent);
+		global sstat python: here.getClusterStatus(request, ricci_agent);
 		global sinfo python: here.getServiceInfo(sstat, modelb, request);
 		global running sinfo/running | nothing;" />
 
@@ -3217,7 +3225,7 @@
 		global ricci_agent ri_agent | python: here.getRicciAgentForCluster(request)" />
 
 	<tal:block tal:define="
-		global sta python: here.getClusterStatus(ricci_agent);
+		global sta python: here.getClusterStatus(request, ricci_agent);
 		global fdominfo python: here.getFdomsInfo(modelb, request, sta);" />
 
 	<div class="cluster fdom" tal:repeat="fdom fdominfo">
--- conga/luci/site/luci/Extensions/ClusterNode.py	2006/05/30 20:17:21	1.1
+++ conga/luci/site/luci/Extensions/ClusterNode.py	2006/12/06 21:16:35	1.2
@@ -96,3 +96,10 @@
     except KeyError, e:
       return ""
 
+  def getVotes(self):
+    try:
+      return self.getAttribute('votes')
+    except KeyError, e:
+      return "1"
+    except:
+      return None
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/06 18:38:54	1.173
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/12/06 21:16:35	1.174
@@ -1835,23 +1835,133 @@
 		return None
 	return getRicciAgent(self, clustername)
 
-def getClusterStatus(self, rc):
+def getClusterStatusModel(model):
+	results = list()
+	vals = {}
+
+	try:
+		clustername = model.getClusterName()
+		clusteralias = model.getClusterAlias()
+		vals['type'] = 'cluster'
+		vals['alias'] = clusteralias
+		vals['name'] = clustername
+		vals['error'] = True
+		vals['votes'] = '[unknown]'
+		vals['quorate'] = '[unknown]'
+		vals['minQuorum'] = '[unknown]'
+		results.append(vals)
+	except Exception, e:
+		luci_log.debug_verbose('GCSM0: %s' % str(e))
+		return None
+
+	try:
+		nodelist = model.getNodes()
+	except Exception, e:
+		luci_log.debug_verbose('GCSM1: %s' % str(e))
+		return None
+
+	for node in nodelist:
+		node_val = {}
+		node_val['type'] = 'node'
+		try:
+			node_name = node.getName()
+			if not node_name:
+				raise Exception, 'cluster node name is unknown'
+		except:
+			node_name = '[unknown]'
+
+		node_val['name'] = node_name
+		node_val['clustered'] = '[unknown]'
+		node_val['online'] = '[unknown]'
+		node_val['error'] = True
+
+		try:
+			votes = node.getVotes()
+			if not votes:
+				raise Exception, 'unknown number of votes'
+		except:
+			votes = '[unknown]'
+
+		node_val['votes'] = votes
+		results.append(node_val)
+	return results
+
+def getClusterStatusDB(self, clustername):
+	results = list()
+	vals = {}
+
+	vals['type'] = 'cluster'
+	vals['alias'] = clustername
+	vals['name'] = clustername
+	vals['error'] = True
+	vals['quorate'] = '[unknown]'
+	vals['votes'] = '[unknown]'
+	vals['minQuorum'] = '[unknown]'
+	results.append(vals)
+
+	try:
+		cluster_path = '%s/luci/systems/cluster/%s' % (CLUSTER_FOLDER_PATH, clustername)
+		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
+	except Exception, e:
+		luci_log.debug_verbose('GCSDB0: %s: %s' % (clustername, str(e)))
+		return results
+
+	for node in nodelist:
+		try:
+			node_val = {}
+			node_val['type'] = 'node'
+			node_val['name'] = node[0]
+			node_val['clustered'] = '[unknown]'
+			node_val['online'] = '[unknown]'
+			node_val['error'] = True
+			results.append(node_val)
+		except Exception, e:
+			luci_log.debug_verbose('GCSDB1: %s' % str(e))
+	return results
+
+def getClusterStatus(self, request, rc):
 	try:
 		doc = getClusterStatusBatch(rc)
+		if not doc:
+			raise Exception, 'doc is None'
 	except Exception, e:
 		luci_log.debug_verbose('GCS0: error: %s' % str(e))
 		doc = None
 
+	if doc is None:
+		try:
+			model = request.SESSION.get('model')
+			cinfo = getClusterStatusModel(model)
+			if not cinfo or len(cinfo) < 1:
+				raise Exception, 'cinfo is None'
+			return cinfo
+		except Exception, e:
+			luci_log.debug_verbose('GCS1: %s' % str(e))
+			doc = None
+
 	if not doc:
 		try:
-			luci_log.debug_verbose('GCS1: returned None for %s/%s' % rc.cluster_info())
-		except:
-			pass
+			clustername = None
+			try:
+				clustername = request['clustername']
+			except:
+				try:
+					clustername = request.form['clustername']
+				except:
+					pass
 
-		return {}
+			if not clustername:
+				raise Exception, 'unable to determine cluster name'
 
-	results = list()
+			cinfo = getClusterStatusDB(self, clustername)
+			if not cinfo or len(cinfo) < 1:
+				raise Exception, 'cinfo is None'
+			return cinfo
+		except Exception, e:
+			luci_log.debug_verbose('GCS1a: unable to get cluster info from DB: %s' % str(e))
+		return []
 
+	results = list()
 	vals = {}
 	vals['type'] = "cluster"
 
@@ -2315,39 +2425,31 @@
         return {}
 
   if model is None:
-    rc = getRicciAgent(self, cluname)
-    if not rc:
-      luci_log.debug_verbose('GCI1: unable to find a ricci agent for the %s cluster' % cluname)
-      return {}
     try:
-      model = getModelBuilder(None, rc, rc.dom0())
+      model = getModelForCluster(self, cluname)
       if not model:
         raise Exception, 'model is none'
-
-      try:
-        req.SESSION.set('model', model)
-      except Exception, e2:
-        luci_log.debug_verbose('GCI2 unable to set model in session: %s' % str(e2))
+      req.SESSION.set('model', model)
     except Exception, e:
-      luci_log.debug_verbose('GCI3: unable to get model for cluster %s: %s' % (cluname, str(e)))
+      luci_log.debug_verbose('GCI1: unable to get model for cluster %s: %s' % (cluname, str(e)))
       return {}
 
   prop_baseurl = req['URL'] + '?' + PAGETYPE + '=' + CLUSTER_CONFIG + '&' + CLUNAME + '=' + cluname + '&'
-  map = {}
+  clumap = {}
   basecluster_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_GENERAL_TAB
   #needed:
-  map['basecluster_url'] = basecluster_url
+  clumap['basecluster_url'] = basecluster_url
   #name field
-  map['clustername'] = model.getClusterAlias()
+  clumap['clustername'] = model.getClusterAlias()
   #config version
   cp = model.getClusterPtr()
-  map['config_version'] = cp.getConfigVersion()
+  clumap['config_version'] = cp.getConfigVersion()
   #-------------
   #new cluster params - if rhel5
   #-------------
   #Fence Daemon Props
   fencedaemon_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_FENCE_TAB
-  map['fencedaemon_url'] = fencedaemon_url
+  clumap['fencedaemon_url'] = fencedaemon_url
   fdp = model.getFenceDaemonPtr()
   pjd = fdp.getAttribute('post_join_delay')
   if pjd is None:
@@ -2356,35 +2458,35 @@
   if pfd is None:
     pfd = "0"
   #post join delay
-  map['pjd'] = pjd
+  clumap['pjd'] = pjd
   #post fail delay
-  map['pfd'] = pfd
+  clumap['pfd'] = pfd
   #-------------
   #if multicast
   multicast_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_MCAST_TAB
-  map['multicast_url'] = multicast_url
+  clumap['multicast_url'] = multicast_url
   #mcast addr
   is_mcast = model.isMulticast()
-  #map['is_mcast'] = is_mcast
+  #clumap['is_mcast'] = is_mcast
   if is_mcast:
-    map['mcast_addr'] = model.getMcastAddr()
-    map['is_mcast'] = "True"
+    clumap['mcast_addr'] = model.getMcastAddr()
+    clumap['is_mcast'] = "True"
   else:
-    map['is_mcast'] = "False"
-    map['mcast_addr'] = "1.2.3.4"
+    clumap['is_mcast'] = "False"
+    clumap['mcast_addr'] = "1.2.3.4"
 
   #-------------
   #quorum disk params
   quorumd_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_QDISK_TAB
-  map['quorumd_url'] = quorumd_url
+  clumap['quorumd_url'] = quorumd_url
   is_quorumd = model.isQuorumd()
-  map['is_quorumd'] = is_quorumd
-  map['interval'] = ""
-  map['tko'] = ""
-  map['votes'] = ""
-  map['min_score'] = ""
-  map['device'] = ""
-  map['label'] = ""
+  clumap['is_quorumd'] = is_quorumd
+  clumap['interval'] = ""
+  clumap['tko'] = ""
+  clumap['votes'] = ""
+  clumap['min_score'] = ""
+  clumap['device'] = ""
+  clumap['label'] = ""
 
   #list struct for heuristics...
   hlist = list()
@@ -2393,27 +2495,27 @@
     qdp = model.getQuorumdPtr()
     interval = qdp.getAttribute('interval')
     if interval is not None:
-      map['interval'] = interval
+      clumap['interval'] = interval
 
     tko = qdp.getAttribute('tko')
     if tko is not None:
-      map['tko'] = tko
+      clumap['tko'] = tko
 
     votes = qdp.getAttribute('votes')
     if votes is not None:
-      map['votes'] = votes
+      clumap['votes'] = votes
 
     min_score = qdp.getAttribute('min_score')
     if min_score is not None:
-      map['min_score'] = min_score
+      clumap['min_score'] = min_score
 
     device = qdp.getAttribute('device')
     if device is not None:
-      map['device'] = device
+      clumap['device'] = device
 
     label = qdp.getAttribute('label')
     if label is not None:
-      map['label'] = label
+      clumap['label'] = label
 
     heuristic_kids = qdp.getChildren()
     h_ctr = 0
@@ -2442,9 +2544,9 @@
       else:
         hmap['hinterval'] = ""
       hlist.append(hmap)
-  map['hlist'] = hlist
+  clumap['hlist'] = hlist
 
-  return map
+  return clumap
 
 def getClustersInfo(self, status, req):
   map = {}
@@ -2464,6 +2566,10 @@
   if len(clulist) < 1:
     return {}
   clu = clulist[0]
+  cluerror = False
+  if 'error' in clu:
+    cluerror = True
+    map['error'] = True
   clustername = clu['name']
   if clu['alias'] != "":
     map['clusteralias'] = clu['alias']
@@ -2478,6 +2584,7 @@
     map['running'] = "false"
   map['votes'] = clu['votes']
   map['minquorum'] = clu['minQuorum']
+
   map['clucfg'] = baseurl + "?" + PAGETYPE + "=" + CLUSTER_CONFIG + "&" + CLUNAME + "=" + clustername
 
   map['restart_url'] = baseurl + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + clustername + '&task=' + CLUSTER_RESTART
@@ -2499,6 +2606,7 @@
   map['currentservices'] = svc_dict_list
   node_dict_list = list()
   for item in nodelist:
+    node_error = 'error' in item
     nmap = {}
     name = item['name']
     nmap['nodename'] = name
@@ -3001,15 +3109,16 @@
 
   infohash['currentservices'] = svc_dict_list
 
-  #next is faildoms
-  fdoms = model.getFailoverDomainsForNode(nodename)
   fdom_dict_list = list()
-  for fdom in fdoms:
-    fdom_dict = {}
-    fdom_dict['name'] = fdom.getName()
-    fdomurl = baseurl + "?" + PAGETYPE + "=" + FDOM_CONFIG + "&" + CLUNAME + "=" + clustername + "&fdomname=" + fdom.getName()
-    fdom_dict['fdomurl'] = fdomurl
-    fdom_dict_list.append(fdom_dict)
+  if model:
+    #next is faildoms
+    fdoms = model.getFailoverDomainsForNode(nodename)
+    for fdom in fdoms:
+      fdom_dict = {}
+      fdom_dict['name'] = fdom.getName()
+      fdomurl = baseurl + "?" + PAGETYPE + "=" + FDOM_CONFIG + "&" + CLUNAME + "=" + clustername + "&fdomname=" + fdom.getName()
+      fdom_dict['fdomurl'] = fdomurl
+      fdom_dict_list.append(fdom_dict)
 
   infohash['fdoms'] = fdom_dict_list
 
@@ -3040,7 +3149,6 @@
 
   infohash['logurl'] = '/luci/logs/?nodename=' + nodename_resolved + '&clustername=' + clustername
   return infohash
-  #get list of faildoms for node
 
 def getNodesInfo(self, model, status, req):
   resultlist = list()
@@ -3144,6 +3252,10 @@
   return resultlist
 
 def getFence(self, model, request):
+  if not model:
+    luci_log.debug_verbose('getFence0: model is None')
+    return {}
+
   map = {}
   fencename = request['fencename']
   fencedevs = model.getFenceDevices()
@@ -3190,6 +3302,10 @@
   raise
   
 def getFenceInfo(self, model, request):
+  if not model:
+    luci_log.debug_verbose('getFenceInfo00: model is None')
+    return {}
+
   try:
     clustername = request['clustername']
   except:
@@ -3440,9 +3556,14 @@
   return map    
       
 def getFencesInfo(self, model, request):
+  map = {}
+  if not model:
+    luci_log.debug_verbose('getFencesInfo0: model is None')
+    map['fencedevs'] = list()
+    return map
+
   clustername = request['clustername']
   baseurl = request['URL']
-  map = {}
   fencedevs = list() #This is for the fencedev list page
 
   #Get list of fence devices




More information about the Cluster-devel mailing list