[Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...

rmccabe at sourceware.org rmccabe at sourceware.org
Mon Oct 2 20:53:37 UTC 2006


CVSROOT:	/cvs/cluster
Module name:	conga
Changes by:	rmccabe at sourceware.org	2006-10-02 20:53:37

Modified files:
	luci/site/luci/Extensions: cluster_adapters.py 

Log message:
	Fix some of the resource backend code to accept parameters submitted via POST as well as GET.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.76&r2=1.77
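
The patched getResourceInfo() (see the hunk around line 2086 below) now reads its parameters first from the request mapping (GET/query-string variables) and falls back to request.form (POSTed variables) before giving up. Below is a minimal sketch of that fallback pattern; the FakeRequest class and get_param() helper are hypothetical stand-ins for the Zope REQUEST object that luci actually receives, not code from this patch.

	class FakeRequest:
		"""Hypothetical stand-in for a Zope REQUEST: query vars plus a form dict."""
		def __init__(self, query, form):
			self.query = query	# simulates GET / query-string variables
			self.form = form	# simulates POSTed form variables

		def __getitem__(self, key):
			return self.query[key]

	def get_param(request, key):
		"""Look a parameter up in the request first, then in request.form."""
		try:
			return request[key]		# GET / query string
		except KeyError:
			try:
				return request.form[key]	# POST form data
			except KeyError:
				return None		# caller treats None as "not supplied"

	if __name__ == '__main__':
		req = FakeRequest({}, {'resourcename': 'myfs'})
		print get_param(req, 'resourcename')	# -> myfs (found in the POST form)
		print get_param(req, 'clustername')	# -> None (missing from both)

The real handler returns an empty map instead of None when a required parameter is absent, but the lookup order is the same.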

--- conga/luci/site/luci/Extensions/cluster_adapters.py	2006/09/28 22:04:27	1.76
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2006/10/02 20:53:37	1.77
@@ -21,7 +21,7 @@
 #folder, then only the admin user may see this menu, and
 #the configure option should not be displayed.
 #2)If there are clusters in the ManagedClusterSystems,
-#then only display chooser if the current user has 
+#then only display chooser if the current user has
 #permissions on at least one. If the user is admin, show ALL clusters
 
 from homebase_adapters import nodeAuth, nodeUnauth, manageCluster, createClusterSystems
@@ -187,11 +187,11 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,CLUSTER_ADD, "string")
     flag.manage_addProperty(FLAG_DESC,"Creating node " + key + " for cluster " + clusterName, "string")
-  
+
 
 def validateAddClusterNode(self, request):
 	errors = list()
-	messages = list() 
+	messages = list()
 	requestResults = {}
 
 	try:
@@ -542,7 +542,7 @@
 
 def createCluChooser(self, request, systems):
   dummynode = {}
-  
+
   if request.REQUEST_METHOD == 'POST':
     ret = validatePost(self, request)
     try:
@@ -553,7 +553,7 @@
     try: request.SESSION.set('checkRet', {})
     except: pass
 
-  #First, see if a cluster is chosen, then 
+  #First, see if a cluster is chosen, then
   #check that the current user can access that system
   cname = None
   try:
@@ -581,7 +581,7 @@
     cldata['currentItem'] = True
   else:
     cldata['currentItem'] = False
-  
+
   cladd = {}
   cladd['Title'] = "Create"
   cladd['cfg_type'] = "clusteradd"
@@ -591,7 +591,7 @@
     cladd['currentItem'] = True
   else:
     cladd['currentItem'] = False
-  
+
   clcfg = {}
   clcfg['Title'] = "Configure"
   clcfg['cfg_type'] = "clustercfg"
@@ -609,8 +609,8 @@
     clcfg['show_children'] = True
   else:
     clcfg['show_children'] = False
-  
-  #loop through all clusters 
+
+  #loop through all clusters
   syslist= list()
   for system in systems:
     clsys = {}
@@ -629,7 +629,7 @@
     syslist.append(clsys)
 
   clcfg['children'] = syslist
-  
+
   mylist = list()
   mylist.append(cldata)
   mylist.append(cladd)
@@ -674,7 +674,7 @@
     nd['currentItem'] = True
   else:
     nd['currentItem'] = False
-    
+
 
   ndadd = {}
   ndadd['Title'] = "Add a Node"
@@ -685,7 +685,7 @@
     ndadd['currentItem'] = True
   else:
     ndadd['currentItem'] = False
-  
+
   ndcfg = {}
   ndcfg['Title'] = "Configure"
   ndcfg['cfg_type'] = "nodecfg"
@@ -699,11 +699,11 @@
     ndcfg['currentItem'] = True
   else:
     ndcfg['currentItem'] = False
-  
+
   nodes = model.getNodes()
   nodenames = list()
   for node in nodes:
-    nodenames.append(node.getName()) 
+    nodenames.append(node.getName())
 
   cfgablenodes = list()
   for nodename in nodenames:
@@ -723,9 +723,9 @@
         cfg['currentItem'] = False
     else:
       cfg['currentItem'] = False
-      
+
     cfgablenodes.append(cfg)
-  
+
   #Now add nodename structs as children of the config element
   ndcfg['children'] = cfgablenodes
 
@@ -749,7 +749,7 @@
     sv['currentItem'] = True
   else:
     sv['currentItem'] = False
-    
+
   svadd = {}
   svadd['Title'] = "Add a Service"
   svadd['cfg_type'] = "serviceadd"
@@ -759,7 +759,7 @@
     svadd['currentItem'] = True
   else:
     svadd['currentItem'] = False
-    
+
   svcfg = {}
   svcfg['Title'] = "Configure a Service"
   svcfg['cfg_type'] = "servicecfg"
@@ -794,16 +794,16 @@
         svc['currentItem'] = False
     else:
       svc['currentItem'] = False
-      
+
     serviceable.append(svc)
-  svcfg['children'] = serviceable  
+  svcfg['children'] = serviceable
+
 
 
-  
   kids = list()
   kids.append(svadd)
   kids.append(svcfg)
-  sv['children'] = kids  
+  sv['children'] = kids
 #############################################################
   rv = {}
   rv['Title'] = "Resources"
@@ -818,7 +818,7 @@
     rv['currentItem'] = True
   else:
     rv['currentItem'] = False
-    
+
   rvadd = {}
   rvadd['Title'] = "Add a Resource"
   rvadd['cfg_type'] = "resourceadd"
@@ -828,7 +828,7 @@
     rvadd['currentItem'] = True
   else:
     rvadd['currentItem'] = False
-    
+
   rvcfg = {}
   rvcfg['Title'] = "Configure a Resource"
   rvcfg['cfg_type'] = "resourcecfg"
@@ -863,17 +863,17 @@
         rvc['currentItem'] = False
     else:
       rvc['currentItem'] = False
-      
+
     resourceable.append(rvc)
-  rvcfg['children'] = resourceable  
+  rvcfg['children'] = resourceable
+
 
 
-  
   kids = list()
   kids.append(rvadd)
   kids.append(rvcfg)
-  rv['children'] = kids  
- ################################################################# 
+  rv['children'] = kids
+ #################################################################
   fd = {}
   fd['Title'] = "Failover Domains"
   fd['cfg_type'] = "failoverdomains"
@@ -887,7 +887,7 @@
     fd['currentItem'] = True
   else:
     fd['currentItem'] = False
-    
+
   fdadd = {}
   fdadd['Title'] = "Add a Failover Domain"
   fdadd['cfg_type'] = "failoverdomainadd"
@@ -897,7 +897,7 @@
     fdadd['currentItem'] = True
   else:
     fdadd['currentItem'] = False
-    
+
   fdcfg = {}
   fdcfg['Title'] = "Configure a Failover Domain"
   fdcfg['cfg_type'] = "failoverdomaincfg"
@@ -932,16 +932,16 @@
         fdc['currentItem'] = False
     else:
       fdc['currentItem'] = False
-      
+
     fdomable.append(fdc)
-  fdcfg['children'] = fdomable  
+  fdcfg['children'] = fdomable
+
 
 
-  
   kids = list()
   kids.append(fdadd)
   kids.append(fdcfg)
-  fd['children'] = kids  
+  fd['children'] = kids
 #############################################################
   fen = {}
   fen['Title'] = "Fence Devices"
@@ -956,7 +956,7 @@
     fen['currentItem'] = True
   else:
     fen['currentItem'] = False
-    
+
   fenadd = {}
   fenadd['Title'] = "Add a Fence Device"
   fenadd['cfg_type'] = "fencedeviceadd"
@@ -966,7 +966,7 @@
     fenadd['currentItem'] = True
   else:
     fenadd['currentItem'] = False
-    
+
   fencfg = {}
   fencfg['Title'] = "Configure a Fence Device"
   fencfg['cfg_type'] = "fencedevicecfg"
@@ -1001,16 +1001,16 @@
         fenc['currentItem'] = False
     else:
       fenc['currentItem'] = False
-      
+
     fenceable.append(fenc)
-  fencfg['children'] = fenceable  
+  fencfg['children'] = fenceable
+
 
 
-  
   kids = list()
   kids.append(fenadd)
   kids.append(fencfg)
-  fen['children'] = kids  
+  fen['children'] = kids
 #############################################################
 
   mylist = list()
@@ -1026,10 +1026,10 @@
 
 
 def getClusterName(self, model):
-  return model.getClusterName() 
+  return model.getClusterName()
 
 def getClusterAlias(self, model):
-  alias = model.getClusterAlias() 
+  alias = model.getClusterAlias()
   if alias == None:
     return model.getClusterName()
   else:
@@ -1060,16 +1060,16 @@
     base2 = req['HTTP_HOST'] + req['SERVER_PORT']
 
   htab = { 'Title':"homebase",
-           'Description':"Home base for this luci server", 
+           'Description':"Home base for this luci server",
            'Taburl':"/luci/homebase"}
   if selectedtab == "homebase":
     htab['isSelected'] = True
   else:
     htab['isSelected'] = False
-      
+
 
   ctab = { 'Title':"cluster",
-           'Description':"Cluster configuration page", 
+           'Description':"Cluster configuration page",
            'Taburl':"/luci/cluster?pagetype=3"}
   if selectedtab == "cluster":
     ctab['isSelected'] = True
@@ -1077,16 +1077,16 @@
     ctab['isSelected'] = False
 
   stab = { 'Title':"storage",
-           'Description':"Storage configuration page", 
+           'Description':"Storage configuration page",
            'Taburl':"/luci/storage"}
   if selectedtab == "storage":
     stab['isSelected'] = True
   else:
     stab['isSelected'] = False
 
-  portaltabs.append(htab) 
-  portaltabs.append(ctab) 
-  portaltabs.append(stab) 
+  portaltabs.append(htab)
+  portaltabs.append(ctab)
+  portaltabs.append(stab)
 
   return portaltabs
 
@@ -1123,7 +1123,7 @@
     return None
   else:
     return None
-  
+
 def getRicciAgent(self, clustername):
   #Check cluster permission here! return none if false
   path = CLUSTER_FOLDER_PATH + clustername[0]
@@ -1137,7 +1137,7 @@
     return ""
   else:
     return ""
-  
+
 
 def getClusterStatus(self, ricci_name):
   rb = ricci_bridge(ricci_name)
@@ -1177,8 +1177,8 @@
       vals['failed'] = node.getAttribute('failed')
       vals['autostart'] = node.getAttribute('autostart')
       results.append(vals)
-    
-  return results  
+
+  return results
 
 def getServicesInfo(self, status, modelb, req):
   map = {}
@@ -1190,7 +1190,7 @@
       itemmap = {}
       itemmap['name'] = item['name']
       if item['running'] == "true":
-        itemmap['running'] = "true" 
+        itemmap['running'] = "true"
         itemmap['nodename'] = item['nodename']
       itemmap['autostart'] = item['autostart']
       itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE
@@ -1206,7 +1206,7 @@
   map['services'] = maplist
 
   return map
-    
+
 def getServiceInfo(self,status,modelb,req):
   #set up struct for service config page
   baseurl = req['URL']
@@ -1269,7 +1269,7 @@
     children = svc.getChildren()
     for child in children:
       recurse_resources(root_uuid, child, resource_list, indent_ctr)
-      
+
   hmap['resource_list'] = resource_list
   return hmap
 
@@ -1289,7 +1289,7 @@
     rc_map['type'] = child.getResourceType()
 
   rc_map['indent_ctr'] = indent_ctr
-    
+
   #Note: Final version needs all resource attrs
   rc_map['attrs'] = child.getAttributes()
   rc_map['uuid'] = make_uuid('resource')
@@ -1305,7 +1305,7 @@
 
   rc_map['max_depth'] = child_depth
   return child_depth + 1
-    
+
 def serviceStart(self, ricci_agent, req):
   rb = ricci_bridge(ricci_agent)
   svcname = req['servicename']
@@ -1356,7 +1356,7 @@
   flag = self.restrictedTraverse(objpath)
   #flag[BATCH_ID] = batch_id
   #flag[TASKTYPE] = SERVICE_RESTART
-  #flag[FLAG_DESC] = "Restarting service " + svcname 
+  #flag[FLAG_DESC] = "Restarting service " + svcname
   flag.manage_addProperty(BATCH_ID,batch_id, "string")
   flag.manage_addProperty(TASKTYPE,SERVICE_RESTART, "string")
   flag.manage_addProperty(FLAG_DESC,"Restarting service " + svcname, "string")
@@ -1427,7 +1427,7 @@
       for nitem in nlist:
         if nitem['name'] == ndname:
           break
-      nodesmap['nodename'] = ndname 
+      nodesmap['nodename'] = ndname
       nodesmap['nodecfgurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + ndname + "&pagetype=" + NODE
       if nitem['clustered'] == "true":
         nodesmap['status'] = NODE_ACTIVE
@@ -1460,7 +1460,7 @@
     fdom_map['svclist'] = svclist
     fdomlist.append(fdom_map)
   return fdomlist
- 
+
 def processClusterProps(self, ricci_agent, request):
   #First, retrieve cluster.conf from session
   conf = request.SESSION.get('conf')
@@ -1502,8 +1502,8 @@
 
   else:
     return
-  
- 
+
+
 def getClusterInfo(self, model, req):
   cluname = req[CLUNAME]
   baseurl = req['URL'] + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + cluname + "&"
@@ -1546,7 +1546,7 @@
   else:
     map['is_mcast'] = "False"
     map['mcast_addr'] = "1.2.3.4"
-    
+
   #-------------
   #quorum disk params
   quorumd_url = baseurl + ACTIONTYPE + "=" + QUORUMD
@@ -1710,7 +1710,7 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
-                                                                                
+
     response = request.RESPONSE
     #Is this correct? Should we re-direct to the cluster page?
     response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1730,7 +1730,7 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
-                                                                                
+
     response = request.RESPONSE
     #Once again, is this correct? Should we re-direct to the cluster page?
     response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1751,7 +1751,7 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,NODE_REBOOT, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being rebooted", "string")
-                                                                                
+
     response = request.RESPONSE
     #Once again, is this correct? Should we re-direct to the cluster page?
     response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1789,7 +1789,7 @@
     flag.manage_addProperty(BATCH_ID,batch_id, "string")
     flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
     flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
-                                                                                
+
     response = request.RESPONSE
     #Once again, is this correct? Should we re-direct to the cluster page?
     response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1799,7 +1799,7 @@
     #We need to get a node name other than the node
     #to be deleted, then delete the node from the cluster.conf
     #and propogate it. We will need two ricci agents for this task.
-    
+
     #First, delete cluster.conf from node to be deleted.
 
     #next, have node leave cluster.
@@ -1853,7 +1853,7 @@
     flag.manage_addProperty(FLAG_DESC,"Deleting node \'" + nodename + "\'", "string")
     response = request.RESPONSE
     response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
-  
+
 
 def getNodeInfo(self, model, status, request):
   infohash = {}
@@ -1882,17 +1882,17 @@
     nodestate = NODE_ACTIVE
   else:
     nodestate = NODE_INACTIVE
-                                                                                
+
   infohash['nodestate'] = nodestate
   infohash['nodename'] = nodename
-                                                                                
+
   #set up drop down links
   if nodestate == NODE_ACTIVE:
     infohash['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['reboot_url'] = baseurl + "?pagetype=" +NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['fence_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
-                                                                                
+
   if nodestate == NODE_INACTIVE:
     infohash['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_JOIN_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
     infohash['reboot_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
@@ -1909,7 +1909,7 @@
       svc_dict['servicename'] = svcname
       svc_dict['svcurl'] = svcurl
       svc_dict_list.append(svc_dict)
-                                                                                
+
   infohash['currentservices'] = svc_dict_list
 
   #next is faildoms
@@ -1921,9 +1921,9 @@
     fdomurl = baseurl + "?" + PAGETYPE + "=" + FDOM_CONFIG + "&" + CLUNAME + "=" + clustername + "&fdomname=" + fdom.getName()
     fdom_dict['fdomurl'] = fdomurl
     fdom_dict_list.append(fdom_dict)
-                                                                              
+
   infohash['fdoms'] = fdom_dict_list
-                                                                                
+
   #return infohash
   infohash['d_states'] = None
   if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
@@ -1936,10 +1936,10 @@
     dlist.append("rgmanager")
     states = rb.getDaemonStates(dlist)
     infohash['d_states'] = states
-    
-  infohash['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + nodename + "&clustername=" + clustername                                                                            
+
+  infohash['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + nodename + "&clustername=" + clustername
   return infohash
-  #get list of faildoms for node  
+  #get list of faildoms for node
 
 def getNodesInfo(self, model,status,req):
   resultlist = list()
@@ -1973,14 +1973,14 @@
       map['status'] = NODE_INACTIVE
       map['status_str'] = NODE_INACTIVE_STR
 
-    map['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + name + "&clustername=" + clustername                                                                            
+    map['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + name + "&clustername=" + clustername
     #set up URLs for dropdown menu...
     if map['status'] == NODE_ACTIVE:
       map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
       map['reboot_url'] = baseurl + "?pagetype=" +NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + name + "&clustername=" + clustername
       map['fence_it_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + name + "&clustername=" + clustername
       map['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + name + "&clustername=" + clustername
-                                                                                
+
     if map['status'] == NODE_INACTIVE:
       map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_JOIN_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
       map['reboot_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + name + "&clustername=" + clustername
@@ -2028,7 +2028,7 @@
   cluname = req['clustername']
   path = CLUSTER_FOLDER_PATH + cluname
   clusterfolder = self.restrictedTraverse(path)
-  items = clusterfolder.objectItems('ManagedSystem') 
+  items = clusterfolder.objectItems('ManagedSystem')
   #Ok, here is what is going on...if there is an item,
   #we need to call the ricci_bridge and get a batch report.
   #This report will tell us one of three things:
@@ -2040,7 +2040,7 @@
     #Check here for more than 1 entry (an error)
     ricci = item[0].split("____") #This removes the 'flag' suffix
     rb = ricci_bridge(ricci[0])
-    finished = rb.checkBatch(item[1].getProperty(BATCH_ID)) 
+    finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
     if finished == True:
       clusterfolder.manage_delObjects(item[0])
       map['refreshurl'] = '5; url=\".\"'
@@ -2054,7 +2054,7 @@
       if dex != (-1):
         tmpstr = part2[:dex] #This strips off busyfirst var
         part2 = tmpstr
-        ###FIXME - The above assumes that the 'busyfirst' query var is at the 
+        ###FIXME - The above assumes that the 'busyfirst' query var is at the
         ###end of the URL...
       wholeurl = part1 + "?" + part2
       #map['url'] = "5, url=" + req['ACTUAL_URL'] + "?" + req['QUERY_STRING']
@@ -2086,31 +2086,45 @@
     itemmap['delurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_REMOVE
     resList.append(itemmap)
   return resList
-                                                                                
-def getResourceInfo(modelb, request):
-	resMap = {}
 
+def getResourceInfo(modelb, request):
 	try:
 		name = request['resourcename']
-		baseurl = request['URL']
+	except KeyError, e:
+		name = request.form['resourcename']
+	except:
+		return {}
+
+	try:
 		cluname = request['clustername']
+	except KeyError, e:
+		cluname = request.form['clustername']
+	except:
+		return {}
+
+	try:
+		baseurl = request['URL']
+	except:
+		return {}
 
-		for res in modelb.getResources():
-			if res.getName() == name:
+	for res in modelb.getResources():
+		if res.getName() == name:
+			resMap = {}
+			try:
 				resMap['name'] = res.getName()
 				resMap['type'] = res.resource_type
 				resMap['tag_name'] = res.TAG_NAME
 				resMap['attrs'] = res.attr_hash
 				resMap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + res.getName() + "&pagetype=" + RESOURCE_CONFIG
 				return resMap
-	except: pass
-	return {}
+			except:
+				return {}
 
 def delResource(self, request, ragent):
   modelb = request.SESSION.get('model')
   resPtr = modelb.getResourcesPtr()
   resources = resPtr.getChildren()
-  name = request['resourcename']                                                                             
+  name = request['resourcename']
   for res in resources:
     if res.getName() == name:
       resPtr.removeChild(res)
@@ -2143,11 +2157,11 @@
   response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
 
 
-  
+
 def addResource(self, request, ragent):
   if not request.form:
     return "Nothing submitted, no changes made."
-                                                                                
+
   if request.form['type'] != 'ip' and  not request.form['resourceName']:
     return "Please enter a name for the resource."
   types = {'ip': addIp,
@@ -2158,8 +2172,8 @@
            'nfsc': addNfsx,
            'scr': addScr,
            'smb': addSmb}
-  
-  
+
+
   type = request.form["type"]
   res = types[type](request)
   modelb = request.SESSION.get('model')
@@ -2195,12 +2209,12 @@
 def getResourceForEdit(modelb, name):
   resPtr = modelb.getResourcesPtr()
   resources = resPtr.getChildren()
-                                                                               
+
   for res in resources:
     if res.getName() == name:
       resPtr.removeChild(res)
       break
-                                                                              
+
   return res
 
 def addIp(request):
@@ -2236,20 +2250,20 @@
     res.attr_hash["force_unmount"] = '1'
   else:
     res.attr_hash["force_unmount"] = '0'
-                                                                                
+
   if form.has_key('selffence'):
     res.attr_hash["self_fence"] = '1'
   else:
     res.attr_hash["self_fence"] = '0'
-                                                                                
+
   if form.has_key('checkfs'):
     res.attr_hash["force_fsck"] = '1'
   else:
     res.attr_hash["force_fsck"] = '0'
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def addGfs(request):
   modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
@@ -2262,12 +2276,12 @@
   res.attr_hash["device"] = form["device"]
   res.attr_hash["options"] = form["options"]
   res.attr_hash["fsid"] = form["fsid"]
-                                                                                
+
   if form.has_key('forceunmount'):
     res.attr_hash["force_unmount"] = '1'
   else:
     res.attr_hash["force_unmount"] = '0'
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
 
@@ -2284,15 +2298,15 @@
   res.attr_hash["options"] = form["options"]
   res.attr_hash["exportpath"] = form["export"]
   res.attr_hash["nfstype"] = form["fstype"]
-                                                                                
+
   if form.has_key('forceunmount'):
     res.attr_hash["force_unmount"] = '1'
   else:
     res.attr_hash["force_unmount"] = '0'
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def addNfsc(request):
   modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
@@ -2303,10 +2317,10 @@
   res.attr_hash["name"] = form["resourceName"]
   res.attr_hash["target"] = form["target"]
   res.attr_hash["options"] = form["options"]
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def addNfsx(request):
   modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
@@ -2315,7 +2329,7 @@
     res = apply(NFSExport)
   form = request.form
   res.attr_hash["name"] = form["resourceName"]
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
 
@@ -2328,10 +2342,10 @@
   form = request.form
   res.attr_hash["name"] = form["resourceName"]
   res.attr_hash["file"] = form["file"]
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def addSmb(request):
   modelb = request.SESSION.get('model')
   if request.form.has_key('edit'):
@@ -2341,16 +2355,15 @@
   form = request.form
   res.attr_hash["name"] = form["resourceName"]
   res.attr_hash["workgroup"] = form["workgroup"]
-                                                                                
+
   modelb.getResourcesPtr().addChild(res)
   return res
-                                                                                
+
 def appendModel(request, model):
-  try:
-    request.SESSION.set('model', model)
-  except:
-    pass
-  return
+	try:
+		request.SESSION.set('model', model)
+	except:
+		pass
 
 def resolve_nodename(self, clustername, nodename):
   path = CLUSTER_FOLDER_PATH + clustername



