[Cluster-devel] conga/luci cluster/form-macros site/luci/Extensions ...
rmccabe at sourceware.org
rmccabe at sourceware.org
Tue Jan 23 13:53:37 UTC 2007
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2007-01-23 13:53:36
Modified files:
luci/cluster : form-macros
luci/site/luci/Extensions: Ip.py cluster_adapters.py
conga_constants.py
Log message:
GULM support for RHEL4 clusters.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.157&r2=1.158
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/Ip.py.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.209&r2=1.210
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&r1=1.34&r2=1.35
--- conga/luci/cluster/form-macros 2007/01/22 21:18:58 1.157
+++ conga/luci/cluster/form-macros 2007/01/23 13:53:35 1.158
@@ -2828,7 +2828,13 @@
<span tal:attributes="class python: 'cluster node ' + status_class"
tal:content="python: cluster_node_status_str" />
</td>
+ </tr>
+ <tr class="cluster node info_middle"
+ tal:condition="nodeinfo/gulm_lockserver">
+ <td class="cluster node node_status" colspan="2">
+ This node is a GULM lock server.
+ </td>
</tr>
<tr class="cluster node info_bottom"
@@ -3230,6 +3236,13 @@
</td>
</tr>
+ <tr class="node info_middle"
+ tal:condition="nd/gulm_lockserver">
+ <td class="node node_status" colspan="2">
+ This node is a GULM lock server.
+ </td>
+ </tr>
+
<tr class="node info_bottom">
<td class="node node_services">
<strong class="cluster node">Services on this Node:</strong>
--- conga/luci/site/luci/Extensions/Ip.py 2006/05/30 20:17:21 1.1
+++ conga/luci/site/luci/Extensions/Ip.py 2007/01/23 13:53:36 1.2
@@ -6,7 +6,7 @@
_ = gettext.gettext
TAG_NAME = "ip"
-RESOURCE_TYPE=_("IP Address: ")
+RESOURCE_TYPE=_("IP Address")
class Ip(BaseResource):
def __init__(self):
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2007/01/22 17:06:48 1.209
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2007/01/23 13:53:36 1.210
@@ -16,6 +16,7 @@
from NFSClient import NFSClient
from NFSExport import NFSExport
from Service import Service
+from Lockserver import Lockserver
from Netfs import Netfs
from Apache import Apache
from MySQL import MySQL
@@ -1136,11 +1137,32 @@
return (True, {})
+def validateGULMConfig(model, form):
+ gulm_ptr = model.getGULMPtr()
+ if not gulm_ptr:
+ return (False, {'errors': [ 'This cluster appears not to be using GULM locking.' ]})
+ node_list = map(lambda x: x.getName(), model.getNodes())
+
+ gulm_lockservers = list()
+ for node in node_list:
+ if form.has_key(node) and form[node] == 'on':
+ ls = Lockserver()
+ ls.addAttribute('name', node)
+ gulm_lockservers.append(ls)
+
+ num_ls = len(gulm_lockservers)
+ if not num_ls in (1, 3, 4, 5):
+ return (False, {'errors': [ 'You must have exactly 1, 3, 4, or 5 GULM lock servers. You selected %d nodes as lock servers.' % num_ls ]})
+
+ model.GULM_ptr.children = gulm_lockservers
+ return (True, {})
+
configFormValidators = {
'general': validateGeneralConfig,
'mcast': validateMCastConfig,
'fence': validateFenceConfig,
- 'qdisk': validateQDiskConfig
+ 'qdisk': validateQDiskConfig,
+ 'gulm': validateGULMConfig
}
def validateConfigCluster(self, request):
@@ -3331,6 +3353,22 @@
clumap['mcast_addr'] = "1.2.3.4"
#-------------
+ #GULM params (rhel4 only)
+ gulm_ptr = model.getGULMPtr()
+ if gulm_ptr:
+ lockserv_list = list()
+ clunodes = model.getNodes()
+ gulm_lockservs = map(lambda x: x.getName(), gulm_ptr.getChildren())
+ for node in clunodes:
+ n = node.getName()
+ lockserv_list.append((n, n in gulm_lockservs))
+ clumap['gulm'] = True
+ clumap['gulm_url'] = prop_baseurl + PROPERTIES_TAB + '=' + PROP_GULM_TAB
+ clumap['gulm_lockservers'] = lockserv_list
+ else:
+ clumap['gulm'] = False
+
+ #-------------
#quorum disk params
quorumd_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_QDISK_TAB
clumap['quorumd_url'] = quorumd_url
@@ -3569,18 +3607,18 @@
rc = RicciCommunicator(nodename_resolved)
except Exception, e:
luci_log.debug_verbose('CStop0: [%d] RC %s: %s' \
- % (delete, nodename_resolved, str(e)))
+ % (delete is True, str(nodename_resolved), str(e)))
errors += 1
continue
if delete is True:
if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=True) is None:
- luci_log.debug_verbose('CStop1: nodeDelete failed')
+ luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
errors += 1
else:
if nodeLeave(self, rc, clustername, nodename_resolved) is None:
- luci_log.debug_verbose('CStop2: nodeLeave %s' \
- % (delete, nodename_resolved))
+ luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
+ % (nodename_resolved))
errors += 1
return errors
@@ -4026,6 +4064,7 @@
fdom_dict_list = list()
if model:
+ infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
#next is faildoms
fdoms = model.getFailoverDomainsForNode(nodename)
for fdom in fdoms:
@@ -4034,6 +4073,8 @@
fdomurl = baseurl + "?" + PAGETYPE + "=" + FDOM_CONFIG + "&" + CLUNAME + "=" + clustername + "&fdomname=" + fdom.getName()
fdom_dict['fdomurl'] = fdomurl
fdom_dict_list.append(fdom_dict)
+ else:
+ infohash['gulm_lockserver'] = False
infohash['fdoms'] = fdom_dict_list
@@ -4104,6 +4145,8 @@
map = {}
name = item['name']
map['nodename'] = name
+ map['gulm_lockserver'] = model.isNodeLockserver(name)
+
try:
baseurl = req['URL']
except:
--- conga/luci/site/luci/Extensions/conga_constants.py 2007/01/11 22:49:42 1.34
+++ conga/luci/site/luci/Extensions/conga_constants.py 2007/01/23 13:53:36 1.35
@@ -73,6 +73,7 @@
PROP_FENCE_TAB = '2'
PROP_MCAST_TAB = '3'
PROP_QDISK_TAB = '4'
+PROP_GULM_TAB = '5'
PAGETYPE="pagetype"
ACTIONTYPE="actiontype"
More information about the Cluster-devel
mailing list