[Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
rmccabe at sourceware.org
rmccabe at sourceware.org
Wed Dec 12 15:45:27 UTC 2007
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2007-12-12 15:45:27
Modified files:
luci/site/luci/Extensions: cluster_adapters.py
Log message:
Use new form validation routines
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.275&r2=1.276
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2007/11/06 23:05:07 1.275
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2007/12/12 15:45:27 1.276
@@ -7,44 +7,34 @@
from xml.dom import minidom
-from ClusterModel.FailoverDomain import FailoverDomain
-from ClusterModel.FailoverDomainNode import FailoverDomainNode
-from ClusterModel.RefObject import RefObject
from ClusterModel.ClusterNode import ClusterNode
-from ClusterModel.Service import Service
-from ClusterModel.Lockserver import Lockserver
-from ClusterModel.Vm import Vm
-from ClusterModel.FenceXVMd import FenceXVMd
-from ClusterModel.QuorumD import QuorumD
-from ClusterModel.Heuristic import Heuristic
-from ClusterModel.Fence import Fence
-from ClusterModel.Method import Method
import RicciQueries as rq
from HelperFunctions import resolveOSType, send_batch_to_hosts
from LuciSyslog import get_logger
-from ResourceHandler import create_resource
from homebase_adapters import parseHostForm
from LuciClusterActions import propagateClusterConfAsync
+from LuciZopeAsync import validate_clusvc_async
+
from LuciClusterInfo import getClusterInfo, \
getModelBuilder, LuciExtractCluModel
from conga_constants import BATCH_ID, CLUNODE_CREATE_ERRORS, \
CLUSTER_ADD, CLUSTER_CONFIG, CLUSTER_DAEMON, CLUSTER_DELETE, \
CLUSTER_FOLDER_PATH, CLUSTER_RESTART, CLUSTER_START, CLUSTER_STOP, \
- DISABLE_SVC_TASK, ENABLE_SVC_TASK, FDOM, FDOM_ADD, FENCEDEV, \
+ DISABLE_SVC_TASK, ENABLE_SVC_TASK, FDOM, FENCEDEV, \
FENCEDEV_NODE_CONFIG, FENCEDEVS, FLAG_DESC, INSTALL_TASK, CLUSTER_PROCESS, \
LAST_STATUS, LUCI_DEBUG_MODE, NODE, NODE_ADD, NODE_DELETE, \
NODE_FENCE, NODE_FORCE_DELETE, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, \
NODE_REBOOT, NODES, POSSIBLE_REBOOT_MESSAGE, PRE_CFG, PRE_INSTALL, \
PRE_JOIN, REBOOT_TASK, REDIRECT_MSG, RESOURCES, RICCI_CONNECT_FAILURE, \
- RICCI_CONNECT_FAILURE_MSG, SEND_CONF, SERVICE_ADD, SERVICE_CONFIG, \
- SERVICE_LIST, SERVICES, START_NODE, TASKTYPE, VM_ADD, VM_CONFIG, \
+ RICCI_CONNECT_FAILURE_MSG, SEND_CONF, \
+ SERVICE_LIST, SERVICES, START_NODE, TASKTYPE, \
REDIRECT_SEC, LUCI_CLUSTER_BASE_URL, FENCE_XVM_KEY_CREATE
from FenceHandler import validateNewFenceDevice, \
- validateFenceDevice, validate_fenceinstance, FD_VAL_SUCCESS
+ validateFenceDevice, FD_VAL_SUCCESS
from ricci_communicator import RicciCommunicator, RicciError, \
batch_status, extract_module_status
@@ -274,23 +264,6 @@
request.SESSION.set('create_cluster', add_cluster)
return (False, { 'errors': errors, 'messages': messages })
- node_list = add_cluster['nodes'].keys()
- batchNode = rq.createClusterBatch(add_cluster['cluster_os'],
- clustername,
- clustername,
- node_list,
- True,
- True,
- add_cluster['shared_storage'],
- False,
- add_cluster['download_pkgs'],
- lockservers)
-
- if not batchNode:
- request.SESSION.set('create_cluster', add_cluster)
- errors.append('Unable to generate cluster creation ricci command')
- return (False, { 'errors': errors, 'messages': messages })
-
error = manageCluster(self, clustername,
add_cluster['nodes'], add_cluster['cluster_os'])
if error:
@@ -298,37 +271,30 @@
request.SESSION.set('create_cluster', add_cluster)
return (False, { 'errors': errors, 'messages': messages })
+ node_list = add_cluster['nodes'].keys()
+
+ ret = send_batch_to_hosts(node_list, 10, rq.create_cluster,
+ add_cluster['cluster_os'], clustername, clustername,
+ node_list, True, True, add_cluster['shared_storage'], False,
+ add_cluster['download_pkgs'], lockservers)
+
batch_id_map = {}
- for i in node_list:
- try:
- rc = RicciCommunicator(i)
- if not rc:
- raise Exception, 'rc is None'
- except Exception, e:
- msg = 'Unable to connect to the ricci agent on %s: %s' % (i, str(e))
+ for i in ret.iterkeys():
+ if ret[i].has_key('error'):
+ msg = 'Unable to connect to the ricci agent on %s: %s' \
+ % (i, ret[i]['err_msg'])
errors.append(msg)
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose(msg)
-
- if len(batch_id_map) == 0:
- request.SESSION.set('create_cluster', add_cluster)
- return (False, { 'errors': errors, 'messages': messages })
continue
+ batch_id_map[i] = ret[i]['batch_result']
- try:
- resultNode = rc.process_batch(batchNode, async=True)
- batch_id_map[i] = resultNode.getAttribute('batch_id')
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('validateCreateCluster0: %s: %r %s' \
- % (i, e, str(e)))
- errors.append('An error occurred while attempting to add cluster node "%s"' % i)
- if len(batch_id_map) == 0:
- request.SESSION.set('create_cluster', add_cluster)
- return (False, { 'errors': errors, 'messages': messages })
- continue
+ if len(batch_id_map) == 0:
+ request.SESSION.set('create_cluster', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
buildClusterCreateFlags(self, batch_id_map, clustername)
+
response = request.RESPONSE
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
% (request['URL'], CLUSTER_CONFIG, clustername))
@@ -712,18 +678,16 @@
% (request['URL'], CLUSTER_CONFIG, clustername))
def validateServiceAdd(self, request):
- errors = list()
- fvar = GetReqVars(request, [ 'form_xml', 'clustername', 'domain', 'recovery', 'svc_name', 'action', 'URL' ])
+ from LuciValidation import validate_clusvc_add
+ fvar = GetReqVars(request, [ 'clustername', 'URL' ])
baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
- clustername = fvar['clustername']
- form_xml = fvar['form_xml']
- if form_xml is None:
- form_xml = ''
+ clustername = fvar['clustername']
+ if clustername is None:
if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA0: no form_xml')
-
+ luci_log.debug_verbose('VFE0: No cluster name')
+ return (False, {'errors': ['No cluster name was given']})
model = LuciExtractCluModel(self, request, clustername)
if model is None:
@@ -731,1060 +695,222 @@
luci_log.debug_verbose('vSA1: no model')
return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
- forms = []
- if form_xml.strip():
- try:
- doc = minidom.parseString(form_xml)
- forms = doc.getElementsByTagName('form')
- if len(forms) < 1:
- raise Exception, 'invalid XML'
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA1: error: %r %s: %r' % (e, str(e), form_xml))
- return (False, { 'errors': [ 'The resource data submitted for this service is not properly formed' ]})
-
- form_hash = {}
- form_hash['toplevel'] = { 'form': None, 'kids': [] }
- for i in forms:
- form_id = i.getAttribute('id')
- form_parent = i.getAttribute('parent')
- if not form_id or not form_parent:
- continue
- ielems = i.getElementsByTagName('input')
- if not ielems or len(ielems) < 1:
- continue
- if not form_id in form_hash:
- form_hash[form_id] = {'form': i, 'kids': []}
- elif not form_hash[form_id]['form']:
- form_hash[form_id]['form'] = i
- if not form_parent in form_hash:
- form_hash[form_parent] = {'form': None, 'kids': []}
- form_hash[form_parent]['kids'].append(form_id)
- dummy_form = {}
-
- for i in ielems:
- try:
- input_type = str(i.getAttribute('type'))
- except:
- continue
- if not input_type or input_type == 'button':
- continue
- try:
- dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA2: parsing XML: %r %s' \
- % (e, str(e)))
-
- try:
- res_type = dummy_form['type'].strip()
- if not res_type:
- raise Exception, 'no resource type'
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA3: %r %s' % (e, str(e)))
- return (False, { 'errors': [ 'No resource type was specified' ]})
-
- try:
- if res_type == 'ip':
- dummy_form['resourceName'] = dummy_form['ip_address']
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA3a: type is ip but no addr: %r %s' \
- % (e, str(e)))
- return (False, { 'errors': [ 'No IP address was given' ]})
-
- try:
- if dummy_form.has_key('immutable'):
- newRes = model.getResourceByName(dummy_form['resourceName'])
- resObj = RefObject(newRes)
- resObj.setRef(newRes.getName())
- else:
- resObj = create_resource(res_type, dummy_form, model)
- except Exception, e:
- resObj = None
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA4: type %s: %r %s' \
- % (res_type, e, str(e)))
-
- if resObj is None:
- return (False, { 'errors': [ 'An error occurred while adding %s' % res_type ]})
-
- if dummy_form.has_key('__independent_subtree'):
- resObj.addAttribute('__independent_subtree', '1')
- else:
- resObj.removeAttribute('__independent_subtree')
- form_hash[form_id]['obj'] = resObj
+ ret = validate_clusvc_add(model, request)
+ if ret[0] is not True:
+ return ret
- if len(errors) > 0:
- return (False, {'errors': errors})
+ action_type = ret[1]['action_type']
+ action_msg = ret[1]['action_msg']
+ ret = propagateClusterConfAsync(self, model, rc=None,
+ action=action_type, pmsg=action_msg)
+ if ret[0] is not True:
+ return ret
- fdom = fvar['domain']
+ response = request.RESPONSE
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, SERVICES, clustername))
- recovery = fvar['recovery']
- if recovery is not None and recovery != 'restart' and recovery != 'relocate' and recovery != 'disable':
- errors.append('You entered an invalid recovery option: "%s" Valid options are "restart" "relocate" and "disable."')
+def validateResourceAdd(self, request):
+ from LuciValidation import validate_clures_add
- service_name = fvar['svc_name']
- if service_name is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA5: no service name')
- errors.append('No service name was given')
+ fvar = GetReqVars(request, [ 'clustername', 'URL' ])
+ baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
- autostart = '1'
- try:
- if not request.form.has_key('autostart') or request.form['autostart'] == '0':
- autostart = '0'
- except Exception, e:
- autostart = None
+ clustername = fvar['clustername']
+ if clustername is None:
if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA5a: error getting autostart: %r %s' \
- % (e, str(e)))
-
- exclusive = '0'
- try:
- if not request.form.has_key('exclusive') or request.form['exclusive'] != '1':
- exclusive = '0'
- else:
- exclusive = '1'
- except Exception, e:
- exclusive = '0'
+ luci_log.debug_verbose('VFE0: No cluster name')
+ return (False, {'errors': ['No cluster name was given']})
- try:
- cur_service = model.retrieveServiceByName(service_name)
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA5c: no service named %s found: %r %s' \
- % (service_name, e, str(e)))
- cur_service = None
-
- action = fvar['action']
- if action is None:
- return (False, {'errors': [ 'No action was given for service %s' % service_name ] })
-
- if action == 'edit':
- if cur_service is None:
- return (False, {'errors': [ 'The service %s could not be found for editing' % service_name ]})
- model.deleteService(service_name)
- elif action == 'add':
- if cur_service is not None:
- return (False, {'errors': [ 'A service with the name %s already exists' % service_name ]})
- else:
+ model = LuciExtractCluModel(self, request, clustername)
+ if model is None:
if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vSA4a: unknown action %s' \
- % request.form['action'])
- return (False, {'errors': [ 'An unknown action was specified' ]})
+ luci_log.debug_verbose('vRA1: no model')
+ return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
- def buildSvcTree(parent, child_id_list):
- for i in child_id_list:
- try:
- child = form_hash[i]['obj']
- if not child:
- raise Exception, 'No object for %s' % i
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('bST0: %r %s' % (e, str(e)))
- continue
- parent.addChild(child)
- if 'kids' in form_hash[i]:
- buildSvcTree(child, form_hash[i]['kids'])
-
- new_service = Service()
- new_service.addAttribute('name', service_name)
- if fdom:
- new_service.addAttribute('domain', fdom)
- if recovery:
- new_service.addAttribute('recovery', recovery)
- new_service.addAttribute('exclusive', str(exclusive))
- if autostart is not None:
- new_service.attr_hash['autostart'] = autostart
-
- buildSvcTree(new_service, form_hash['toplevel']['kids'])
- model.resourcemanager_ptr.addChild(new_service)
- model.setModified(True)
-
- if action == 'edit':
- action_type = SERVICE_CONFIG
- action_msg = 'Configuring service "%s"'
- else:
- action_type = SERVICE_ADD
- action_msg = 'Creating service "%s"'
+ ret = validate_clures_add(model, request)
+ if ret[0] is not True:
+ return ret
+ resname = ret[1]['res_name']
ret = propagateClusterConfAsync(self, model, rc=None,
- action=action_type, pmsg=action_msg % service_name)
+ action=RESOURCES, pmsg='Configuring cluster resource %s' % resname)
if ret[0] is not True:
return ret
response = request.RESPONSE
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
- % (baseurl, SERVICES, model.getClusterName()))
+ % (baseurl, RESOURCES, clustername))
-def validateResourceAdd(self, request):
- try:
- res_type = request.form['type'].strip()
- if not res_type:
- raise KeyError, 'type is blank'
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VRA0: type is blank')
- return (False, {'errors': ['No resource type was given']})
+ return (True, { 'messages': [ 'Resource "%s" configured successfully' % resname]})
- model = LuciExtractCluModel(self, request)
- if model is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VRA1: no model')
- return (False, { 'errors': [ 'Unable to retrieve the cluster configuration. The configuration XML may contain errors' ]})
+def validateConfigCluster(self, request):
+ from LuciValidation import validate_config_mcast, validate_config_qdisk, \
+ validate_config_fence, validate_config_gulm, validate_config_general
+
+ configFormValidators = {
+ 'general': validate_config_general,
+ 'mcast': validate_config_mcast,
+ 'fence': validate_config_fence,
+ 'qdisk': validate_config_qdisk,
+ 'gulm': validate_config_gulm
+ }
errors = list()
- try:
- res = create_resource(res_type, request.form, model)
- except Exception, e:
- errors.extend(e)
+ messages = list()
+ fvar = GetReqVars(request, [ 'configtype', 'clustername', 'URL' ])
+ baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
- if len(errors) < 1:
- try:
- resourceAdd(self, request, model, res)
- except Exception, e:
- errors.append('An error occurred while adding resource "%s"' \
- % res.getName())
- if len(errors) > 0:
- errors.append('An error occurred while adding this resource')
+ clustername = fvar['clustername']
+ if clustername is None:
if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('resource error: %r %s' % (e, str(e)))
- return (False, { 'errors': errors})
-
- return (True, { 'messages': [ 'Resource added successfully' ]})
+ luci_log.debug_verbose('VFE0: No cluster name')
+ return (False, {'errors': ['No cluster name was given']})
-## Cluster properties form validation routines
+ model = LuciExtractCluModel(self, request, clustername)
+ if model is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('VCC0: no model')
+ return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
-# rhel5 cluster version
-def validateMCastConfig(model, form):
- try:
- gulm_ptr = model.getGULMPtr()
- if gulm_ptr:
- return (False, {'errors': ['Multicast cannot be used with GULM locking']})
- except:
- pass
+ if clustername is None:
+ clustername = model.getClusterName()
- errors = list()
- try:
- mcast_val = form['mcast'].strip().lower()
- if mcast_val != 'true' and mcast_val != 'false':
- raise KeyError, mcast_val
- if mcast_val == 'true':
- mcast_manual = True
- else:
- mcast_manual = False
- except KeyError, e:
- errors.append('An invalid multicast selection was made')
- return (False, {'errors': errors})
+ config_type = fvar['configtype']
+ if config_type is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('VCC1: no config type')
+ return (False, {'errors': [ 'No configuration type was given' ]})
- mcast_interface = None
- if form.has_key('mcast_interface'):
- mcast_interface = form['mcast_interface'].strip()
+ if not configFormValidators.has_key(config_type):
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('VCC2: invalid config type: %s' \
+ % config_type)
+ return (False, { 'errors': [ 'An invalid configuration type "%s" was submitted' % config_type ]})
- if mcast_manual is True and form.has_key('cluster_version') and form['cluster_version'].strip() == 'rhel4' and not mcast_interface:
- errors.append('No multicast interface was specified')
- return (False, {'errors': errors})
+ config_validator = configFormValidators[config_type]
+ ret = config_validator(model, request.form)
- if mcast_manual is True:
- import socket
- try:
- addr_str = form['mcast_address'].strip()
- socket.inet_pton(socket.AF_INET, addr_str)
- except KeyError, e:
- addr_str = None
- errors.append('No multicast address was given')
- except socket.error, e:
- try:
- socket.inet_pton(socket.AF_INET6, addr_str)
- except socket.error, e:
- addr_str = None
- errors.append('An invalid multicast address was given: %s')
- else:
- addr_str = None
+ retcode = ret[0]
+ if ret[1].has_key('errors'):
+ errors.extend(ret[1]['errors'])
+ if ret[1].has_key('messages'):
+ messages.extend(ret[1]['messages'])
- try:
- if not addr_str:
- if mcast_interface:
- errors.append('A multicast interface was specified, but no multicast address was given')
- return (False, {'errors': errors})
- model.del_cluster_multicast()
- else:
- model.set_cluster_multicast(addr_str, mcast_if=mcast_interface)
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug('Error updating mcast properties: %r %s' \
- % (e, str(e)))
- errors.append('Unable to update cluster multicast properties')
+ if retcode is not True or len(errors) > 0:
+ return (False, {'errors': errors, 'messages': messages})
- if len(errors) > 0:
- return (False, {'errors': errors})
+ ret = propagateClusterConfAsync(self, model, None,
+ CLUSTER_CONFIG, 'Updating cluster configuration')
+ if ret[0] is not True:
+ if ret[1].has_key('errors'):
+ errors.extend(ret[1]['errors'])
+ return (retcode, {'errors': errors, 'messages': messages})
- return (True, {})
+ request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, CLUSTER_CONFIG, clustername))
-def validateQDiskConfig(model, form):
+def validateFenceAdd(self, request):
errors = list()
+ fvar = GetReqVars(request, [ 'clustername', 'URL' ])
- try:
- qdisk_val = form['quorumd'].strip().lower()
- if qdisk_val != 'true' and qdisk_val != 'false':
- raise KeyError(qdisk_val)
- if qdisk_val == 'true':
- qdisk_val = 1
- else:
- qdisk_val = 0
- except KeyError, e:
- return (False, {'errors': ['An invalid quorum partition selection was made']})
-
- cp = model.getClusterPtr()
- qdp = model.getQuorumdPtr()
+ baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
- if not qdisk_val:
- if qdp:
- try:
- cp.removeChild(qdp)
- except Exception, e:
- return (False, {'errors': [ 'Error disabling quorum partition: %s' % str(e) ] })
- return (True, {})
+ clustername = fvar['clustername']
+ if clustername is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('VFE0: No cluster name')
+ return (False, {'errors': ['No cluster name was given']})
- try:
- interval = int(form['interval'])
- if interval < 0:
- raise ValueError, 'Interval must be 0 or greater'
- except KeyError, e:
- errors.append('No Interval value was given')
- except ValueError, e:
- errors.append('An invalid Interval value was given: %s' % str(e))
-
- try:
- votes = int(form['votes'])
- if votes < 1:
- raise ValueError, 'Votes must be greater than 0'
- except KeyError, e:
- errors.append('No Votes value was given')
- except ValueError, e:
- errors.append('An invalid Votes value was given: %s' % str(e))
-
- try:
- tko = int(form['tko'])
- if tko < 0:
- raise ValueError, 'TKO must be 0 or greater'
- except KeyError, e:
- errors.append('No TKO value was given')
- except ValueError, e:
- errors.append('An invalid TKO value was given: %s' % str(e))
-
- try:
- min_score = int(form['min_score'])
- if min_score < 1:
- raise ValueError('Minimum Score must be greater than 0')
- except KeyError, e:
- errors.append('No Minimum Score value was given')
- except ValueError, e:
- errors.append('An invalid Minimum Score value was given: %s' % str(e))
+ model = LuciExtractCluModel(self, request, clustername)
+ if model is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('VFA0: no model')
+ return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
- #Either device or label must be present
- device = None
- try:
- device = form['device'].strip()
- except:
- device = None
+ ret_code, ret_obj = validateNewFenceDevice(request.form, model)
+ if ret_code != FD_VAL_SUCCESS:
+ errors.extend(ret_obj)
+ return (False, { 'errors': errors })
- label = None
- try:
- label = form['label'].strip()
- except:
- label = None
+ ret = propagateClusterConfAsync(self, model, None,
+ CLUSTER_CONFIG, 'Creating fence device "%s"' % ret_obj)
+ if ret[0] is not True:
+ return ret
- if not device and not label:
- errors.append('No Device or Label value was given')
+ request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (baseurl, FENCEDEV, clustername, ret_obj))
- num_heuristics = 0
- try:
- num_heuristics = int(form['num_heuristics']) + 1
- if num_heuristics < 1:
- raise ValueError, form['num_heuristics']
- except KeyError, e:
- errors.append('No number of heuristics was given')
- except ValueError, e:
- errors.append('An invalid number of heuristics was given: %s' % str(e))
+def validateFenceEdit(self, request):
+ errors = list()
- heuristics = list()
- for i in xrange(num_heuristics):
- try:
- h = form['heuristic%d' % i]
- if not h or len(h) != 3 or not (h[0].strip() and h[1].strip() and h[2].strip()):
- continue
- except:
- continue
+ fvar = GetReqVars(request, [ 'clustername', 'URL' ])
+ baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
- try:
- hprog = h[0]
- if not hprog:
- raise Exception, 'no hprog'
- except Exception, e:
- errors.append('No program was given for heuristic %d' % (i + 1))
- try:
- hint = int(h[1])
- if hint < 1:
- raise ValueError, 'Heuristic interval values must be greater than 0'
- except KeyError, e:
- errors.append('No interval was given for heuristic %d' % (i + 1))
- except ValueError, e:
- errors.append('An invalid interval was given for heuristic %d: %s' \
- % ((i + 1), str(e)))
+ clustername = fvar['clustername']
+ if clustername is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('VFE0: No cluster name')
+ return (False, {'errors': ['No cluster name was given']})
- try:
- hscore = int(h[2])
- if hscore < 1:
- raise ValueError, 'Heuristic scores must be greater than 0'
- except KeyError, e:
- errors.append('No score was given for heuristic %d' % (i + 1))
- except ValueError, e:
- errors.append('An invalid score was given for heuristic %d: %s' \
- % ((i + 1), str(e)))
+ model = LuciExtractCluModel(self, request, clustername)
+ if model is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('VFE1: no model')
+ return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
- heuristics.append([ hprog, hint, hscore ])
+ # This is a fence edit situation, so the model should already have an
+ # entry for this fence device.
+ #
+ # pass form and model to validation method, then save changes if it passes.
+ error_code, retobj = validateFenceDevice(request.form, model)
+ if error_code != FD_VAL_SUCCESS:
+ errors.extend(retobj)
+ return (False, { 'errors': errors })
- if len(errors) > 0:
- return (False, {'errors': errors })
+ ret = propagateClusterConfAsync(self, model, None,
+ CLUSTER_CONFIG, 'Updating fence device "%s"' % retobj)
+ if ret[0] is not True:
+ return ret
- qd = QuorumD()
- qd.addAttribute('interval', str(interval))
- qd.addAttribute('votes', str(votes))
- qd.addAttribute('tko', str(tko))
- qd.addAttribute('min_score', str(min_score))
+ request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (baseurl, FENCEDEV, clustername, retobj))
- if device:
- qd.addAttribute('device', str(device))
- else:
- qd.addAttribute('label', str(label))
+def validateNodeFenceConfig(self, request):
+ from LuciValidation import validate_node_fence_config
- if qdp:
- try:
- cp.removeChild(qdp)
- except:
- pass
- cp.addChild(qd)
+ fvar = GetReqVars(request, [ 'nodename', 'clustername', 'URL' ])
+ baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
+ nodename = fvar['nodename']
- for h in heuristics:
- new_h = Heuristic()
- new_h.addAttribute('program', str(h[0]))
- new_h.addAttribute('interval', str(h[1]))
- new_h.addAttribute('score', str(h[2]))
- qd.addChild(new_h)
-
- if len(errors) > 0:
- return (False, {'errors': errors })
-
- return (True, {})
-
-def validateGeneralConfig(model, form):
- errors = list()
-
- try:
- cp = model.getClusterPtr()
- old_name = model.getClusterAlias()
- old_ver = int(cp.getConfigVersion())
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('getConfigVersion: %s' % str(e))
- errors.append('unable to determine the current configuration version')
- return (False, {'errors': errors})
-
- try:
- cluster_name = form['cluname'].strip()
- if not cluster_name:
- raise KeyError('cluname')
- except KeyError, e:
- errors.append('No cluster name was given')
-
- if len(cluster_name) > 15:
- errors.append('A cluster\'s name must be less than 16 characters long')
-
- try:
- version_num = int(form['cfgver'])
- if version_num < old_ver:
- raise ValueError, 'configuration version number must be %d or greater' % old_ver
- except KeyError, e:
- errors.append('No cluster configuration version was given')
- except ValueError, e:
- errors.append('An invalid configuration version was given: %s' % str(e))
-
- if len(errors) < 1:
- try:
- if cluster_name != old_name:
- cp.addAttribute('alias', cluster_name)
- cp.setConfigVersion(str(version_num))
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('unable to update general properties: %r %s' % (e, str(e)))
- errors.append('Unable to update the cluster configuration')
-
- try:
- cluster_version = form['cluster_version'].strip()
- if cluster_version != 'rhel5':
- raise Exception, 'not rhel5'
- except:
- if len(errors) > 0:
- return (False, {'errors': errors})
- return (True, {})
-
- totem = model.getTotemPtr()
- if totem is None:
- totem = model.addTotemPtr()
-
- try:
- token = form['token'].strip()
- if not token:
- raise KeyError, 'token'
- token = int(token)
- if token < 1:
- raise ValueError, '%d is an invalid value for token timeout' % token
- totem.addAttribute('token', str(token))
- except KeyError, e:
- try:
- totem.removeAttribute('token')
- except:
- pass
- except Exception, e:
- errors.append(str(e))
-
- try:
- trblc = form['token_retransmits_before_loss_const'].strip()
- if not trblc:
- raise KeyError, 'token_retransmits_before_loss_const'
- trblc = int(trblc)
- if trblc < 1:
- raise ValueError, '%d is an invalid value for number of token retransmits before loss' % trblc
- totem.addAttribute('token_retransmits_before_loss_const', str(trblc))
- except KeyError, e:
- try:
- totem.removeAttribute('token_retransmits_before_loss_const')
- except:
- pass
- except Exception, e:
- errors.append(str(e))
-
- try:
- join = form['join'].strip()
- if not join:
- raise KeyError, 'join'
- join = int(join)
- if join < 1:
- raise ValueError, '%d is an invalid value for join timeout' % join
- totem.addAttribute('join', str(join))
- except KeyError, e:
- try:
- totem.removeAttribute('join')
- except:
- pass
- except Exception, e:
- errors.append(str(e))
-
- try:
- consensus = form['consensus'].strip()
- if not consensus:
- raise KeyError, 'consensus'
- consensus = int(consensus)
- if consensus < 1:
- raise ValueError, '%d is an invalid value for consensus timeout' % consensus
- totem.addAttribute('consensus', str(consensus))
- except KeyError, e:
- try:
- totem.removeAttribute('consensus')
- except:
- pass
- except Exception, e:
- errors.append(str(e))
-
- if len(errors) > 0:
- return (False, {'errors': errors})
- return (True, {})
-
-def validateFenceConfig(model, form):
- errors = list()
-
- if model.getGULMPtr() is not None:
- return (False, {'errors': [ 'GULM clusters do not support fenced' ]})
-
- try:
- post_fail_delay = int(form['post_fail_delay'])
- if post_fail_delay < 0:
- raise ValueError('post fail delay values must be 0 or greater')
- except KeyError, e:
- errors.append('No post fail delay was given')
- except ValueError, e:
- errors.append('Invalid post fail delay: %s' % str(e))
-
- try:
- post_join_delay = int(form['post_join_delay'])
- if post_join_delay < 0:
- raise ValueError('post join delay values must be 0 or greater')
- except KeyError, e:
- errors.append('No post join delay was given')
- except ValueError, e:
- errors.append('Invalid post join delay: %s' % str(e))
-
- run_xvmd = False
- try:
- run_xvmd = form.has_key('run_xvmd')
- except:
- pass
-
- if run_xvmd is True and not model.hasFenceXVM():
- fenceXVMd = FenceXVMd()
- model.addFenceXVM(fenceXVMd)
- elif not run_xvmd:
- model.delFenceXVM()
-
- try:
- fd = model.getFenceDaemonPtr()
- old_pj_delay = fd.getPostJoinDelay()
- old_pf_delay = fd.getPostFailDelay()
-
- if post_join_delay == old_pj_delay and post_fail_delay == old_pf_delay:
- errors.append('No fence daemon properties were changed')
- else:
- fd.setPostJoinDelay(str(post_join_delay))
- fd.setPostFailDelay(str(post_fail_delay))
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('Unable to update fence daemon properties: %r %s' % (e, str(e)))
- errors.append('An error occurred while attempting to update fence daemon properties')
-
- if len(errors) > 0:
- return (False, {'errors': errors })
-
- return (True, {})
-
-def validateGULMConfig(model, form):
- gulm_ptr = model.getGULMPtr()
- if not gulm_ptr:
- return (False, {'errors': [ 'This cluster appears not to be using GULM locking' ]})
-
- node_list = map(lambda x: x.getName(), gulm_ptr.getChildren())
- for i in model.getNodeNames():
- if not i in node_list:
- node_list.append(i)
-
- gulm_lockservers = list()
- for node in node_list:
- if form.has_key(node) and form[node] == 'on':
- ls = Lockserver()
- ls.addAttribute('name', node)
- gulm_lockservers.append(ls)
-
- try:
- xlockservers = filter(lambda x: x.strip(), form['__GULM__'])
- except:
- xlockservers = list()
-
- for i in xlockservers:
- if not i in node_list:
- ls = Lockserver()
- ls.addAttribute('name', i)
- gulm_lockservers.append(ls)
-
- num_ls = len(gulm_lockservers)
- if not num_ls in (1, 3, 5):
- return (False, {'errors': [ 'You must have exactly 1, 3, or 5 GULM lock servers. You submitted %d lock servers' % num_ls ]})
-
- model.GULM_ptr.children = gulm_lockservers
- return (True, {})
-
-configFormValidators = {
- 'general': validateGeneralConfig,
- 'mcast': validateMCastConfig,
- 'fence': validateFenceConfig,
- 'qdisk': validateQDiskConfig,
- 'gulm': validateGULMConfig
-}
-
-def validateConfigCluster(self, request):
- errors = list()
- messages = list()
- fvar = GetReqVars(request, [ 'configtype', 'clustername', 'URL' ])
-
- baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
- clustername = fvar['clustername']
+ clustername = fvar['clustername']
+ if clustername is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('VFE0: No cluster name')
+ return (False, {'errors': ['No cluster name was given']})
model = LuciExtractCluModel(self, request, clustername)
if model is None:
if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VCC0: no model')
- return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
-
- if clustername is None:
- clustername = model.getClusterName()
-
- config_type = fvar['configtype']
- if config_type is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VCC1: no config type')
- return (False, {'errors': [ 'No configuration type was given' ]})
-
- if not configFormValidators.has_key(config_type):
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VCC2: invalid config type: %s' \
- % config_type)
- return (False, { 'errors': [ 'An invalid configuration type "%s" was submitted' % config_type ]})
-
- config_validator = configFormValidators[config_type]
- ret = config_validator(model, request.form)
-
- retcode = ret[0]
- if ret[1].has_key('errors'):
- errors.extend(ret[1]['errors'])
- if ret[1].has_key('messages'):
- messages.extend(ret[1]['messages'])
-
- if retcode is not True or len(errors) > 0:
- return (False, {'errors': errors, 'messages': messages})
-
- ret = propagateClusterConfAsync(self, model, None,
- CLUSTER_CONFIG, 'Updating cluster configuration')
- if ret[0] is not True:
- if ret[1].has_key('errors'):
- errors.extend(ret[1]['errors'])
- return (retcode, {'errors': errors, 'messages': messages})
-
- request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
- % (baseurl, CLUSTER_CONFIG, clustername))
-
-def validateFenceAdd(self, request):
- errors = list()
- fvar = GetReqVars(request, [ 'clustername', 'URL' ])
-
- baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
- clustername = fvar['clustername']
-
- model = LuciExtractCluModel(self, request, clustername)
- if model is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VFA0: no model')
- return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
-
- ret_code, ret_obj = validateNewFenceDevice(request.form, model)
- if ret_code != FD_VAL_SUCCESS:
- errors.extend(ret_obj)
- return (False, { 'errors': errors })
-
- ret = propagateClusterConfAsync(self, model, None,
- CLUSTER_CONFIG, 'Creating fence device "%s"' % ret_obj)
- if ret[0] is not True:
- return ret
-
- request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (baseurl, FENCEDEV, clustername, ret_obj))
-
-def validateFenceEdit(self, request):
- errors = list()
-
- fvar = GetReqVars(request, [ 'clustername', 'URL' ])
- baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
-
- clustername = fvar['clustername']
- if clustername is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VFE0: No cluster name')
- return (False, {'errors': ['No cluster name was given']})
-
- model = LuciExtractCluModel(self, request, clustername)
- if model is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VFE1: no model')
+ luci_log.debug_verbose('vNFC6: no model for %s' % clustername)
return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
- # This is a fence edit situation, so the model should already have an
- # entry for this fence device.
- #
- # pass form and model to validation method, then save changes if it passes.
- error_code, retobj = validateFenceDevice(request.form, model)
- if error_code != FD_VAL_SUCCESS:
- errors.extend(retobj)
- return (False, { 'errors': errors })
-
- ret = propagateClusterConfAsync(self, model, None,
- CLUSTER_CONFIG, 'Updating fence device "%s"' % retobj)
+ ret = validate_node_fence_config(model, request)
if ret[0] is not True:
return ret
- request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (baseurl, FENCEDEV, clustername, retobj))
-
-def validateNodeFenceConfig(self, request):
- errors = list()
- fvar = GetReqVars(request,
- [ 'fence_xml', 'fence_level', 'nodename', 'clustername', 'URL' ])
-
- baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
-
- if fvar['fence_xml'] is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC0: no fence_xml for node %s' \
- % fvar['nodename'])
- return (False, {'errors': ['No fence data was supplied']})
-
- if fvar['fence_level'] is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC1: no fence level for %s' \
- % fvar['nodename'])
- return (False, {'errors': ['No fence level was supplied']})
-
- try:
- fence_level = int(fvar['fence_level'])
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC2: invalid fence level: %s: %r %s' \
- % (fvar['fence_level'], e, str(e)))
- return (False, {'errors': ['"%s" is an invalid fence level' % fvar['fence_level'] ]})
-
- nodename = fvar['nodename']
- if nodename is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC3: no nodename: %r %s' % (e, str(e)))
- return (False, {'errors': ['No node name was given']})
-
- clustername = fvar['clustername']
- if clustername is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC4: no clustername: %r %s' % (e, str(e)))
- return (False, {'errors': ['No cluster name was given']})
-
- model = LuciExtractCluModel(self, request, clustername)
- if model is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC6: no model for %s' % clustername)
- return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
-
- try:
- doc = minidom.parseString(fvar['fence_xml'])
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC7: error: %r %s' % (e, str(e)))
- return (False, {'errors': ['The fence data submitted is not properly formed']})
-
- try:
- node = model.retrieveNodeByName(nodename)
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC8: unable to find node name %s in current node list: %r %s' % (nodename, e, str(e)))
- return (False, {'errors': ['Unable to find the cluster node %s in the node list' % nodename ]})
-
- levels = node.getFenceLevels()
- try:
- method_id = levels[fence_level - 1].getAttribute('name')
- if not method_id:
- raise Exception, 'No method ID'
- fence_method = Method()
- fence_method.addAttribute('name', str(method_id))
- levels[fence_level - 1] = fence_method
- except Exception, e:
- method_id = fence_level
- fence_method = Method()
- fence_method.addAttribute('name', str(method_id))
-
- forms = doc.getElementsByTagName('form')
- if len(forms) < 1:
- delete_target = None
- for l in levels:
- # delete the fence level
- if l.getAttribute('name') == method_id:
- delete_target = l
- break
- if delete_target is not None:
- try:
- node.getChildren()[0].removeChild(delete_target)
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC9: %s: %r %s' \
- % (method_id, e, str(e)))
- return (False, {'errors': ['An error occurred while deleting fence method %s' % method_id ]})
- else:
- return (True, {'messages': ['No changes were made'] })
-
- form_hash = {}
- for i in forms:
- form_id = i.getAttribute('id')
- if not form_id:
- continue
- ielems = i.getElementsByTagName('input')
- if not ielems or len(ielems) < 1:
- continue
-
- dummy_form = {}
-
- for i in ielems:
- try:
- input_type = str(i.getAttribute('type'))
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC10: input type: %r %s' \
- % (e, str(e)))
- continue
-
- if not input_type or input_type == 'button':
- continue
-
- try:
- dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC11: parsing XML: %r %s' \
- % (e, str(e)))
-
- if len(dummy_form) < 1:
- continue
-
- if dummy_form.has_key('fence_instance'):
- try:
- parent = dummy_form['parent_fencedev']
- except:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC12: no parent for instance')
- return (False, {'errors': [ 'Unable to determine what device the current instance uses' ]})
-
- try:
- form_hash[parent][1].append(dummy_form)
- del dummy_form['fence_instance']
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC13: no parent for instance')
- return (False, {'errors': [ 'Unable to determine what device the current instance uses' ]})
- else:
- form_hash[form_id] = (dummy_form, list())
-
- fh_keys = form_hash.keys()
- fh_keys.sort()
- for i in fh_keys:
- fencedev_name = None
- fencedev_unknown = False
-
- try:
- fence_form, instance_list = form_hash[i]
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC14: %r %s' % (e, str(e)))
- continue
-
- try:
- fence_type = fence_form['fence_type']
- if not fence_type:
- raise Exception, 'fence type is blank'
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('vNFC15: %s: %r %s' % (i, e, str(e)))
- fence_type = None
-
- if fence_form.has_key('existing_device'):
- try:
- fencedev_name = fence_form['name']
- if not fencedev_name.strip():
- raise Exception, 'no fence name'
- except Exception, e:
- errors.append('You must provide a unique name for all fence devices')
- continue
-
- if fence_type is None:
- # An unknown fence device agent. Pull the data out of
- # the model and persist it and all instances.
- # All we care about is its name.
- fencedev_unknown = True
- else:
- if not fence_form.has_key('sharable'):
- # If it's a shared fence device that already exists, the
- # user could not have edited it (without playing dirty
- # games), so it's safe to pull the existing entry from
- # the model. All we need is the device name, and nothing
- # else needs to be done here.
- #
- # For an existing non-shared device update the device
- # in the model, since the user could have edited it.
- retcode, retmsg = validateFenceDevice(fence_form, model)
- if retcode != FD_VAL_SUCCESS:
- errors.extend(retmsg)
- continue
- else:
- fencedev_name = retmsg
-
- # Add back the tags under the method block
- # for the fence instance
- if type == 'fence_manual':
- instance_list.append({'name': fencedev_name, 'nodename': nodename })
- else:
- instance_list.append({'name': fencedev_name })
- else:
- # The user created a new fence device.
- retcode, retmsg = validateNewFenceDevice(fence_form, model)
- if retcode != FD_VAL_SUCCESS:
- errors.extend(retmsg)
- continue
- else:
- fencedev_name = retmsg
-
- # If it's not shared, we need to create an instance form
- # so the appropriate XML goes into the <method> block inside
- # <node><fence>. All we need for that is the device name.
- if not fence_form.has_key('sharable'):
- if type == 'fence_manual':
- instance_list.append({'name': fencedev_name, 'nodename': nodename })
- else:
- instance_list.append({'name': fencedev_name })
-
- if fencedev_unknown is True:
- # Save any instances for this fence device.
- # XXX FIX ME - instances must be saved.
- pass
-
- for inst in instance_list:
- retcode, retobj = validate_fenceinstance(inst, fencedev_name, fence_type)
- if retcode != FD_VAL_SUCCESS:
- errors.extend(retobj)
- continue
- fence_method.addChild(retobj)
-
- if len(node.getChildren()) > 0:
- # There's already a <fence> block
- found_target = False
- for idx in xrange(len(levels)):
- if levels[idx].getAttribute('name') == method_id:
- found_target = True
- break
-
- if found_target is False:
- # There's a fence block, but no relevant method
- # block
- node.getChildren()[0].addChild(fence_method)
- else:
- # There is no <fence> tag under the node yet.
- fence_node = Fence()
- fence_node.addChild(fence_method)
- node.addChild(fence_node)
-
- if len(errors) > 0:
- return (False, {'errors': errors })
-
ret = propagateClusterConfAsync(self, model, None, FENCEDEV_NODE_CONFIG,
- 'Updating fence configuration for node "%s"' % fvar['nodename'])
+ 'Updating fence configuration for node "%s"' % nodename)
if ret[0] is not True:
return ret
request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&nodename=%s&busyfirst=true' % (baseurl, NODE, clustername, nodename))
def deleteFenceDevice(self, request):
+ from LuciValidation import validate_fence_del
errors = list()
- fvar = GetReqVars(request,
- [ 'orig_name', 'nodename', 'clustername', 'URL' ])
-
+ fvar = GetReqVars(request, [ 'clustername', 'URL' ])
baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
- nodename = fvar['nodename']
- if nodename is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('DFD0: no node name')
- return (False, {'errors': ['No node name was given']})
-
clustername = fvar['clustername']
if clustername is None:
if LUCI_DEBUG_MODE is True:
@@ -1797,28 +923,12 @@
luci_log.debug_verbose('DFD2: no model')
return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ] })
- fencedev_name = fvar['orig_name']
- if fencedev_name is None:
- return (False, {'errors': ['No fence device name in form submission']})
-
- try:
- fdev = model.getFenceDeviceByName(fencedev_name)
- if fdev:
- if model.deleteFenceDevice(fdev) is not True:
- raise Exception, 'failed to remove %s' % fdev.getName()
- model.removeFenceInstancesForFenceDevice(fencedev_name)
- else:
- raise Exception, 'no fence device named "%s" was found' \
- % fencedev_name
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('DFD3: %s: %r %s' \
- % (fencedev_name, e, str(e)))
- return (False, { 'errors': [ 'Error removing fence device %s: %s' \
- % (fencedev_name, str(e)) ]})
+ ret = validate_fence_del(model, request)
+ if ret[0] is not True:
+ return ret
ret = propagateClusterConfAsync(self, model, None, CLUSTER_CONFIG,
- 'Removing fence device "%s"' % fencedev_name)
+ 'Removing fence device "%s"' % ret[1].get('name'))
if ret[0] is not True:
return ret
@@ -1826,47 +936,22 @@
% (baseurl, FENCEDEVS, clustername))
def validateDaemonProperties(self, request):
- errors = list()
+ from LuciValidation import validate_cluster_daemon_form
fvar = GetReqVars(request,
- [ 'orig_name', 'nodename', 'clustername', 'URL' ])
-
+ [ 'nodename', 'clustername', 'URL' ])
baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
-
clustername = fvar['clustername']
- if clustername is None:
- errors.append('Unable to determine the current cluster name')
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VDP2: no clustername')
-
nodename = fvar['nodename']
- if nodename is None:
- errors.append('Unable to determine the current node name')
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VDP1: no nodename for %s' % clustername)
- disable_list = list()
- enable_list = list()
- for i in request.form.items():
- try:
- if i[0][:11] == '__daemon__:':
- daemon_prop = i[1]
- if len(daemon_prop) == 2:
- if daemon_prop[1] == '1':
- disable_list.append(daemon_prop[0])
- else:
- if daemon_prop[1] == '0' and daemon_prop[2] == 'on':
- enable_list.append(daemon_prop[0])
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VDP3: error: %s: %r %s' \
- % (str(i), e, str(e)))
+ ret = validate_cluster_daemon_form(self, request)
+ if ret[0] is not True:
+ return ret
- if len(enable_list) < 1 and len(disable_list) < 1:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VDP4: no changes made')
- request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&nodename=%s' \
- % (baseurl, NODE, clustername, nodename))
+ enable_list = ret[1]['enable_list']
+ disable_list = ret[1]['disable_list']
+
+ errors = list()
nodename_resolved = resolve_nodename(self, clustername, nodename)
try:
@@ -1878,14 +963,14 @@
luci_log.debug_verbose('VDP5: RC %s: %r %s' \
% (nodename_resolved, e, str(e)))
errors.append('Unable to connect to the ricci agent on %s to update cluster daemon properties' % nodename_resolved)
- return (False, {'errors': errors})
+ return (False, { 'errors': errors})
batch_id, result = rq.updateServices(rc, enable_list, disable_list)
if batch_id is None or result is None:
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('VDP6: setCluserConf: batchid or result is None')
errors.append('Unable to update the cluster daemon properties on node %s' % nodename_resolved)
- return (False, {'errors': errors})
+ return (False, { 'errors': errors})
try:
if len(enable_list) > 0:
@@ -1906,13 +991,17 @@
request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&nodename=%s&busyfirst=true' % (baseurl, NODE, clustername, nodename))
def validateFdom(self, request):
- errors = list()
+ from LuciValidation import validate_fdom
+
fvar = GetReqVars(request, [ 'clustername', 'name', 'oldname', 'URL' ])
baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
+ name = fvar['name']
clustername = fvar['clustername']
if clustername is None:
- errors.append('Unable to determine this cluster\'s name')
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('VFE0: No cluster name')
+ return (False, {'errors': ['No cluster name was given']})
model = LuciExtractCluModel(self, request, clustername)
if model is None:
@@ -1920,111 +1009,21 @@
luci_log.debug_verbose('validateFdom0: no model')
return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
- name = fvar['name']
- if name is None:
- errors.append('No name was given for this failover domain')
-
- prioritized = False
- try:
- prioritized = request.form.has_key('prioritized')
- except:
- prioritized = False
-
- restricted = False
- try:
- restricted = request.form.has_key('restricted')
- except:
- restricted = False
-
- nofailback = False
- try:
- nofailback = request.form.has_key('nofailback')
- except:
- nofailback = False
-
- oldname = fvar['oldname']
-
- if oldname is None or oldname != name:
- if model.getFailoverDomainByName(name) is not None:
- errors.append('A failover domain named "%s" already exists' % name)
-
- fdom = None
- if oldname is not None:
- fdom = model.getFailoverDomainByName(oldname)
- if fdom is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('validateFdom1: No fdom named %s exists' % oldname)
- errors.append('No failover domain named "%s" exists' % oldname)
- else:
- fdom.addAttribute('name', name)
- fdom.children = list()
- else:
- fdom = FailoverDomain()
- fdom.addAttribute('name', name)
-
- if fdom is None or len(errors) > 0:
- return (False, {'errors': errors })
-
- if prioritized:
- fdom.addAttribute('ordered', '1')
- else:
- fdom.addAttribute('ordered', '0')
-
- if restricted:
- fdom.addAttribute('restricted', '1')
- else:
- fdom.addAttribute('restricted', '0')
-
- if nofailback:
- fdom.addAttribute('nofailback', '1')
- else:
- fdom.addAttribute('nofailback', '0')
-
- for i in model.getNodeNames():
- if request.form.has_key(i):
- fdn = FailoverDomainNode()
- fdn.addAttribute('name', i)
- if prioritized:
- priority = 1
- try:
- priority = int(request.form['__PRIORITY__%s' % i].strip())
- if priority < 1:
- priority = 1
- except Exception, e:
- priority = 1
- fdn.addAttribute('priority', str(priority))
- fdom.addChild(fdn)
-
- try:
- fdom_ptr = model.getFailoverDomainPtr()
- if not oldname:
- fdom_ptr.addChild(fdom)
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('validateFdom2: %r %s' % (e, str(e)))
- errors.append('Unable to update the cluster configuration')
-
- if len(errors) > 0:
- return (False, {'errors': errors })
-
- if oldname:
- action = FDOM
- status_msg = 'Updating failover domain "%s"' % oldname
- else:
- action = FDOM_ADD
- status_msg = 'Creating failover domain "%s"' % name
+ ret = validate_fdom(self, request)
+ if ret[0] is not True:
+ return ret
- ret = propagateClusterConfAsync(self, model, None, action, status_msg)
+ ret = propagateClusterConfAsync(self, model, None,
+ ret[1]['action'], ret[1]['msg'])
if ret[0] is not True:
return ret
request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fdomname=%s&busyfirst=true' % (baseurl, FDOM, clustername, name))
def validateVM(self, request):
- errors = list()
-
- fvar = GetReqVars(request, [ 'clustername', 'vmname', 'oldname', 'vmpath', 'recovery', 'domain', 'URL' ])
+ from LuciValidation import validate_vmsvc_form
+ fvar = GetReqVars(request, [ 'clustername', 'URL' ])
baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
clustername = fvar['clustername']
@@ -2039,101 +1038,14 @@
luci_log.debug_verbose('validateVM1: no model')
return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
- vm_name = fvar['vmname']
- if vm_name is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('validateVM2: no vm name')
- errors.append('No virtual machine name was given')
-
- vm_path = fvar['vmpath']
- if vm_path is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('validateVM3: no vm path')
- errors.append('No path to the virtual machine configuration directory was given')
-
- autostart = 1
- if request.form.has_key('autostart'):
- autostart = 1
- else:
- autostart = 0
-
- exclusive = 0
- if request.form.has_key('exclusive'):
- exclusive = 1
- else:
- exclusive = 0
-
- recovery = fvar['recovery']
- if recovery is not None and recovery != 'restart' and recovery != 'relocate' and recovery != 'disable':
- errors.append('You entered an invalid recovery option: "%s" Valid options are "restart" "relocate" and "disable"')
-
- fdom = fvar['domain']
-
- if len(errors) > 0:
- return (False, {'errors': errors })
-
- isNew = False
- old_name = fvar['oldname']
- if old_name is None:
- isNew = True
-
- delete_vm = False
- if request.form.has_key('delete'):
- try:
- xvm = model.retrieveVMsByName(old_name)
- if not xvm:
- raise Exception, 'not found'
- rmptr = model.getResourceManagerPtr()
- rmptr.removeChild(xvm)
- delete_vm = True
- except:
- return (False, {'errors': ['No virtual machine service named "%s" exists' % old_name ]})
- else:
- if isNew is True:
- xvm = Vm()
- xvm.addAttribute('name', vm_name)
- xvm.addAttribute('path', vm_path)
- rmptr = model.getResourceManagerPtr()
- rmptr.addChild(xvm)
- else:
- try:
- xvm = model.retrieveVMsByName(old_name)
- if not xvm:
- raise Exception, 'not found'
- except:
- return (False, {'errors': ['No virtual machine service named "%s" exists' % old_name ]})
- xvm.addAttribute('name', vm_name)
- xvm.addAttribute('path', vm_path)
-
- xvm.addAttribute('autostart', str(autostart))
- xvm.addAttribute('exclusive', str(exclusive))
- if fdom:
- xvm.addAttribute('domain', fdom)
- else:
- try:
- xvm.removeAttribute('domain')
- except:
- pass
-
- if recovery:
- xvm.addAttribute('recovery', recovery)
- else:
- try:
- xvm.removeAttribute('recovery')
- except:
- pass
+ ret = validate_vmsvc_form(model, request)
+ if ret[0] is not True:
+ return ret
- if delete_vm is True:
- action = VM_CONFIG
- status_msg = 'Deleting virtual machine service "%s"' % vm_name
- elif isNew is True:
- action = VM_ADD
- status_msg = 'Creating virtual machine service "%s"' % vm_name
- else:
- action = VM_CONFIG
- status_msg = 'Configuring virtual machine service "%s"' % vm_name
+ action_type = ret[1]['action_type']
+ action_msg = ret[1]['action_msg']
- ret = propagateClusterConfAsync(self, model, None, action, status_msg)
+ ret = propagateClusterConfAsync(self, model, None, action_type, action_msg)
if ret[0] is not True:
return ret
@@ -2204,7 +1116,7 @@
cc = None
if req.has_key('new_cluster_conf'):
cc = req['new_cluster_conf']
- msg_list.append('Checking if valid XML - ')
+ msg_list.append('Checking XML validity - ')
cc_xml = None
try:
cc_xml = minidom.parseString(cc)
@@ -2269,7 +1181,8 @@
57: deleteFenceDevice,
58: validateNodeFenceConfig,
60: validate_xvm_key_dist,
- 80: process_cluster_conf_editor
+ 80: process_cluster_conf_editor,
+ 1001: validate_clusvc_async
}
def validatePost(self, request):
@@ -2278,41 +1191,15 @@
except Exception, e:
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('VP0: error: %r %s' % (e, str(e)))
- return None
+ return (False, {})
if not pagetype in formValidators:
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('VP1: no handler for page type %d' % pagetype)
- return None
+ return (False, {})
else:
return formValidators[pagetype](self, request)
-def getClusterURL(self, request, model):
- try:
- clustername = request['clustername'].strip()
- if not clustername:
- raise Exception, 'cluster name from request is blank'
- except:
- try:
- clustername = model.getClusterName()
- if not clustername:
- raise Exception, 'cluster name from model is blank'
- except:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('GCURL0: unable to get cluster name')
- return ''
-
- return '/luci/cluster/index_html?pagetype=7&clustername=%s' % clustername
-
-def getRicciAgentForCluster(self, req):
- fvar = GetReqVars(req, [ 'clustername' ])
- clustername = fvar['clustername']
- if clustername is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug('GRAFC0: no cluster name was found')
- return None
- return getRicciAgent(self, clustername)
-
def clusterTaskProcess(self, model, request):
fvar = GetReqVars(request, [ 'task', 'clustername', 'URL' ])
@@ -2366,10 +1253,10 @@
fvar = GetReqVars(request, [ 'task', 'clustername', 'nodename', 'URL' ])
task = fvar['task']
- clustername = fvar['clustername']
nodename = fvar['nodename']
baseurl = fvar['URL'] or LUCI_CLUSTER_BASE_URL
+ clustername = fvar['clustername']
if clustername is None:
if LUCI_DEBUG_MODE is True:
luci_log.debug('NTP0: missing cluster name')
@@ -2435,67 +1322,201 @@
# we'll hit it again, and try again then
pass
- if rc is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug('NTP7: node %s is not authenticated' \
- % nodename_resolved)
- return (False, { 'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
+ if rc is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug('NTP7: node %s is not authenticated' \
+ % nodename_resolved)
+ return (False, { 'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
+
+ if task == NODE_LEAVE_CLUSTER:
+ from LuciClusterActions import NodeLeaveCluster
+ if NodeLeaveCluster(self, rc, clustername, nodename_resolved) is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('NTP8: nodeLeave failed')
+ return (False, {'errors': [ 'Node "%s" failed to leave cluster "%s"' % (nodename_resolved, clustername) ]})
+
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, NODES, clustername))
+ elif task == NODE_JOIN_CLUSTER:
+ from LuciClusterActions import NodeJoinCluster
+ if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('NTP9: nodeJoin failed')
+ return (False, {'errors': [ 'Node "%s" failed to join cluster "%s"' % (nodename_resolved, clustername) ]})
+
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, NODES, clustername))
+ elif task == NODE_REBOOT:
+ from LuciClusterActions import NodeReboot
+ if NodeReboot(self, rc, clustername, nodename_resolved) is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('NTP10: nodeReboot failed')
+ return (False, {'errors': [ 'Node "%s" failed to reboot' \
+ % nodename_resolved ]})
+
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, NODES, clustername))
+ elif task == NODE_FENCE:
+ from LuciClusterActions import NodeFence
+ if NodeFence(self, clustername, nodename, nodename_resolved) is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('NTP11: nodeFencefailed')
+ return (False, {'errors': [ 'Fencing of node "%s" failed' \
+ % nodename_resolved]})
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, NODES, clustername))
+ elif task == NODE_DELETE:
+ from LuciClusterActions import NodeDeleteFromCluster
+ if NodeDeleteFromCluster(self, rc, model, clustername, nodename, nodename_resolved) is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('NTP12: nodeDelete failed')
+ return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
+
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, NODES, clustername))
+ elif task == NODE_FORCE_DELETE:
+ from LuciClusterActions import NodeForceDeleteFromCluster
+ if NodeForceDeleteFromCluster(self, model, clustername, nodename, nodename_resolved) is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('NTP13: nodeForceDelete failed')
+ return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
+
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, NODES, clustername))
+
+def getResourceInfo(model, request):
+ if not model:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('GRI0: no model object in session')
+ return {}
+
+ fvars = GetReqVars(request,
+ [ 'resourcename', 'type', 'value', 'clustername', 'URL' ])
+
+ baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+ name = fvars['resourcename']
+ if name is None:
+ if fvars['type'] == 'ip':
+ name = fvars['value']
+
+ if name is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('GRI1: missing res name')
+ return {}
+
+ from LuciClusterInfo import getResourceInfo as gri
+ return gri(model, name, baseurl, res=None)
+
+def serviceRestart(self, rc, req):
+ from LuciClusterActions import RestartCluSvc
+
+ fvars = GetReqVars(req,
+ [ 'clustername', 'servicename', 'nodename', 'URL' ])
+ baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+ ret = RestartCluSvc(self, rc, fvars)
+ if ret is None:
+ response = req.RESPONSE
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, SERVICE_LIST, fvars['clustername']))
+ else:
+ return ret
+
+def serviceStop(self, rc, req):
+ from LuciClusterActions import StopCluSvc
+
+ fvars = GetReqVars(req,
+ [ 'clustername', 'servicename', 'nodename', 'URL' ])
+ baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+ ret = StopCluSvc(self, rc, fvars)
+ if ret is None:
+ response = req.RESPONSE
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, SERVICE_LIST, fvars['clustername']))
+ else:
+ return ret
+
+def serviceStart(self, rc, req):
+ from LuciClusterActions import StartCluSvc
+
+ fvars = GetReqVars(req,
+ [ 'clustername', 'servicename', 'nodename', 'URL' ])
+ baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+ ret = StartCluSvc(self, rc, fvars)
+ if ret is None:
+ response = req.RESPONSE
+ response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+ % (baseurl, SERVICE_LIST, fvars['clustername']))
+ else:
+ return ret
+
+def serviceDelete(self, rc, req):
+ from LuciClusterActions import DeleteCluSvc
+
+ fvars = GetReqVars(req,
+ [ 'clustername', 'servicename', 'nodename', 'URL' ])
+ baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+ clustername = fvars['clustername']
- if task == NODE_LEAVE_CLUSTER:
- from LuciClusterActions import NodeLeaveCluster
- if NodeLeaveCluster(self, rc, clustername, nodename_resolved) is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('NTP8: nodeLeave failed')
- return (False, {'errors': [ 'Node "%s" failed to leave cluster "%s"' % (nodename_resolved, clustername) ]})
+ model = LuciExtractCluModel(self, req, clustername)
+ if model is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('serviceDelete0: no model')
+ return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
+ ret = DeleteCluSvc(self, rc, fvars, model)
+ if ret is None:
+ response = req.RESPONSE
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
- % (baseurl, NODES, clustername))
- elif task == NODE_JOIN_CLUSTER:
- from LuciClusterActions import NodeJoinCluster
- if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('NTP9: nodeJoin failed')
- return (False, {'errors': [ 'Node "%s" failed to join cluster "%s"' % (nodename_resolved, clustername) ]})
+ % (baseurl, SERVICES, clustername))
+ else:
+ return ret
- response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
- % (baseurl, NODES, clustername))
- elif task == NODE_REBOOT:
- from LuciClusterActions import NodeReboot
- if NodeReboot(self, rc, clustername, nodename_resolved) is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('NTP10: nodeReboot failed')
- return (False, {'errors': [ 'Node "%s" failed to reboot' \
- % nodename_resolved ]})
+def serviceMigrate(self, rc, req):
+ from LuciClusterActions import MigrateCluSvc
- response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
- % (baseurl, NODES, clustername))
- elif task == NODE_FENCE:
- from LuciClusterActions import NodeFence
- if NodeFence(self, clustername, nodename, nodename_resolved) is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('NTP11: nodeFencefailed')
- return (False, {'errors': [ 'Fencing of node "%s" failed' \
- % nodename_resolved]})
- response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
- % (baseurl, NODES, clustername))
- elif task == NODE_DELETE:
- from LuciClusterActions import NodeDeleteFromCluster
- if NodeDeleteFromCluster(self, rc, model, clustername, nodename, nodename_resolved) is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('NTP12: nodeDelete failed')
- return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
+ fvars = GetReqVars(req,
+ [ 'clustername', 'servicename', 'nodename', 'URL' ])
+ baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+ ret = MigrateCluSvc(self, rc, fvars)
+ if ret is None:
+ response = req.RESPONSE
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
- % (baseurl, NODES, clustername))
- elif task == NODE_FORCE_DELETE:
- from LuciClusterActions import NodeForceDeleteFromCluster
- if NodeForceDeleteFromCluster(self, model, clustername, nodename, nodename_resolved) is None:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('NTP13: nodeForceDelete failed')
- return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
+ % (baseurl, SERVICE_LIST, fvars['clustername']))
+ else:
+ return ret
+
+def resourceDelete(self, rc, req):
+ from LuciClusterActions import DeleteResource
+
+ fvars = GetReqVars(req,
+ [ 'clustername', 'resourcename', 'nodename', 'URL' ])
+ baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+ clustername = fvars['clustername']
+
+ model = LuciExtractCluModel(self, req, clustername)
+ if model is None:
+ if LUCI_DEBUG_MODE is True:
+ luci_log.debug_verbose('resourceDelete0: no model')
+ return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})
+ ret = DeleteResource(self, rc, model, fvars['resourcename'])
+ if ret is None:
+ response = req.RESPONSE
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
- % (baseurl, NODES, clustername))
+ % (baseurl, RESOURCES, clustername))
+ else:
+ return ret
+
+def getSystemLogs(self, req):
+ from LuciClusterActions import GetSystemLogs
+
+ fvars = GetReqVars(req, [ 'clustername', 'nodename' ])
+ return GetSystemLogs(self, fvars)
def isClusterBusy(self, req):
items = None
@@ -2832,166 +1853,3 @@
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('ICB26: returning busy_map: %s' % str(busy_map))
return busy_map
-
-# These are called from external methods.
def getResourceInfo(model, request):
	"""Return the info dict for the resource named in the request.

	Falls back to the 'value' form field when the resource type is 'ip'
	(IP resources are identified by their address rather than a name).
	Returns {} when no model object or no usable resource name exists.
	"""
	if not model:
		if LUCI_DEBUG_MODE is True:
			luci_log.debug_verbose('GRI0: no model object in session')
		return {}

	fvars = GetReqVars(request,
		[ 'resourcename', 'type', 'value', 'clustername', 'URL' ])
	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL

	name = fvars['resourcename']
	if name is None and fvars['type'] == 'ip':
		name = fvars['value']
	if name is None:
		if LUCI_DEBUG_MODE is True:
			luci_log.debug_verbose('GRI1: missing res name')
		return {}

	from LuciClusterInfo import getResourceInfo as gri
	return gri(model, name, baseurl, res=None)
-
def serviceRestart(self, rc, req):
	"""Restart a cluster service, then redirect to the service list.

	Returns None after redirecting on success, or the error tuple
	produced by RestartCluSvc on failure.
	"""
	from LuciClusterActions import RestartCluSvc

	fvars = GetReqVars(req,
		[ 'clustername', 'servicename', 'nodename', 'URL' ])
	ret = RestartCluSvc(self, rc, fvars)
	if ret is not None:
		return ret

	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
	req.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
		% (baseurl, SERVICE_LIST, fvars['clustername']))
-
def serviceStop(self, rc, req):
	"""Stop a cluster service, then redirect to the service list.

	Returns None after redirecting on success, or the error tuple
	produced by StopCluSvc on failure.
	"""
	from LuciClusterActions import StopCluSvc

	fvars = GetReqVars(req,
		[ 'clustername', 'servicename', 'nodename', 'URL' ])
	ret = StopCluSvc(self, rc, fvars)
	if ret is not None:
		return ret

	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
	req.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
		% (baseurl, SERVICE_LIST, fvars['clustername']))
-
def serviceStart(self, rc, req):
	"""Start a cluster service, then redirect to the service list.

	Returns None after redirecting on success, or the error tuple
	produced by StartCluSvc on failure.
	"""
	from LuciClusterActions import StartCluSvc

	fvars = GetReqVars(req,
		[ 'clustername', 'servicename', 'nodename', 'URL' ])
	ret = StartCluSvc(self, rc, fvars)
	if ret is not None:
		return ret

	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
	req.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
		% (baseurl, SERVICE_LIST, fvars['clustername']))
-
def serviceDelete(self, rc, req):
	"""Delete a cluster service and return to the services page.

	Loads the cluster model named in the request, asks DeleteCluSvc to
	remove the service, then redirects the browser on success.  Returns
	None after a successful redirect, or a (False, {'errors': [...]})
	tuple when the model cannot be loaded or the delete fails.
	"""
	from LuciClusterActions import DeleteCluSvc

	fvars = GetReqVars(req,
		[ 'clustername', 'servicename', 'nodename', 'URL' ])
	clustername = fvars['clustername']
	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL

	model = LuciExtractCluModel(self, req, clustername)
	if model is None:
		if LUCI_DEBUG_MODE is True:
			luci_log.debug_verbose('serviceDelete0: no model')
		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})

	ret = DeleteCluSvc(self, rc, fvars, model)
	if ret is not None:
		return ret
	req.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
		% (baseurl, SERVICES, clustername))
-
def serviceMigrate(self, rc, req):
	"""Migrate a cluster service, then redirect to the service list.

	Returns None after redirecting on success, or the error tuple
	produced by MigrateCluSvc on failure.
	"""
	from LuciClusterActions import MigrateCluSvc

	fvars = GetReqVars(req,
		[ 'clustername', 'servicename', 'nodename', 'URL' ])
	ret = MigrateCluSvc(self, rc, fvars)
	if ret is not None:
		return ret

	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
	req.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
		% (baseurl, SERVICE_LIST, fvars['clustername']))
-
def resourceDelete(self, rc, req):
	"""Delete a cluster resource and return to the resources page.

	Returns None after a successful redirect, or a
	(False, {'errors': [...]}) tuple when the cluster model cannot be
	loaded or DeleteResource reports failure.
	"""
	from LuciClusterActions import DeleteResource

	fvars = GetReqVars(req,
		[ 'clustername', 'resourcename', 'nodename', 'URL' ])
	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
	clustername = fvars['clustername']

	model = LuciExtractCluModel(self, req, clustername)
	if model is None:
		if LUCI_DEBUG_MODE is True:
			luci_log.debug_verbose('resourceDelete0: no model')
		return (False, { 'errors': [ 'Unable to retrieve the cluster configuration for %s. The configuration XML may contain errors' % clustername ]})

	ret = DeleteResource(self, rc, model, fvars['resourcename'])
	if ret is not None:
		return ret
	response = req.RESPONSE
	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
		% (baseurl, RESOURCES, clustername))
-
-def resourceAdd(self, req, model, res):
- from LuciClusterActions import AddResource, EditResource
- fvars = GetReqVars(req, [ 'URL' ])
- baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
-
- try:
- cluname = model.getClusterName()
- rc = getRicciAgent(self, cluname)
- if rc is None:
- raise Exception, 'no rc'
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('resourceAdd0: no ricci agent: %r %s' \
- % (e, str(e)))
- return (False, { 'errors': [ 'Unable to find a ricci agent for cluster "%s"' % cluname ]})
-
- if req.form.has_key('edit'):
- ret = EditResource(self, rc, model, res)
- else:
- ret = AddResource(self, rc, model, res)
-
- if ret is None:
- response = req.RESPONSE
- response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
- % (baseurl, RESOURCES, cluname))
- else:
- return ret
-
def getSystemLogs(self, req):
	"""Return system log output for the cluster/node named in the request."""
	from LuciClusterActions import GetSystemLogs
	return GetSystemLogs(self, GetReqVars(req, [ 'clustername', 'nodename' ]))
More information about the Cluster-devel
mailing list