[Cluster-devel] conga luci/load_site.py luci/pack.py luci/clus ...
rmccabe at sourceware.org
rmccabe at sourceware.org
Mon Feb 5 21:27:27 UTC 2007
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL4
Changes by: rmccabe at sourceware.org 2007-02-05 21:27:23
Modified files:
luci : load_site.py pack.py
luci/cluster : form-macros
luci/site/luci/Extensions: ModelBuilder.py cluster_adapters.py
luci/test : CGA_0200_Create_cluster.py congaDemoTests.py
conga_Helpers.py conga_suite.py
tests_README.txt
luci/utils : luci_admin luci_cleanup luci_manage
ricci/docs : cluster_api.html modules.html rpm_api.html
service_api.html
ricci/modules/cluster: ClusterStatus.cpp
ricci/modules/service: ServiceManager.cpp
Log message:
Don't install or start fenced on GULM clusters.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/load_site.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.15&r2=1.15.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/pack.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.5&r2=1.5.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.176&r2=1.176.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ModelBuilder.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.19&r2=1.19.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.227&r2=1.227.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/test/CGA_0200_Create_cluster.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.2&r2=1.2.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/test/congaDemoTests.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.8&r2=1.8.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/test/conga_Helpers.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.10&r2=1.10.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/test/conga_suite.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.7&r2=1.7.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/test/tests_README.txt.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.1&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/utils/luci_admin.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.52&r2=1.52.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/utils/luci_cleanup.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.4.4.1&r2=1.4.4.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/utils/luci_manage.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.1.4.1&r2=1.1.4.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/docs/cluster_api.html.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.4&r2=1.4.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/docs/modules.html.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.4&r2=1.4.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/docs/rpm_api.html.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.2&r2=1.2.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/docs/service_api.html.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.1&r2=1.1.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/cluster/ClusterStatus.cpp.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.16&r2=1.16.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/service/ServiceManager.cpp.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.7&r2=1.7.2.1
--- conga/luci/load_site.py 2006/11/02 00:46:49 1.15
+++ conga/luci/load_site.py 2007/02/05 21:27:21 1.15.2.1
@@ -3,6 +3,7 @@
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
+# Copyright (C) 2006-2007 Red Hat, Inc.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
--- conga/luci/pack.py 2006/11/02 00:46:49 1.5
+++ conga/luci/pack.py 2007/02/05 21:27:22 1.5.2.1
@@ -1,5 +1,7 @@
#!/usr/bin/python
+# Copyright (C) 2006-2007 Red Hat, Inc.
+
import os, sys, string
sys.path.extend((
--- conga/luci/cluster/form-macros 2007/02/02 04:34:35 1.176
+++ conga/luci/cluster/form-macros 2007/02/05 21:27:22 1.176.2.1
@@ -549,7 +549,8 @@
class python: 'configTab' + (configTabNum == 1 and ' configTabActive' or '');
">General</a>
</li>
- <li class="configTab">
+ <li class="configTab"
+ tal:condition="not:clusterinfo/gulm">
<a tal:attributes="
href clusterinfo/fencedaemon_url | nothing;
class python: 'configTab' + (configTabNum == 2 and ' configTabActive' or '');
--- conga/luci/site/luci/Extensions/ModelBuilder.py 2007/02/01 20:49:08 1.19
+++ conga/luci/site/luci/Extensions/ModelBuilder.py 2007/02/05 21:27:22 1.19.2.1
@@ -277,15 +277,13 @@
obj_tree.addAttribute("name","alpha_cluster")
obj_tree.addAttribute("config_version","1")
- fdp = FenceDaemon()
- obj_tree.addChild(fdp)
- self.fence_daemon_ptr = fdp
cns = ClusterNodes()
obj_tree.addChild(cns)
self.clusternodes_ptr = cns
gulm = Gulm()
self.GULM_ptr = gulm
+ self.fence_daemon_ptr = None
obj_tree.addChild(gulm)
fds = FenceDevices()
@@ -701,7 +699,7 @@
self.resourcemanager_ptr.addChild(rcs)
self.resources_ptr = rcs
- if self.fence_daemon_ptr == None:
+ if self.GULM_ptr is None and self.fence_daemon_ptr is None:
fdp = FenceDaemon()
self.cluster_ptr.addChild(fdp)
self.fence_daemon_ptr = fdp
@@ -932,7 +930,7 @@
def check_fence_daemon(self):
- if self.fence_daemon_ptr == None:
+ if self.GULM_ptr is None and self.fence_daemon_ptr is None:
self.fence_daemon_ptr = FenceDaemon()
self.cluster_ptr.addChild(self.fence_daemon_ptr)
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2007/02/02 04:34:35 1.227
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2007/02/05 21:27:22 1.227.2.1
@@ -1151,6 +1151,9 @@
def validateFenceConfig(model, form):
errors = list()
+ if model.getGULMPtr() is not None:
+ return (False, {'errors': [ 'GULM clusters do not support fenced.' ]})
+
try:
post_fail_delay = int(form['post_fail_delay'])
if post_fail_delay < 0:
@@ -3509,23 +3512,24 @@
#-------------
#new cluster params - if rhel5
#-------------
- #Fence Daemon Props
- fencedaemon_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_FENCE_TAB
- clumap['fencedaemon_url'] = fencedaemon_url
- fdp = model.getFenceDaemonPtr()
- pjd = fdp.getAttribute('post_join_delay')
- if pjd is None:
- pjd = "6"
- pfd = fdp.getAttribute('post_fail_delay')
- if pfd is None:
- pfd = "0"
- #post join delay
- clumap['pjd'] = pjd
- #post fail delay
- clumap['pfd'] = pfd
gulm_ptr = model.getGULMPtr()
if not gulm_ptr:
+ #Fence Daemon Props
+ fencedaemon_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_FENCE_TAB
+ clumap['fencedaemon_url'] = fencedaemon_url
+ fdp = model.getFenceDaemonPtr()
+ pjd = fdp.getAttribute('post_join_delay')
+ if pjd is None:
+ pjd = "6"
+ pfd = fdp.getAttribute('post_fail_delay')
+ if pfd is None:
+ pfd = "0"
+ #post join delay
+ clumap['pjd'] = pjd
+ #post fail delay
+ clumap['pfd'] = pfd
+
#-------------
#if multicast
multicast_url = prop_baseurl + PROPERTIES_TAB + "=" + PROP_MCAST_TAB
@@ -4293,9 +4297,9 @@
dlist.append("ccsd")
if model.getGULMPtr() is None:
dlist.append("cman")
+ dlist.append("fenced")
else:
dlist.append("lock_gulmd")
- dlist.append("fenced")
dlist.append("rgmanager")
states = getDaemonStates(rc, dlist)
infohash['d_states'] = states
--- conga/luci/test/CGA_0200_Create_cluster.py 2006/12/14 20:01:40 1.2
+++ conga/luci/test/CGA_0200_Create_cluster.py 2007/02/05 21:27:22 1.2.2.1
@@ -67,9 +67,9 @@
# sel.type("__SYSTEM1:Passwd", "password")
# sel.click("//input[@value='Add Another Row']")
systemCounter = 0
- for systemName in CONGA_CLUSTER_SYSTEMS.keys():
+ for systemName in CONGA_SMALL_CLUSTER_SYSTEMS.keys():
sel.type("__SYSTEM" + str(systemCounter) + ":Addr", systemName)
- sel.type("__SYSTEM" + str(systemCounter) + ":Passwd", CONGA_CLUSTER_SYSTEMS[systemName])
+ sel.type("__SYSTEM" + str(systemCounter) + ":Passwd", CONGA_SMALL_CLUSTER_SYSTEMS[systemName])
systemCounter = systemCounter + 1
if (systemCounter > 2):
sel.click("//input[@value='Add Another Row']")
@@ -102,7 +102,7 @@
self.assertTrue (sel.is_text_present('Removed cluster "testCluster" successfully'))
# Delete the storage systems created when the cluster was created
- for systemName in CONGA_CLUSTER_SYSTEMS:
+ for systemName in CONGA_SMALL_CLUSTER_SYSTEMS:
deleteStorageSystem(sel, systemName)
# Validation - verify that the success message was displayed for each storage system
self.assertEqual("Do you really want to remove the following managed systems:\nStorage Systems:\n-" + systemName, sel.get_confirmation())
--- conga/luci/test/congaDemoTests.py 2006/12/14 20:01:40 1.8
+++ conga/luci/test/congaDemoTests.py 2007/02/05 21:27:22 1.8.2.1
@@ -122,13 +122,13 @@
# sel.type("__SYSTEM1:Passwd", "password")
# sel.click("//input[@value='Add Another Row']")
systemCounter = 0
- for systemName in CONGA_CLUSTER_SYSTEMS.keys():
+ for systemName in CONGA_SMALL_CLUSTER_SYSTEMS.keys():
sel.type("__SYSTEM" + str(systemCounter) + ":Addr", systemName)
- sel.type("__SYSTEM" + str(systemCounter) + ":Passwd", CONGA_CLUSTER_SYSTEMS[systemName])
+ sel.type("__SYSTEM" + str(systemCounter) + ":Passwd", CONGA_SMALL_CLUSTER_SYSTEMS[systemName])
systemCounter = systemCounter + 1
if (systemCounter > 2):
- sel.click("//input[@value='Add Another Row']")
- sel.click("document.adminform.rhn_dl[1]")
+ sel.click("//input[@value='Add another entry']")
+ #sel.click("document.adminform.rhn_dl[1]")
sel.click("Submit")
sel.wait_for_page_to_load(PAGE_DISPLAY_DELAY)
@@ -157,8 +157,8 @@
self.assertTrue (sel.is_text_present('Removed cluster "testCluster" successfully'))
# Delete the storage systems created when the cluster was created
- for systemName in CONGA_CLUSTER_SYSTEMS:
- deleteStorageSystem(sel, systemName)
+ for systemName in CONGA_SMALL_CLUSTER_SYSTEMS:
+ deleteStorageSystem(sel, systemName, self.logger)
# Validation - verify that the success message was displayed for each storage system
self.assertEqual("Do you really want to remove the following managed systems:\nStorage Systems:\n-" + systemName, sel.get_confirmation())
sel.wait_for_page_to_load(PAGE_DISPLAY_DELAY)
--- conga/luci/test/conga_Helpers.py 2006/12/14 20:01:40 1.10
+++ conga/luci/test/conga_Helpers.py 2007/02/05 21:27:22 1.10.2.1
@@ -45,20 +45,23 @@
# 20061130 - Node tng3-1 isn't booting, node tng3-4 is having some problems too
-CONGA_STORAGE_SYSTEMS = {'tng3-1.lab.msp.redhat.com':'password',
-# 'tng3-2.lab.msp.redhat.com':'password',
+CONGA_STORAGE_SYSTEMS = { 'tng3-1.lab.msp.redhat.com':'password',
+ 'tng3-2.lab.msp.redhat.com':'password',
'tng3-3.lab.msp.redhat.com':'password',
-# 'tng3-4.lab.msp.redhat.com':'password',
+ 'tng3-4.lab.msp.redhat.com':'password',
'tng3-5.lab.msp.redhat.com':'password'}
-#CONGA_CLUSTER_SYSTEMS = {'tng3-1.lab.msp.redhat.com':'password',
-# 'tng3-2.lab.msp.redhat.com':'password',
-# 'tng3-3.lab.msp.redhat.com':'password',
-# 'tng3-4.lab.msp.redhat.com':'password'}
-
-CONGA_CLUSTER_SYSTEMS = {'tng3-1.lab.msp.redhat.com':'password',
+CONGA_SMALL_CLUSTER_SYSTEMS = { 'tng3-1.lab.msp.redhat.com':'password',
+ 'tng3-2.lab.msp.redhat.com':'password',
+ 'tng3-3.lab.msp.redhat.com':'password',
'tng3-4.lab.msp.redhat.com':'password' }
+CONGA_LARGE_CLUSTER_SYSTEMS = {'tng3-1.lab.msp.redhat.com':'password',
+ 'tng3-2.lab.msp.redhat.com':'password',
+ 'tng3-3.lab.msp.redhat.com':'password',
+ 'tng3-4.lab.msp.redhat.com':'password',
+ 'tng3-5.lab.msp.redhat.com':'password'}
+
CONGA_USERS = {'user1':'user1_password',
'user2':'user2_password',
'user3':'user3_password',
@@ -71,14 +74,14 @@
'user10':'user10_password'}
CONGA_USERS_SYSTEMS = {'user1':'tng3-1.lab.msp.redhat.com',
-# 'user2':'tng3-2.lab.msp.redhat.com',
+ 'user2':'tng3-2.lab.msp.redhat.com',
'user3':'tng3-3.lab.msp.redhat.com',
-# 'user4':'tng3-4.lab.msp.redhat.com',
+ 'user4':'tng3-4.lab.msp.redhat.com',
'user5':'tng3-5.lab.msp.redhat.com',
'user6':'tng3-1.lab.msp.redhat.com',
-# 'user7':'tng3-2.lab.msp.redhat.com',
+ 'user7':'tng3-2.lab.msp.redhat.com',
'user8':'tng3-3.lab.msp.redhat.com',
-# 'user9':'tng3-4.lab.msp.redhat.com',
+ 'user9':'tng3-4.lab.msp.redhat.com',
'user10':'tng3-5.lab.msp.redhat.com'}
# Data used to verify the on-line help contents. The Dictionary contains
@@ -93,9 +96,9 @@
HELP_DICTIONARY = {'Conga User Manual':77,
'Introduction':200,
'Conga Architecture':225,
- 'Homebase Tab':4225,
- 'Cluster Tab':10121,
- 'Storage Tab':20452 }
+ 'Homebase Tab':4257,
+ 'Cluster Tab':12280,
+ 'Storage Tab':23833 }
def createStorageSystem(sel, systemName, systemPassword, theLogger):
"""Common code to create storage systems"""
--- conga/luci/test/conga_suite.py 2006/12/14 20:01:40 1.7
+++ conga/luci/test/conga_suite.py 2007/02/05 21:27:22 1.7.2.1
@@ -67,15 +67,15 @@
# Assemble the suite
suite = unittest.TestSuite()
-suite.addTest(congaDemoSuite)
+#suite.addTest(congaDemoSuite)
#suite.addTest(CGA_0160_Add_UserSuite)
suite.addTest(CGA_0170_Online_Documentation_Portlet_Suite)
#suite.addTest(CGA_0200_Create_cluster_Suite)
# Run the test suite
-# unittest.TextTestRunner(verbosity=2).run(suite)
+unittest.TextTestRunner(verbosity=2).run(suite)
-# Write all pyunit-generated messages to a log file
-output = open(CONGA_LOG, "w")
-unittest.TextTestRunner(output).run(suite)
-output.close()
+# Or, write all pyunit-generated messages to a log file
+#output = open(CONGA_LOG, "w")
+#unittest.TextTestRunner(output).run(suite)
+#output.close()
--- conga/luci/test/tests_README.txt 2006/12/11 21:37:22 1.1
+++ conga/luci/test/tests_README.txt 2007/02/05 21:27:22 1.1.2.1
@@ -3,9 +3,10 @@
(Rev 1.0 - 20061211, ldimaggi at redhat.com)
-1) Install Selenium Core
+1) Install Selenium Core and Selenium RC (Remote Control)
a. wget http://release.openqa.org/selenium-core/0.8.1/selenium-core-0.8.1.zip
- b. Unzip someplace - I used /opt
+ b. wget http://release.openqa.org/selenium-remote-control/0.9.0/selenium-remote-control-0.9.0.zip
+ c. Unzip someplace - I used /opt
2) Make sure Firefox 1.5 or newer is installed - I'm using 1.5.0.8
--- conga/luci/utils/luci_admin 2007/01/18 03:02:38 1.52
+++ conga/luci/utils/luci_admin 2007/02/05 21:27:22 1.52.2.1
@@ -1,5 +1,7 @@
#!/usr/bin/python
+# Copyright (C) 2006-2007 Red Hat, Inc.
+
import sys, os, stat, select, string, pwd
from sys import stderr, argv
import types
--- conga/luci/utils/luci_cleanup 2007/02/02 19:52:08 1.4.4.1
+++ conga/luci/utils/luci_cleanup 2007/02/05 21:27:22 1.4.4.2
@@ -1,5 +1,7 @@
#!/usr/bin/python
+# Copyright (C) 2006-2007 Red Hat, Inc.
+
import sys, os, pwd
import types
--- conga/luci/utils/luci_manage 2007/02/02 19:52:08 1.1.4.1
+++ conga/luci/utils/luci_manage 2007/02/05 21:27:22 1.1.4.2
@@ -1,5 +1,7 @@
#!/usr/bin/python
+# Copyright (C) 2006-2007 Red Hat, Inc.
+
import sys, os, pwd
import types
--- conga/ricci/docs/cluster_api.html 2006/10/05 17:38:01 1.4
+++ conga/ricci/docs/cluster_api.html 2007/02/05 21:27:22 1.4.4.1
@@ -8,7 +8,7 @@
<META NAME="CHANGED" CONTENT="20060620;15340700">
</HEAD>
<BODY LANG="en-US" DIR="LTR">
-<P>Cluster module manages Redhat Cluster Suite.
+<P>Cluster module manages Red Hat Cluster Suite.
</P>
<P>Module name: “cluster”
</P>
--- conga/ricci/docs/modules.html 2006/06/05 19:54:40 1.4
+++ conga/ricci/docs/modules.html 2007/02/05 21:27:22 1.4.4.1
@@ -14,7 +14,7 @@
<P>Management Modules:</P>
<UL>
<LI><P><A HREF="cluster_api.html">Cluster Module</A> – Manages
- Redhat Cluster Suite</P>
+ Red Hat Cluster Suite</P>
<LI><P><A HREF="rpm_api.html">Rpm Module</A> – Manages rpm
packages. Allows retrieval of currently installed rpms, querying
repositories, and installation/upgrade of rpms using repositories
@@ -35,4 +35,4 @@
<P><BR><BR>
</P>
</BODY>
-</HTML>
\ No newline at end of file
+</HTML>
--- conga/ricci/docs/rpm_api.html 2006/10/12 19:13:11 1.2
+++ conga/ricci/docs/rpm_api.html 2007/02/05 21:27:22 1.2.4.1
@@ -36,10 +36,10 @@
installed; and upgraded, if already installed.
</P>
<P>There are couple of predefined rpm sets: <BR>- “Cluster Base”
-- base infrastructure of Redhat Cluster Suite (currently ccs, cman,
+- base infrastructure of Red Hat Cluster Suite (currently ccs, cman,
dlm, fence, and respective kernel-... rpms) <BR>- “Cluster Base -
-Gulm” - base infrastructure of Redhat Cluster Suite using Gulm lock
-manager (currently ccs, gulm, fence and respective kernel-... rpms)
+Gulm” - base infrastructure of Red Hat Cluster Suite using GULM lock
+manager (currently ccs, gulm, and respective kernel-... rpms)
<BR>- “Cluster Service Manager” - (currently rgmanager, magma,
magma-plugins) <BR>- “Clustered Storage” - shared storage
(currently GFS, lvm2-cluster and respective kernel-... rpms) <BR>-
--- conga/ricci/docs/service_api.html 2006/04/12 15:47:09 1.1
+++ conga/ricci/docs/service_api.html 2007/02/05 21:27:22 1.1.4.1
@@ -30,10 +30,10 @@
running="true/false"/><BR>“enabled” - enabled on
boot; “running” - currently running.</P>
<P>There are couple of predefined service sets: <BR>- “Cluster
-Base” - base infrastructure of Redhat Cluster Suite (currently
+Base” - base infrastructure of Red Hat Cluster Suite (currently
ccsd, cman, fenced) <BR>- “Cluster Base - Gulm” - base
-infrastructure of Redhat Cluster Suite using Gulm lock manager
-(currently ccsd, lock_gulmd, fenced) <BR>- “Cluster Service
+infrastructure of Red Hat Cluster Suite using GULM lock manager
+(currently ccsd, lock_gulmd) <BR>- “Cluster Service
Manager” - (currently rgmanager) <BR>- “Clustered Storage” -
shared storage (currently clvmd, gfs)<BR>- “Linux Virtual Server”
- (currently pulse, piranha-gui)</P>
@@ -135,4 +135,4 @@
<P ALIGN=LEFT><BR><BR>
</P>
</BODY>
-</HTML>
\ No newline at end of file
+</HTML>
--- conga/ricci/modules/cluster/ClusterStatus.cpp 2006/10/24 14:56:56 1.16
+++ conga/ricci/modules/cluster/ClusterStatus.cpp 2007/02/05 21:27:22 1.16.2.1
@@ -157,10 +157,11 @@
{
XMLObject cluster_conf(ClusterConf::get()); // bailout if cluster.conf not present
XMLObject stat = status();
+ bool cman_cluster = ClusterConf::is_cman(cluster_conf);
if (stat.get_attr("cluster_version") == "4") {
run_initd("ccsd", true, false);
- if (ClusterConf::is_cman(cluster_conf))
+ if (cman_cluster)
try {
run_initd("cman", true, true);
} catch ( ... ) {
@@ -197,7 +198,8 @@
if (use_qdisk)
run_initd("qdiskd", true, false);
- run_initd("fenced", true, false);
+ if (cman_cluster)
+ run_initd("fenced", true, false);
run_initd("clvmd", true, false);
run_initd("gfs", true, false);
run_initd("rgmanager", true, true);
@@ -205,18 +207,19 @@
// enable them on boot
run_chkconfig("ccsd", true);
- if (ClusterConf::is_cman(cluster_conf)) {
+ if (cman_cluster) {
run_chkconfig("cman", true);
run_chkconfig("lock_gulmd", false);
+ run_chkconfig("fenced", true);
} else {
run_chkconfig("cman", false);
+ run_chkconfig("fenced", false);
run_chkconfig("lock_gulmd", true);
}
if (use_qdisk)
run_chkconfig("qdiskd", true);
else
run_chkconfig("qdiskd", false);
- run_chkconfig("fenced", true);
run_chkconfig("clvmd", true);
run_chkconfig("gfs", true);
run_chkconfig("rgmanager", true);
--- conga/ricci/modules/service/ServiceManager.cpp 2006/10/25 18:47:16 1.7
+++ conga/ricci/modules/service/ServiceManager.cpp 2007/02/05 21:27:23 1.7.2.1
@@ -444,7 +444,6 @@
s = ServiceSet(name, descr);
servs.push_back("ccsd");
servs.push_back("lock_gulmd");
- servs.push_back("fenced");
if (populate_set(s, servs))
sets[name] = s;
}
More information about the Cluster-devel
mailing list