[Libvirt-cim] [PATCH 5 of 5] [TEST] Remove diskpool creation from RAFP, RP, and SDC tests
Deepti B Kalakeri
deeptik at linux.vnet.ibm.com
Mon Sep 8 12:38:33 UTC 2008
+1 from me.
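
A quick illustration for anyone following the thread: with the diskpool now
created once before the suite runs, each test only builds the DiskPool
InstanceID from the shared default_pool_name constant instead of calling
create_diskpool_conf()/cleanup_restore() itself. A minimal standalone sketch
of that pattern (the pool name below is a placeholder, not the real value of
XenKvmLib.const.default_pool_name):

    # Sketch only -- the real tests import default_pool_name from
    # XenKvmLib.const; "example-pool" is just a placeholder value here.
    default_pool_name = "example-pool"

    # Tests now derive the DiskPool InstanceID directly from the constant.
    diskp_id = "DiskPool/%s" % default_pool_name

    pool = { "MemoryPool"    : {'InstanceID' : "MemoryPool/0"},
             "ProcessorPool" : {'InstanceID' : "ProcessorPool/0"},
             "DiskPool"      : {'InstanceID' : diskp_id} }

    print pool["DiskPool"]['InstanceID']   # -> DiskPool/example-pool
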
Kaitlin Rupert wrote:
> # HG changeset patch
> # User Kaitlin Rupert <karupert at us.ibm.com>
> # Date 1220654590 25200
> # Node ID 3e14187adcadb92c530131368972b968ace5bc3b
> # Parent 553bf81e676d9cdb9f752614aa6b7b60652b6802
> [TEST] Remove diskpool creation from RAFP, RP, and SDC tests.
>
> The diskpool is now being created before the tests are run.
>
> Signed-off-by: Kaitlin Rupert <karupert at us.ibm.com>
>
> diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py
> --- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py Fri Sep 05 15:43:10 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py Fri Sep 05 15:43:10 2008 -0700
> @@ -31,10 +31,8 @@
> from XenKvmLib.vxml import get_class
> from CimTest import Globals
> from CimTest.Globals import logger
> -from XenKvmLib.const import do_main
> +from XenKvmLib.const import do_main, default_pool_name, default_network_name
> from CimTest.ReturnCodes import PASS, FAIL, XFAIL
> -from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
> -from XenKvmLib.const import default_network_name
>
> sup_types = ['Xen', 'XenFV', 'KVM', 'LXC']
>
> @@ -120,11 +118,7 @@
> vsxml.undefine(options.ip)
> return status
>
> - status, diskid = create_diskpool_conf(options.ip, options.virt)
> - if status != PASS:
> - cleanup_restore(options.ip, options.virt)
> - vsxml.undefine(options.ip)
> - return status
> + diskp_id = "DiskPool/%s" % default_pool_name
>
> if options.virt == 'LXC':
> pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"} }
> @@ -132,7 +126,7 @@
> else:
> pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"},
> "ProcessorPool" : {'InstanceID' : "ProcessorPool/0"},
> - "DiskPool" : {'InstanceID' : diskid},
> + "DiskPool" : {'InstanceID' : diskp_id},
> "NetworkPool" : {'InstanceID' : "NetworkPool/%s" \
> % test_npool }}
> rasd = { "MemoryPool" : "%s/mem" % test_dom,
> @@ -150,7 +144,6 @@
> if status != PASS:
> break
>
> - cleanup_restore(options.ip, options.virt)
> vsxml.undefine(options.ip)
> return status
>
> diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py
> --- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py Fri Sep 05 15:43:10 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py Fri Sep 05 15:43:10 2008 -0700
> @@ -31,11 +31,9 @@
> from XenKvmLib.classes import get_typed_class
> from CimTest import Globals
> from CimTest.Globals import logger
> -from XenKvmLib.const import do_main
> +from XenKvmLib.const import do_main, default_pool_name, default_network_name
> from CimTest.ReturnCodes import PASS, FAIL
> from XenKvmLib import enumclass
> -from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
> -from XenKvmLib.const import default_network_name
>
> sup_types = ['Xen', 'XenFV', 'KVM', 'LXC']
> test_dom = "RAFP_dom"
> @@ -87,7 +85,7 @@
> }
>
> disk = { 'rasd_id' : '%s/%s' % (test_dom, test_disk),
> - 'pool_id' : diskid
> + 'pool_id' : 'DiskPool/%s' % default_pool_name
> }
>
> if virt == 'LXC':
> @@ -170,12 +168,7 @@
> vsxml.undefine(server)
> return status
>
> - status, diskid = create_diskpool_conf(server, virt)
> - if status != PASS:
> - vsxml.undefine(server)
> - return status
> -
> - cn_id_list = init_list(test_disk, diskid, options.virt)
> + cn_id_list = init_list(test_disk, default_pool_name, options.virt)
>
> for rasd_cn, id_info in cn_id_list.iteritems():
> status = get_rasdinst_verify_pool_from_RAFP(server, virt, vsxml,
> @@ -183,7 +176,6 @@
> if status != PASS:
> return status
>
> - cleanup_restore(server, virt)
> vsxml.undefine(server)
> return status
>
> diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/ResourcePool/01_enum.py
> --- a/suites/libvirt-cim/cimtest/ResourcePool/01_enum.py Fri Sep 05 15:43:10 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/ResourcePool/01_enum.py Fri Sep 05 15:43:10 2008 -0700
> @@ -32,28 +32,20 @@
> from XenKvmLib import vxml
> from CimTest import Globals
> from CimTest.Globals import logger
> -from XenKvmLib.const import do_main
> +from XenKvmLib.const import do_main, default_pool_name
> from CimTest.ReturnCodes import PASS, FAIL, SKIP
> from VirtLib.live import net_list
> from XenKvmLib.vsms import RASD_TYPE_PROC, RASD_TYPE_MEM, RASD_TYPE_NET_ETHER, \
> RASD_TYPE_DISK
> -from XenKvmLib.common_util import cleanup_restore, test_dpath, \
> -create_diskpool_file
>
> sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
>
> -diskid = "%s/%s" % ("DiskPool", test_dpath)
> dp_cn = 'DiskPool'
> mp_cn = 'MemoryPool'
> pp_cn = 'ProcessorPool'
> np_cn = 'NetworkPool'
>
> def init_list(server, virt):
> - # Verify DiskPool on machine
> - status = create_diskpool_file()
> - if status != PASS:
> - return status, None
> -
> # Verify the Virtual network on machine
> vir_network = net_list(server, virt)
> if len(vir_network) > 0:
> @@ -68,7 +60,7 @@
> test_network)
> return SKIP, None
>
> - disk_instid = '%s/%s' % (dp_cn, test_dpath)
> + disk_instid = '%s/%s' % (dp_cn, default_pool_name)
> net_instid = '%s/%s' % (np_cn, test_network)
> mem_instid = '%s/0' % mp_cn
> proc_instid = '%s/0' % pp_cn
> @@ -78,7 +70,7 @@
> get_typed_class(virt, dp_cn) : [disk_instid, RASD_TYPE_DISK],
> get_typed_class(virt, np_cn) : [net_instid, RASD_TYPE_NET_ETHER]
> }
> - return status, pool_list
> + return PASS, pool_list
>
> def print_error(fieldname="", ret_value="", exp_value=""):
> logger.error("%s Mismatch", fieldname)
> @@ -113,6 +105,7 @@
> virt = "Xen"
> else:
> virt = main.options.virt
> +
> status, pool_list = init_list(ip, virt)
> if status != PASS:
> logger.error("Failed to initialise the list")
> @@ -149,7 +142,6 @@
> return FAIL
> status = verify_fields(pool_list, netpool, get_typed_class(virt, np_cn))
>
> - cleanup_restore(ip, virt)
> return status
>
> if __name__ == "__main__":
> diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/ResourcePool/02_rp_gi_errors.py
> --- a/suites/libvirt-cim/cimtest/ResourcePool/02_rp_gi_errors.py Fri Sep 05 15:43:10 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/ResourcePool/02_rp_gi_errors.py Fri Sep 05 15:43:10 2008 -0700
> @@ -36,9 +36,7 @@
> from distutils.file_util import move_file
> from CimTest.ReturnCodes import PASS, SKIP
> from CimTest.Globals import logger, CIM_USER, CIM_PASS, CIM_NS
> -from XenKvmLib.const import do_main
> -from XenKvmLib.common_util import cleanup_restore, test_dpath, \
> -create_diskpool_file
> +from XenKvmLib.const import do_main, default_pool_name
>
> sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
>
> @@ -95,11 +93,6 @@
> conn = assoc.myWBEMConnection('http://%s' % ip, (CIM_USER, CIM_PASS),
> CIM_NS)
>
> - # Verify DiskPool on machine
> - status = create_diskpool_file()
> - if status != PASS:
> - return status
> -
> # Verify the Virtual Network on the machine.
> vir_network = net_list(ip, virt)
> if len(vir_network) > 0:
> @@ -112,7 +105,6 @@
> if not ret:
> logger.error("Failed to create the Virtual Network '%s'",
> test_network)
> - cleanup_restore(ip, virt)
> return SKIP
> netid = "%s/%s" % ("NetworkPool", test_network)
>
> @@ -134,16 +126,13 @@
> ret_value = err_invalid_instid_keyname(conn, cn, instid)
> if ret_value != PASS:
> logger.error("------ FAILED: Invalid InstanceID Key Name.------")
> - cleanup_restore(ip, virt)
> return ret_value
>
> ret_value = err_invalid_instid_keyvalue(conn, cn)
> if ret_value != PASS:
> logger.error("------ FAILED: Invalid InstanceID Key Value.------")
> - cleanup_restore(ip, virt)
> return ret_value
>
> - cleanup_restore(ip, virt)
> return PASS
>
> if __name__ == "__main__":
> diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py
> --- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Fri Sep 05 15:43:10 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Fri Sep 05 15:43:10 2008 -0700
> @@ -60,17 +60,16 @@
> from VirtLib.live import virsh_version
> from CimTest.ReturnCodes import PASS, FAIL, SKIP
> from CimTest.Globals import logger, CIM_ERROR_GETINSTANCE, CIM_ERROR_ASSOCIATORS
> -from XenKvmLib.const import do_main
> +from XenKvmLib.const import do_main, default_pool_name, default_network_name
> from XenKvmLib.classes import get_typed_class
> -from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf, \
> - print_field_error
> -from XenKvmLib.const import default_network_name
> +from XenKvmLib.common_util import print_field_error
>
> platform_sup = ['Xen', 'KVM', 'XenFV', 'LXC']
>
> -memid = "%s/%s" % ("MemoryPool", 0)
> -procid = "%s/%s" % ("ProcessorPool", 0)
> -test_npool = default_network_name
> +memid = "MemoryPool/0"
> +procid = "ProcessorPool/0"
> +netid = "NetworkPool/%s" % default_network_name
> +diskid = "DiskPool/%s" % default_pool_name
>
> def get_or_bail(virt, ip, id, pool_class):
> """
> @@ -83,7 +82,6 @@
> except Exception, detail:
> logger.error(CIM_ERROR_GETINSTANCE, '%s' % pool_class)
> logger.error("Exception: %s", detail)
> - cleanup_restore(ip, virt)
> sys.exit(FAIL)
> return instance
>
> @@ -132,20 +130,14 @@
> dpool = npool = mpool = ppool = None
> pool_set = []
> try :
> - status, diskid = create_diskpool_conf(server, virt)
> - if status != PASS:
> - return status, pool_set, None
> -
> - dpool = get_pool_info(virt, server, diskid, poolname="DiskPool")
> + dpool = get_pool_info(virt, server, diskid, poolname="DiskPool")
> mpool = get_pool_info(virt, server, memid, poolname= "MemoryPool")
> ppool = get_pool_info(virt, server, procid, poolname= "ProcessorPool")
>
> - netid = "%s/%s" % ("NetworkPool", test_npool)
> npool = get_pool_info(virt, server, netid, poolname= "NetworkPool")
> if dpool.InstanceID == None or mpool.InstanceID == None \
> or npool.InstanceID == None or ppool.InstanceID == None:
> logger.error("Get pool None")
> - cleanup_restore(server, virt)
> return FAIL
> else:
> pool_set = [dpool, mpool, ppool, npool]
> @@ -204,12 +196,10 @@
>
> status, pool = get_pool_details(virt, server)
> if status != PASS:
> - cleanup_restore(server, virt)
> return FAIL
>
> status = verify_sdc_with_ac(virt, server, pool)
>
> - cleanup_restore(server, virt)
> return status
>
> if __name__ == "__main__":
>
> _______________________________________________
> Libvirt-cim mailing list
> Libvirt-cim at redhat.com
> https://www.redhat.com/mailman/listinfo/libvirt-cim
>