[Libvirt-cim] [PATCH] Remove revision branches

Guo Lian Yun yunguol at cn.ibm.com
Mon Jun 16 07:50:26 UTC 2008


libvirt-cim-bounces at redhat.com wrote on 2008-06-11 05:33:03:

> # HG changeset patch
> # User Kaitlin Rupert <karupert at us.ibm.com>
> # Date 1213121071 25200
> # Node ID 58f7c9cf54677356b88b30e2b5a5db9685a15604
> # Parent  fdc71e98f2d1417fcd50bb8b2b6a91300a8296c4
> Remove revision branches.
> 
> These branches were needed to distinguish the behavior between the
> 0.3.0 release and the current head of the mercurial tree.  Now that
> the cimtest tree has been tagged, cimtest release_0_1 should
> correspond with the functionality in libvirt-cim 0.4.0.
> 
> Signed-off-by: Kaitlin Rupert <karupert at us.ibm.com>
> 
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/AllocationCapabilities/02_alloccap_gi_errs.py
> --- a/suites/libvirt-cim/cimtest/AllocationCapabilities/02_alloccap_gi_errs.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/AllocationCapabilities/02_alloccap_gi_errs.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -62,14 +62,12 @@
>  from XenKvmLib.test_doms import create_vnet
>  from CimTest.Globals import do_main, platform_sup
>  from XenKvmLib.classes import get_typed_class
> -from XenKvmLib.const import CIM_REV
>  from XenKvmLib.common_util import cleanup_restore, test_dpath, \
>  create_diskpool_file
> 
>  diskid = "%s/%s" % ("DiskPool", test_dpath)
>  memid = "%s/%s" % ("MemoryPool", 0)
>  procid = "%s/%s" % ("ProcessorPool", 0)
> -rev = 463
> 
>  sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
>  @do_main(sup_types) 
> @@ -112,8 +110,6 @@
>                                    'desc' : 'No InstanceID specified' },
>              "invalid_keyvalue" : { 'rc' : pywbem.CIM_ERR_NOT_FOUND,
>                                     'desc' : 'Instance not found' }}
> -    if CIM_REV < rev:
> -        exp['invalid_keyvalue']['desc'] = 'Object could not be found'
> 
>      ret_value = try_getinstance(conn, classname, keys, field_name=field,
>                                  expr_values=exp['invalid_keyvalue'], bug_no="")
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/ElementAllocatedFromPool/03_reverse_errs.py
> --- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/03_reverse_errs.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/03_reverse_errs.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -34,7 +34,6 @@
>  from CimTest import Globals
>  from XenKvmLib.test_doms import destroy_and_undefine_all
>  from XenKvmLib.common_util import try_assoc
> -from XenKvmLib.const import CIM_REV
>  from CimTest.ReturnCodes import PASS, FAIL 
>  from CimTest.Globals import do_main, platform_sup, logger
>  from XenKvmLib.vxml import get_class
> @@ -60,7 +59,6 @@
>                                          'desc'  : 'No such instance'
>                                      } 
>                }
> -libvirt_rev = 393
> 
>  def err_invalid_ccname():
>  # This is used to verify the that the 
> @@ -157,10 +155,6 @@
>                get_typed_class(virt, "ProcessorPool")
>               ]
> 
> -    if CIM_REV < libvirt_rev:
> -        expr_values['invalid_keyvalue']['desc'] = 'Invalid InstanceID or unsupported pool type'
> -        expr_values['invalid_keyvalue']['rc'] = pywbem.CIM_ERR_FAILED
> -
>      for classname in sorted(lelist):
>          keys = { "InstanceID" : "InvalidKeyValue" }
>          field = "InstanceID_KeyValue" 
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/ElementCapabilities/01_forward.py
> --- a/suites/libvirt-cim/cimtest/ElementCapabilities/01_forward.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/ElementCapabilities/01_forward.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -30,11 +30,9 @@
>  from CimTest.Globals import do_main, logger, CIM_ERROR_ASSOCIATORNAMES, \
>  CIM_ERROR_ENUMERATE
>  from CimTest.ReturnCodes import PASS, FAIL, SKIP
> -from XenKvmLib.const import CIM_REV
>  from XenKvmLib.enumclass import enumerate
> 
>  sup_types = ['Xen', 'XenFV', 'KVM', 'LXC']
> -ac_to_pool_version = 561
> 
>  def append_to_list(server, virt, poolname, valid_elc_id):
>      keys_list = ['InstanceID']
> @@ -87,11 +85,10 @@
>      valid_elc_id = ["ManagementCapabilities", 
>                      "MigrationCapabilities"]
> 
> -    if CIM_REV >= ac_to_pool_version:
> -        valid_elc_name.append(get_typed_class(virt, "AllocationCapabilities"))
> -        status, valid_elc_id = set_pool_info(server, virt, valid_elc_id)
> -        if status != PASS:
> -            return status
> +    valid_elc_name.append(get_typed_class(virt, "AllocationCapabilities"))
> +    status, valid_elc_id = set_pool_info(server, virt, valid_elc_id)
> +    if status != PASS:
> +        return status
> 
>      if len(elc) == 0:
>          logger.error("ElementCapabilities association failed, 
> excepted at least one instance")
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/HostSystem/02_hostsystem_to_rasd.py
> --- a/suites/libvirt-cim/cimtest/HostSystem/02_hostsystem_to_rasd.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/HostSystem/02_hostsystem_to_rasd.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -58,7 +58,6 @@
>  from CimTest.ReturnCodes import PASS, FAIL
>  from XenKvmLib.rasd import verify_procrasd_values, verify_netrasd_values, \
>  verify_diskrasd_values, verify_memrasd_values
> -from XenKvmLib.const import CIM_REV
> 
>  sup_types = ['Xen', 'KVM', 'XenFV']
> 
> @@ -67,8 +66,6 @@
>  test_vcpus  = 1
>  test_mem    = 128
>  test_mac    = "00:11:22:33:44:aa"
> -rev = 529
> -proc_instid_rev = 590
> 
>  def init_list(vsxml, virt="Xen"):
>      """
> @@ -103,11 +100,6 @@
>                                                "VirtualQuantity" : 
> (test_mem * 1024), 
>                                             }
>                    } 
> -    if CIM_REV < rev:
> -       rasd_values[mem_cn]['AllocationUnits'] = "MegaBytes"
> -
> -    if CIM_REV < proc_instid_rev:
> -       rasd_values[proc_cn]['InstanceID'] = '%s/%s' %(test_dom, 0)
> 
>      return rasd_values
> 
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py
> --- a/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -49,12 +49,10 @@
>  from CimTest.ReturnCodes import PASS, FAIL
>  from XenKvmLib.test_xml import testxml
>  from XenKvmLib.test_doms import destroy_and_undefine_all
> -from XenKvmLib.const import CIM_REV
> 
>  sup_types = ['Xen', 'KVM', 'XenFV']
>  test_dom = "domgst"
>  test_vcpus = 1
> -libvirtcim_sdc_rasd_rev = 571
> 
>  def setup_env(server, virt="Xen"):
>      status = PASS
> @@ -243,26 +241,6 @@
>              logger.error("In ResourceType for %s " % rt)
>              return FAIL
> 
> -        # The following properties have been removed in the patchset 571
> -        # but is present in the rpm libvirt-cim and hence retained it.
> -
> -        if CIM_REV < libvirtcim_sdc_rasd_rev:
> -            ppolicy = inst['PropertyPolicy']
> -            if ppolicy != 0 and ppolicy != 1:
> -                logger.error("In PropertyPolicy for %s " % ppolicy)
> -                return FAIL
> -
> -            vrole  = inst['ValueRole']
> -            if vrole < 0 or vrole > 4:
> -                logger.error("In ValueRole %s " % vrole)
> -                return FAIL
> -
> -            insid  = inst['InstanceID']
> -            vrange = rangelist[insid]
> -            if vrange != inst['ValueRange']:
> -                logger.error("In ValueRange for %s " % vrange)
> -                return FAIL
> -
>      except Exception, detail:
>          logger.error("Error checking RASD attribute values %s" % detail)
>          return FAIL
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/Memory/01_memory.py
> --- a/suites/libvirt-cim/cimtest/Memory/01_memory.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/Memory/01_memory.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -31,13 +31,11 @@
>  from XenKvmLib.vxml import XenXML, KVMXML, get_class
>  from CimTest.Globals import logger
>  from CimTest.Globals import do_main
> -from XenKvmLib.const import CIM_REV
> 
>  sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
> 
>  test_dom = "test_domain"
>  mem = 256 #MB
> -mem_change_version=585
> 
>  @do_main(sup_types)
>  def main():
> @@ -45,10 +43,7 @@
> 
>      vsxml = get_class(options.virt)(test_dom, mem)
>      vsxml.define(options.ip)
> -    if CIM_REV >= mem_change_version: 
> -        alloc_mem = int(vsxml.xml_get_mem())
> -    else:
> -        alloc_mem = int(vsxml.xml_get_mem())/1024
> +    alloc_mem = int(vsxml.xml_get_mem())
> 
>      devid = "%s/mem" % test_dom
>      key_list = { 'DeviceID' : devid,
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/RASD/01_verify_rasd_fields.py
> --- a/suites/libvirt-cim/cimtest/RASD/01_verify_rasd_fields.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/RASD/01_verify_rasd_fields.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -54,7 +54,6 @@
>  from XenKvmLib import vxml 
>  from XenKvmLib.classes import get_typed_class
>  from XenKvmLib import rasd 
> -from XenKvmLib.const import CIM_REV
>  from CimTest.Globals import logger
>  from CimTest.ReturnCodes import PASS, FAIL
> 
> @@ -64,8 +63,6 @@
>  test_vcpus  = 1
>  test_mem    = 128
>  test_mac    = "00:11:22:33:44:aa"
> -prev = 531
> -mrev = 529
> 
>  def init_list(xml, disk, virt="Xen"):
>      """
> @@ -93,10 +90,6 @@
>              "AllocationUnits" : "KiloBytes",
>              "VirtualQuantity" : (test_mem * 1024),
>              "CreationClassName" : get_typed_class(virt, rasd.masd_cn)}
> -    if CIM_REV < prev:
> -        procrasd['InstanceID'] = '%s/0' % test_dom
> -    if CIM_REV < mrev:
> -        memrasd['AllocationUnits'] = 'MegaBytes'
> 
>      return procrasd, netrasd, diskrasd, memrasd
> 
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/RASD/02_enum.py
> --- a/suites/libvirt-cim/cimtest/RASD/02_enum.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/RASD/02_enum.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -34,7 +34,6 @@
>  from XenKvmLib.vxml import get_class
>  from XenKvmLib.classes import get_typed_class
>  from XenKvmLib import rasd 
> -from XenKvmLib.const import CIM_REV
>  from CimTest.Globals import logger
>  from CimTest.ReturnCodes import PASS, FAIL
> 
> @@ -44,8 +43,6 @@
>  test_vcpus  = 1
>  test_mem    = 128
>  test_mac    = "00:11:22:33:44:aa"
> -prev = 531
> -mrev = 529
> 
>  def init_list(virt):
>      """
> @@ -73,10 +70,6 @@
>              "AllocationUnits" : "KiloBytes",
>              "VirtualQuantity" : (test_mem * 1024),
>              "CreationClassName" : get_typed_class(virt, rasd.masd_cn)}
> -    if CIM_REV < prev:
> -        proc['InstanceID'] = '%s/0' % test_dom
> -    if CIM_REV < mrev:
> -        mem['AllocationUnits'] = 'MegaBytes'
> 
>      return proc, net, disk, mem
> 
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/ReferencedProfile/01_verify_refprof.py
> --- a/suites/libvirt-cim/cimtest/ReferencedProfile/01_verify_refprof.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/ReferencedProfile/01_verify_refprof.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -79,12 +79,9 @@
>  from CimTest.Globals import do_main
>  from XenKvmLib.classes import get_typed_class
>  from CimTest.ReturnCodes import FAIL, PASS, SKIP
> -from XenKvmLib.const import CIM_REV
>  from XenKvmLib.common_util import print_field_error
> 
>  sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
> -
> -libvirtcim_rev = 501
> 
>  def get_proflist():
>      proflist = []
> @@ -167,12 +164,6 @@
>      server = options.ip
>      status = PASS
> 
> -    # Referenced Profile was introduced as part of changeset 501 
> -    # and is not available in the libvirt-cim rpm, hence skipping tc
> -    # if CIM_REV < 501
> -    if CIM_REV < libvirtcim_rev:
> -        return SKIP
> -
>      prev_namespace = Globals.CIM_NS
>      Globals.CIM_NS = 'root/interop'
>      reg_classname = get_typed_class(virt, 'RegisteredProfile')
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/ReferencedProfile/02_refprofile_errs.py
> --- a/suites/libvirt-cim/cimtest/ReferencedProfile/02_refprofile_errs.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/ReferencedProfile/02_refprofile_errs.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -60,10 +60,8 @@
>  from XenKvmLib.classes import get_typed_class
>  from CimTest.ReturnCodes import FAIL, PASS, SKIP
>  from XenKvmLib.common_util import try_assoc
> -from XenKvmLib.const import CIM_REV
> 
>  sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
> -libvirtcim_rev = 501
> 
>  expr_values = {
>                  'INVALID_Instid_KeyName'  :  {
> @@ -124,11 +122,6 @@
>      virt = options.virt
>      server = options.ip
>      status = PASS
> -    # Referenced Profile was introduced as part of changeset 501 
> -    # and is not available in the libvirt-cim rpm, hence skipping tc
> -    # if CIM_REV < 501
> -    if CIM_REV < libvirtcim_rev:
> -        return SKIP
> 
>      prev_namespace = Globals.CIM_NS
>      Globals.CIM_NS = 'root/interop'
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/ResourceAllocationFromPool/03_forward_errs.py
> --- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/03_forward_errs.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/03_forward_errs.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -28,11 +28,9 @@
>  from CimTest import Globals
>  from CimTest.Globals import logger, do_main
>  from CimTest.ReturnCodes import PASS, FAIL, XFAIL
> -from XenKvmLib.const import CIM_REV
> 
>  sup_types = ['Xen', 'XenFV', 'KVM']
> 
> -rev = 466
> 
>  @do_main(sup_types)
>  def main():
> @@ -49,9 +47,6 @@
> 
>      exp_rc = 6 #CIM_ERR_NOT_FOUND
>      exp_desc = "No such instance (wrong) - resource pool type mismatch"
> -    if CIM_REV < rev:
> -        exp_rc = 1 # CIM_ERR_FAILED
> -        exp_desc = "Invalid InstanceID or unsupported pool type"
> 
>      for k, v in poollist.items():
>          instanceref = CIMInstanceName(k, 
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/SettingsDefine/01_forward.py
> --- a/suites/libvirt-cim/cimtest/SettingsDefine/01_forward.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/SettingsDefine/01_forward.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -32,7 +32,6 @@
>  from XenKvmLib import assoc
>  from XenKvmLib import devices
>  from XenKvmLib.classes import get_typed_class
> -from XenKvmLib.const import CIM_REV
>  from CimTest import Globals
>  from CimTest.Globals import do_main
>  from CimTest.ReturnCodes import PASS, FAIL 
> @@ -42,8 +41,6 @@
>  test_dom = "domu1"
>  test_mac = "00:11:22:33:44:aa"
>  test_vcpus = 1
> -proc_instid_rev = 590
> -
> 
>  def print_error(cn, detail):
>      Globals.logger.error(Globals.CIM_ERROR_GETINSTANCE, cn)
> @@ -96,7 +93,7 @@
>      for cn in cn_id.keys():
>          key_list = get_keys(cn, cn_id[cn], 'ComputerSystem', options.virt)
> 
> -        if CIM_REV >= proc_instid_rev and cn == 'Processor':
> +        if cn == 'Processor':
>              exp_inst_id_val[cn] = "%s/%s" % (test_dom, "proc") 
>          else:
>              exp_inst_id_val[cn] = key_list['DeviceID']
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/SettingsDefine/02_reverse.py
> --- a/suites/libvirt-cim/cimtest/SettingsDefine/02_reverse.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/SettingsDefine/02_reverse.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -59,8 +59,6 @@
>  from XenKvmLib import vxml
>  from XenKvmLib.classes import get_typed_class, get_class_basename
>  from XenKvmLib.rasd import InstId_err
> -from XenKvmLib.const import CIM_REV
> -
> 
>  sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
> 
> @@ -68,9 +66,6 @@
>  test_vcpus  = 1
>  test_mem    = 128
>  test_mac    = "00:11:22:33:44:aa"
> -proc_instid_rev = 590
> -
> -
> 
>  def call_assoc(ip, inst, exp_id, ccn, virt):
>      if inst['InstanceID'] != exp_id:
> @@ -196,9 +191,6 @@
>              'DiskResourceAllocationSettingData' : '%s/%s' % 
> (test_dom, test_disk),
>              'MemResourceAllocationSettingData'  : '%s/%s' % 
> (test_dom, 'mem')}
> 
> -    if CIM_REV < proc_instid_rev :
> -        rasd_devid['ProcResourceAllocationSettingData'] = "%s/%s" % (test_dom, "0")
> -
>      global dev_devid
>      dev_devid = {
>              'Processor'   : '%s/%s' % (test_dom, test_vcpus-1),
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py
> --- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -65,14 +65,11 @@
>  from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf, \
>  create_netpool_conf
>  from XenKvmLib.common_util import print_field_error
> -from XenKvmLib.const import CIM_REV
> 
>  platform_sup = ['Xen', 'KVM', 'XenFV', 'LXC']
> 
>  memid = "%s/%s" % ("MemoryPool", 0)
>  procid = "%s/%s" % ("ProcessorPool", 0)
> -libvirtcim_sdc_rasd_rev = 571
> -
> 
>  def get_or_bail(virt, ip, id, pool_class):
>      """
> @@ -165,27 +162,6 @@
>                                rtype[cllist[loop]])
>              return FAIL 
> 
> -        # The following properties have been removed in the patchset 571
> -        # but is present in the rpm libvirt-cim and hence retained it.
> -
> -        if CIM_REV < libvirtcim_sdc_rasd_rev:
> -            ppolicy = inst['PropertyPolicy']
> -            if ppolicy != 0 and ppolicy != 1:
> -                print_field_error("PropertyPolicy", inst['PropertyPolicy'], 
> -                                   ppolicy)
> -                return FAIL 
> -
> -            vrole  = inst['ValueRole']
> -            if vrole < 0 or vrole > 4:
> -                print_field_error("ValueRole", inst['ValueRole'], vrole)
> -                return FAIL 
> -
> -            insid  = inst['InstanceID']
> -            vrange = rangelist[insid]
> -            if vrange != inst['ValueRange']:
> -                print_field_error("ValueRange", inst['ValueRange'], vrange)
> -                return FAIL 
> -
>      return PASS
> 
>  def verify_sdc_with_ac(virt, server, dpool, npool, mpool, ppool):
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/VSSD/04_vssd_to_rasd.py
> --- a/suites/libvirt-cim/cimtest/VSSD/04_vssd_to_rasd.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/VSSD/04_vssd_to_rasd.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -53,7 +53,6 @@
>  from XenKvmLib.classes import get_typed_class
>  from XenKvmLib.rasd import verify_procrasd_values, verify_netrasd_values, \
>  verify_diskrasd_values, verify_memrasd_values 
> -from XenKvmLib.const import CIM_REV
> 
>  sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
> 
> @@ -61,8 +60,6 @@
>  test_vcpus  = 1
>  test_mem    = 128
>  test_mac    = "00:11:22:33:44:aa"
> -proc_rev = 531
> -mem_rev = 529
> 
>  def setup_env(virt): 
>      vsxml_info = None
> @@ -97,8 +94,6 @@
>                   "ResourceType" : 3,
>                   "CreationClassName": get_typed_class(virt, 
> 'ProcResourceAllocationSettingData')
>                  }
> -    if CIM_REV < proc_rev:
> -        procrasd['InstanceID'] = '%s/%s' %(test_dom, "0")
> 
>      netrasd = {
>                  "InstanceID"  : '%s/%s' %(test_dom,test_mac), 
> @@ -122,8 +117,7 @@
>                 "VirtualQuantity" : (test_mem * 1024), 
>                 "CreationClassName": get_typed_class(virt, 
> 'MemResourceAllocationSettingData')
>                }
> -    if CIM_REV < mem_rev:
> -        memrasd['AllocationUnits'] = "MegaBytes"
> +
>      return procrasd, netrasd, diskrasd, memrasd
> 
>  def get_inst_from_list(classname, vssd_list, filter_name, exp_val):
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/VirtualSystemManagementService/02_destroysystem.py
> --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/02_destroysystem.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/02_destroysystem.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -28,14 +28,12 @@
>  from VirtLib.live import domain_list, active_domain_list
>  from XenKvmLib import vsms, vxml
>  from XenKvmLib.classes import get_typed_class
> -from XenKvmLib.const import CIM_REV
>  from CimTest.Globals import do_main
>  from CimTest.Globals import logger
>  from CimTest.ReturnCodes import PASS, FAIL
> 
>  sup_types = ['Xen', 'KVM', 'XenFV']
>  default_dom = 'test_domain'
> -rev = 528
> 
>  @do_main(sup_types)
>  def main():
> @@ -50,11 +48,7 @@
>      cs_ref = CIMInstanceName(classname, keybindings = {
>                                          'Name':default_dom,
>                                          'CreationClassName':classname})
> -    if CIM_REV < rev:
> -        dl_func = active_domain_list
> -    else:
> -        dl_func = domain_list
> -    list_before = dl_func(options.ip, options.virt)
> +    list_before = domain_list(options.ip, options.virt)
>      status = PASS
>      rc = -1
> 
> @@ -66,7 +60,7 @@
>          logger.error(details)
>          status = FAIL
> 
> -    list_after = dl_func(options.ip, options.virt)
> +    list_after = domain_list(options.ip, options.virt)
> 
>      status = PASS
>      if default_dom not in list_before:
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/VirtualSystemManagementService/05_destroysystem_neg.py
> --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/05_destroysystem_neg.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/05_destroysystem_neg.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -27,14 +27,12 @@
>  from VirtLib import utils
>  from XenKvmLib import vsms
>  from XenKvmLib.classes import get_typed_class
> -from XenKvmLib.const import CIM_REV
>  from XenKvmLib.test_doms import undefine_test_domain
>  from CimTest.Globals import logger
>  from CimTest.Globals import do_main
>  from CimTest.ReturnCodes import FAIL, PASS, SKIP
> 
>  sup_types = ['Xen', 'KVM', 'XenFV']
> -vsms_status_version = 534
> 
>  def destroysystem_fail(tc, options):
>      service = vsms.get_vsms_class(options.virt)(options.ip)
> @@ -45,25 +43,19 @@
>          cs_ref = CIMInstanceName(classname, 
>                                keybindings = {'CreationClassName':classname})
> 
> -        if CIM_REV < vsms_status_version:
> -            exp_rc = 2 #IM_RC_FAILED
> -        else:
> -            exp_value = { 'rc'    : pywbem.CIM_ERR_FAILED,
> -                          'desc'  : 'CIM_ERR_FAILED: Unable to retrieve domain\
> +        exp_value = { 'rc'    : pywbem.CIM_ERR_FAILED,
> +                      'desc'  : 'CIM_ERR_FAILED: Unable to retrieve domain\
>   name.'
> -                        }
> +                    }
> 
>      elif tc == 'nonexistent':
>          cs_ref = CIMInstanceName(classname,keybindings = {
>                                  'Name':'##@@!!cimtest_domain',
>                                  'CreationClassName':classname})
> 
> -        if CIM_REV < vsms_status_version:
> -            exp_rc = 4 #IM_RC_SYS_NOT_FOUND
> -        else:
> -            exp_value = { 'rc'   : pywbem.CIM_ERR_FAILED,
> -                          'desc' : 'CIM_ERR_FAILED: Failed to find 
domain' 
> -                        }
> +        exp_value = { 'rc'   : pywbem.CIM_ERR_FAILED,
> +                      'desc' : 'CIM_ERR_FAILED: Failed to find domain' 
> +                    }
> 
>      else:
>          return SKIP
> @@ -71,25 +63,15 @@
>      status = FAIL
>      try:
>          ret = service.DestroySystem(AffectedSystem=cs_ref)
> -        if CIM_REV < vsms_status_version:
> -            if ret[0] == exp_rc:
> -                logger.info('destroy_fail>>%s: Got expected return code 
%s', 
> -                            tc, exp_rc)
> -                return PASS 
> -            else:
> -                logger.error('destroy_fail>>%s: Got rc: %s, but we 
> expect %s',
> -                            tc, ret[0], exp_rc)
> -                return FAIL 
> 
>      except Exception, details:
> -        if CIM_REV >= vsms_status_version:
> -            err_no   = details[0]
> -            err_desc = details[1]
> -            if err_no == exp_value['rc'] and err_desc == exp_value['desc']:
> -                logger.error("For Invalid Scenario '%s'", tc)
> -                logger.info('Got expected error no: %s', err_no)
> -                logger.info('Got expected error desc: %s',err_desc)
> -                return PASS
> +        err_no   = details[0]
> +        err_desc = details[1]
> +        if err_no == exp_value['rc'] and err_desc == exp_value['desc']:
> +            logger.error("For Invalid Scenario '%s'", tc)
> +            logger.info('Got expected error no: %s', err_no)
> +            logger.info('Got expected error desc: %s',err_desc)
> +            return PASS
> 
>          logger.error('destroy_fail>> %s: Error executing DestroySystem' % tc)
>          logger.error(details)
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/VirtualSystemSettingDataComponent/02_reverse.py
> --- a/suites/libvirt-cim/cimtest/VirtualSystemSettingDataComponent/02_reverse.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/VirtualSystemSettingDataComponent/02_reverse.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -55,7 +55,6 @@
>  from XenKvmLib.rasd import InstId_err
>  from CimTest.Globals import logger, do_main
>  from CimTest.ReturnCodes import PASS, FAIL
> -from XenKvmLib.const import CIM_REV
> 
>  sup_types = ['Xen', 'XenFV', 'KVM']
> 
> @@ -63,7 +62,6 @@
>  test_vcpus  = 1
>  test_mem    = 128
>  test_mac    = "00:11:22:33:44:aa"
> -rev = 531
> 
>  def check_rasd_values(id, exp_id):
>      try:
> @@ -90,8 +88,6 @@
>                   "disk_rasd" : '%s/%s' %(test_dom, test_disk),
>                   "mem_rasd"  : '%s/%s' %(test_dom, "mem")
>                  }
> -    if CIM_REV < rev:
> -        rasd_list['proc_rasd'] = '%s/%s' %(test_dom, "0")
> 
>      try: 
>          if len(assoc_info) <= 0: 
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/cimtest/VirtualSystemSettingDataComponent/03_vssdc_fwd_errs.py
> --- a/suites/libvirt-cim/cimtest/VirtualSystemSettingDataComponent/03_vssdc_fwd_errs.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/VirtualSystemSettingDataComponent/03_vssdc_fwd_errs.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -62,14 +62,12 @@
>  from CimTest.ReturnCodes import PASS, FAIL
>  from CimTest.Globals import do_main, logger
>  from CimTest.Globals import CIM_USER, CIM_PASS, CIM_NS
> -from XenKvmLib.const import CIM_REV
> 
>  sup_types = ['Xen', 'XenFV', 'KVM']
> 
>  test_dom     = "domu1"
>  test_mac     = "00:11:22:33:44:aa"
>  test_vcpus   = 1
> -rev = 417
> 
>  expr_values = {
>      "INVALID_InstID_Keyname"   : { 'rc'   : pywbem.CIM_ERR_FAILED, \
> @@ -77,10 +75,6 @@
>      "INVALID_InstID_Keyval"    : { 'rc'   : pywbem.CIM_ERR_NOT_FOUND, \
>                       'desc' : 'No such instance (INVALID_InstID_Keyval)'}
>  }
> -
> -if CIM_REV < rev:
> -    expr_values['INVALID_InstID_Keyval'] = {'rc' : pywbem.CIM_ERR_FAILED, \
> -                                            'desc' : 'Invalid InstanceID'}
> 
>  def try_invalid_assoc(classname, name_val, i, field, virt="Xen"):
>      ac_classname = get_typed_class(virt, "VirtualSystemSettingDataComponent")
> diff -r fdc71e98f2d1 -r 58f7c9cf5467 suites/libvirt-cim/lib/XenKvmLib/common_util.py
> --- a/suites/libvirt-cim/lib/XenKvmLib/common_util.py   Fri May 23 10:39:01 2008 -0700
> +++ b/suites/libvirt-cim/lib/XenKvmLib/common_util.py   Tue Jun 10 11:04:31 2008 -0700
> @@ -33,17 +33,11 @@
>  from XenKvmLib.classes import get_typed_class
>  from CimTest.Globals import logger, log_param, CIM_ERROR_ENUMERATE
>  from CimTest.ReturnCodes import PASS, FAIL, XFAIL_RC
> -from XenKvmLib.const import CIM_REV
>  from VirtLib.live import diskpool_list, virsh_version, net_list
>  from XenKvmLib.vxml import PoolXML, NetXML
> 
>  test_dpath = "foo"
> -diskpoolconf_rev = 558
> -
> -if CIM_REV < diskpoolconf_rev:
> -    disk_file = '/tmp/diskpool.conf'
> -else:
> -    disk_file = '/etc/libvirt/diskpool.conf'
> +disk_file = '/etc/libvirt/diskpool.conf'
> 
>  back_disk_file = disk_file + "." + "backup"
> 
> @@ -276,11 +270,10 @@
>      status = PASS
>      libvirt_version = virsh_version(server, virt)
>      # The conf file is not present on  the machine if 
> -    # libvirt_version >= 0.4.1 and CIM_REV > 558
> +    # libvirt_version >= 0.4.1
>      # Hence Skipping the logic to delete the new conf file
>      # and just returning PASS
> -    if libvirt_version >= '0.4.1' and \
> -       CIM_REV > diskpoolconf_rev:
> +    if libvirt_version >= '0.4.1':
>          return status
>      try:
>          if os.path.exists(back_disk_file):
> @@ -328,8 +321,7 @@
> 
>  def create_diskpool_conf(server, virt):
>      libvirt_version = virsh_version(server, virt)
> -    if libvirt_version >= '0.4.1' and \
> -       CIM_REV > diskpoolconf_rev:
> +    if libvirt_version >= '0.4.1':
>          status, dpoolname = create_diskpool(server, virt=virt)
>          diskid = "%s/%s" % ("DiskPool", dpoolname)
>      else:
> 
  +1 from me. 
  Would you please also update ResourceAllocationFromPool.02 to remove its
  revision branch? And do we still need the global CIM_REV variable going
  forward? If not, would you please also update the related files, such as
  const.py and main.py?
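
  In case it helps with the CIM_REV question: a quick way to check is to scan
  the cimtest tree for any remaining references before dropping the global.
  The snippet below is only an illustrative sketch, not part of the patch, and
  the suites/libvirt-cim root path plus the helper name are assumptions:

  import os
  import re

  # Walk the cimtest tree and report every remaining CIM_REV reference, so the
  # leftover revision branches (e.g. ResourceAllocationFromPool.02) and the
  # CIM_REV plumbing in const.py can be removed with confidence.
  ROOT = "suites/libvirt-cim"            # assumed checkout-relative path
  PATTERN = re.compile(r"\bCIM_REV\b")

  def find_cim_rev_refs(root):
      hits = []
      for dirpath, dirnames, filenames in os.walk(root):
          for name in filenames:
              if not name.endswith(".py"):
                  continue
              path = os.path.join(dirpath, name)
              for lineno, line in enumerate(open(path), 1):
                  if PATTERN.search(line):
                      hits.append((path, lineno, line.rstrip()))
      return hits

  if __name__ == "__main__":
      for path, lineno, line in find_cim_rev_refs(ROOT):
          print("%s:%d: %s" % (path, lineno, line))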

  Thanks!
> _______________________________________________
> Libvirt-cim mailing list
> Libvirt-cim at redhat.com
> https://www.redhat.com/mailman/listinfo/libvirt-cim
