[Libvirt-cim] [PATCH 2 of 2] [TEST] Update tests to use get_exp_template_rasd_len()

Kaitlin Rupert kaitlin at linux.vnet.ibm.com
Mon Apr 20 18:35:59 UTC 2009


# HG changeset patch
# User Kaitlin Rupert <karupert at us.ibm.com>
# Date 1240252517 25200
# Node ID e2dcfef8f97127a0d191f0f148c62a0ce49a21a5
# Parent  100c1a61b0b48a5afdfed3758bc01e0ea8d29b08
[TEST] Update tests to use get_exp_template_rasd_len()

This way, the template RASD calculation only needs to be updated in one
place if it changes in the future.

Signed-off-by: Kaitlin Rupert <karupert at us.ibm.com>
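
The helper itself is not shown in this patch (it was introduced in patch 1
of 2), but its behavior can be reconstructed from the revision checks the
two hunks below remove. The following is a minimal sketch only, assuming the
helper keeps the same revision constants and folds together the Xen-only
check from the HostSystem test and the Xen/XenFV check from the
SettingsDefineCapabilities test; the real XenKvmLib/rasd.py code may differ
in detail:

    from XenKvmLib.const import get_provider_version
    from XenKvmLib.pool import enum_volumes

    libvirt_rasd_template_changes = 707
    libvirt_rasd_new_changes = 805
    libvirt_rasd_dpool_changes = 839

    def get_exp_template_rasd_len(virt, ip, pool_id):
        # Each pool advertises one template RASD per setting type:
        # Default, Minimum, Maximum, and Increment.
        exp_len = 4

        if 'DiskPool' in pool_id:
            curr_cim_rev, changeset = get_provider_version(virt, ip)

            if virt == 'Xen' or virt == 'XenFV':
                # One set of 4 for each of PV and FV guests, hence
                # 4 * 2 = 8 records; later revisions doubled this again.
                if libvirt_rasd_template_changes <= curr_cim_rev < \
                   libvirt_rasd_new_changes:
                    exp_len = 8
                elif curr_cim_rev >= libvirt_rasd_new_changes:
                    exp_len = 16

            elif virt == 'KVM':
                if libvirt_rasd_new_changes <= curr_cim_rev < \
                   libvirt_rasd_dpool_changes:
                    exp_len = 8
                elif curr_cim_rev >= libvirt_rasd_dpool_changes:
                    # One set of 4 template RASDs per storage volume
                    # in the pool.
                    exp_len = enum_volumes(virt, ip) * 4

        return exp_len

Centralizing this in one function means the next provider revision that
changes the template RASD count only touches XenKvmLib/rasd.py, rather than
every test that enumerates SettingsDefineCapabilities.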

diff -r 100c1a61b0b4 -r e2dcfef8f971 suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py
--- a/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py	Mon Apr 20 11:35:00 2009 -0700
+++ b/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py	Mon Apr 20 11:35:17 2009 -0700
@@ -29,13 +29,15 @@
 # Steps:
 #  1. Create a guest.
 #  2. Enumerate the HostSystem .
-#  3. Using the HostedResourcePool association, get the HostSystem instances on the system
-#  4. Using the ElementCapabilities association get the ProcessorPool, MemPool, DiskPool &
-#     NetPool instances on the system.
-#  5. Using the SettingsDefineCapabilities association on the AllocationCapabilities, get 
-#     the (Default, Minimum, Maximum and Increment) instances for ProcRASD.
-#  6. Similarly for the MemRASD, DiskRASD & NetRASD get the SettingDefineCap assocn and \
-#     get the instances for (Def, Min, Max and Inc).
+#  3. Using the HostedResourcePool association, get the HostSystem instances 
+#      on the system
+#  4. Using the ElementCapabilities association get the ProcessorPool, 
+#      MemPool, DiskPool & NetPool instances on the system.
+#  5. Using the SettingsDefineCapabilities association on the 
+#      AllocationCapabilities, get the (Default, Minimum, Maximum and
+#      Increment) instances for ProcRASD.
+#  6. Similarly for the MemRASD, DiskRASD & NetRASD get the SettingDefineCap 
+#      assocn and get the instances for (Def, Min, Max and Inc).
 #
 # Feb 13 2008
 
@@ -50,15 +52,11 @@
 from CimTest.ReturnCodes import PASS, FAIL
 from XenKvmLib.test_xml import testxml
 from XenKvmLib.test_doms import destroy_and_undefine_all
-from XenKvmLib.const import get_provider_version
-from XenKvmLib.pool import enum_volumes
+from XenKvmLib.rasd import get_exp_template_rasd_len
 
 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
 test_dom = "domgst_test"
 test_vcpus = 1
-libvirt_rasd_template_changes = 707
-libvirt_rasd_new_changes = 805
-libvirt_rasd_dpool_changes = 839
 
 def setup_env(server, virt="Xen"):
     status = PASS
@@ -173,7 +171,8 @@
                                      InstanceID = inst['InstanceID'])
 
             if len(assoc_info) < 1:
-                logger.error("'%s' has returned %i objects", an, len(assoc_info))
+                logger.error("'%s' has returned %i objects", an, 
+                             len(assoc_info))
                 status = FAIL
                 return status, alloccap
 
@@ -181,10 +180,10 @@
                 if c != inst.classname:
                     continue
                 status, setdefcap = get_inst_from_list(an,
-                                                      c,
-                                                      assoc_info,
-                                                      filter,
-                                                      rt )
+                                                       c,
+                                                       assoc_info,
+                                                       filter,
+                                                       rt )
                 if status != FAIL:
                     alloccap.append(setdefcap) 
 
@@ -216,25 +215,7 @@
                                      ccn,
                                      InstanceID = ap['InstanceID'])
 
-            curr_cim_rev, changeset = get_provider_version(virt, server)
-            exp_len = 4
-            if 'DiskPool' in ap['InstanceID']:
-                # For Diskpool, we have info 1 for each of Min, Max, 
-                # default, Increment and 1 for each of PV and FV 
-                # hence 4 * 2 = 8 records
-                if virt == 'Xen':
-                    if curr_cim_rev >= libvirt_rasd_template_changes and \
-                       curr_cim_rev < libvirt_rasd_new_changes:
-                        exp_len = 8
-                    if curr_cim_rev >= libvirt_rasd_new_changes:
-                        exp_len = 16
-                if virt == 'KVM':
-                    if curr_cim_rev >= libvirt_rasd_new_changes and \
-                       curr_cim_rev < libvirt_rasd_dpool_changes:
-                        exp_len = 8
-                    if curr_cim_rev >= libvirt_rasd_dpool_changes:
-                        volumes = enum_volumes(virt, server)
-                        exp_len = volumes * 4
+            exp_len = get_exp_template_rasd_len(virt, server, ap['InstanceID'])
 
             if len(assoc_info) != exp_len:
                 logger.error("'%s' returned %i RASD objects instead of %i", 
diff -r 100c1a61b0b4 -r e2dcfef8f971 suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py
--- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py	Mon Apr 20 11:35:00 2009 -0700
+++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py	Mon Apr 20 11:35:17 2009 -0700
@@ -63,13 +63,9 @@
 from XenKvmLib.const import do_main, default_pool_name, default_network_name
 from XenKvmLib.classes import get_typed_class
 from XenKvmLib.common_util import print_field_error
-from XenKvmLib.const import get_provider_version
-from XenKvmLib.pool import enum_volumes
+from XenKvmLib.rasd import get_exp_template_rasd_len 
 
 platform_sup = ['Xen', 'KVM', 'XenFV', 'LXC']
-libvirt_rasd_template_changes = 707
-libvirt_rasd_new_changes = 805
-libvirt_rasd_dpool_changes = 839
 
 memid = "MemoryPool/0"
 procid = "ProcessorPool/0"
@@ -173,25 +169,7 @@
             assoc_info = assoc.Associators(server, assoc_cname, cn, 
                                            InstanceID = instid)  
 
-            curr_cim_rev, changeset = get_provider_version(virt, server)
-            exp_len = 4
-            if 'DiskPool' in instid:
-                # For Diskpool, we have info 1 for each of Min, Max, 
-                # default, Increment and 1 for each of PV and FV 
-                # hence 4 * 2 = 8 records
-                if virt == 'Xen' or virt == 'XenFV':
-                    if curr_cim_rev >= libvirt_rasd_template_changes and \
-                       curr_cim_rev < libvirt_rasd_new_changes:
-                        exp_len = 8
-                    if curr_cim_rev >= libvirt_rasd_new_changes:  
-                        exp_len = 16
-                if virt == 'KVM':
-                    if curr_cim_rev >= libvirt_rasd_new_changes and \
-                       curr_cim_rev < libvirt_rasd_dpool_changes:
-                        exp_len = 8
-                    if curr_cim_rev >= libvirt_rasd_dpool_changes:
-                        volumes = enum_volumes(virt, server)
-                        exp_len = volumes * 4
+            exp_len = get_exp_template_rasd_len(virt, server, instid)
 
             if len(assoc_info) != exp_len:
                 logger.error("%s returned %i ResourcePool objects instead"
