[Libvirt-cim] [PATCH] [TEST] Fixing 02_reverse.py of RAPF

Deepti B. Kalakeri deeptik at linux.vnet.ibm.com
Thu May 29 13:48:07 UTC 2008


# HG changeset patch
# User Deepti B. Kalakeri <deeptik at linux.vnet.ibm.com>
# Date 1212068877 25200
# Node ID 9fae4065c84575412d10f7c1ea07f153a934db4c
# Parent  3ac66cf562f082546883c1de0d748471b557cd39
[TEST] Fixing 02_reverse.py of RAPF.


Added the following extra steps:
1) Defining a domain.
2) Creating a diskpool and a netpool.

Without steps 1 and 2, the test case used to simply pass.
The test case will fail on KVM with the old libvirt-cim rpm, as expected, and will pass with the latest source that includes the fix Dan submitted yesterday.

Signed-off-by: Deepti B. Kalakeri <deeptik at linux.vnet.ibm.com>

diff -r 3ac66cf562f0 -r 9fae4065c845 suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py
--- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py	Fri May 30 00:24:45 2008 +0800
+++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py	Thu May 29 06:47:57 2008 -0700
@@ -6,6 +6,7 @@
 #    Guolian Yun <yunguol at cn.ibm.com>
 #    Kaitlin Rupert <karupert at us.ibm.com>
 #    Zhengang Li <lizg at cn.ibm.com>
+#    Deepti B. Kalakeri <deeptik at linux.vnet.ibm.com>
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public
@@ -25,69 +26,149 @@ import sys
 import sys
 from VirtLib import utils
 from XenKvmLib import assoc
-from XenKvmLib import devices
+from XenKvmLib.test_doms import destroy_and_undefine_all
+from XenKvmLib.vxml import get_class
 from XenKvmLib.classes import get_typed_class
 from CimTest import Globals
 from CimTest.Globals import logger, do_main
-from CimTest.ReturnCodes import PASS, FAIL, XFAIL
+from CimTest.ReturnCodes import PASS, FAIL
+from XenKvmLib import enumclass
+from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf, \
+create_netpool_conf
+
 
 sup_types = ['Xen', 'XenFV', 'KVM']
+test_dom    = "RAPF_dom"
+test_vcpus  = 1
+test_mem    = 128
+test_mac    = "00:11:22:33:44:aa"
+
+def setup_env(server, virt):
+    # Define (but do not start) a test domain so the RASD/pool
+    # associations have something to refer to.
+    # Returns a (status, vsxml) pair; vsxml is the guest XML wrapper.
+    destroy_and_undefine_all(server)
+    vsxml = None
+    # Xen paravirt guests use the xvd* disk namespace; XenFV and KVM
+    # use IDE-style hd* device names.
+    if virt == "Xen":
+        test_disk = "xvda"
+    else:
+        test_disk = "hda"
+
+    virtxml = get_class(virt)
+    vsxml = virtxml(test_dom, mem=test_mem, vcpus = test_vcpus,
+                    mac = test_mac, disk = test_disk)
+    try:
+        ret = vsxml.define(server)
+        if not ret:
+            logger.error("Failed to Define the domain: %s", test_dom)
+            return FAIL, vsxml 
+
+    except Exception, details:
+        logger.error("Exception : %s", details)
+        return FAIL, vsxml
+
+    return PASS, vsxml
+
+def get_rasd_or_pool_instid(server, virt, cn):
+    # Enumerate all instances of CIM class 'cn' keyed by InstanceID.
+    # Returns (instance_list, status); the list is empty on failure.
+    key_list = ["InstanceID"]
+    inst = []
+    try:
+        inst = enumclass.enumerate(server, cn, key_list, virt)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_ENUMERATE, cn)
+        return inst, FAIL
+    return inst, PASS
+
+def get_instance(server, virt, vsxml, cn, pool_list, app_val=0):
+    instances, status = get_rasd_or_pool_instid(server, virt, cn)
+    if status != PASS:
+        vsxml.undefine(server)    
+        return pool_list, status
+
+    if app_val == 1:
+        for inst in instances:
+            pool_list.append(inst.InstanceID)
+    return instances, pool_list, status
+
+
+def verify_pool_from_RAPF(server, virt, instances, pool_instid_list, cn):
+    pool = []
+    for inst in instances:
+        try:
+            pool = assoc.AssociatorNames(server, "ResourceAllocationFromPool",
+                                         cn, virt, InstanceID = inst.InstanceID)
+        except Exception:
+            logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES, inst.InstanceID)
+            status = FAIL
+
+        if len(pool) < 1:
+            logger.error("No associated pool for %s", inst.InstanceID)
+            return FAIL
+
+        if not pool[0]['InstanceID'] in pool_instid_list:
+            logger.error("InstanceID Mismatch")
+            return FAIL
+
+    return PASS
+
+def get_inst_verify_pool_from_RAPF(server, virt, vsxml, pool_cn, cn):
+    # Collect the InstanceIDs of every 'pool_cn' pool, enumerate the
+    # 'cn' RASD instances, and check that each RASD's associated pool
+    # (via ResourceAllocationFromPool) is one of the enumerated pools.
+    pool_list = []
+    # app_val=1: append each pool's InstanceID to pool_list.
+    pool, pool_list, status = get_instance(server, virt, vsxml, 
+                                           pool_cn, pool_list, app_val=1)
+    if status != PASS:
+        return status
+
+    # app_val=0: only enumerate the RASD instances; pool_list unchanged.
+    devinst, pool_list, status = get_instance(server, virt, vsxml, cn,
+                                              pool_list, app_val=0)
+    if status != PASS:
+        return status
+
+    status = verify_pool_from_RAPF(server, virt, devinst, pool_list, cn)
+
+    if status != PASS:
+        # Verification failed: remove the test domain before returning.
+        vsxml.undefine(server)    
+
+    return status
+    
 
 @do_main(sup_types)
 def main():
     options = main.options
     status = PASS
+    server = options.ip
+    virt = options.virt
+    
+    status,vsxml = setup_env(server, virt)
+    if status != PASS:
+        return status
 
-    key_list = ["DeviceID", "CreationClassName", "SystemName",
-                "SystemCreationClassName"]
-    try:
-        mem = devices.enumerate(options.ip, 'Memory', key_list, options.virt)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_ENUMERATE % 'Memory')
-        return FAIL
+    status, diskid = create_diskpool_conf(server, virt)
+    if status != PASS:
+        return status
 
-    try:
-        proc = devices.enumerate(options.ip, 'Processor', key_list, options.virt)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_ENUMERATE % 'Processor')
-        return FAIL
-        
-    for i in range(len(mem)):
-        try:
-            mempool = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
-                                            "MemResourceAllocationSettingData", 
-                                            options.virt,
-                                            InstanceID = mem[i].DeviceID)
-        except Exception:
-            logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % mem[i].DeviceID)
-            status = FAIL
+    status, test_network = create_netpool_conf(server, virt)
+    if status != PASS:
+        return status
 
-        if len(mempool) < 1:
-            logger.error("No associated pool for %s" % mem[i].DeviceID)
-            return FAIL
+    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml, 'MemoryPool', 
+                                           'MemResourceAllocationSettingData')
+    if status != PASS:
+        return status
 
-        if mempool[0].keybindings['InstanceID'] != "MemoryPool/0":
-            logger.error("MemResourceAllocationSettingData association error")
-            return FAIL
-            
-    for j in range(len(proc)):
-        try:
-            procpool = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
-                                             "ProcResourceAllocationSettingData",
-                                             options.virt,
-                                             InstanceID = proc[j].DeviceID)
-        except Exception:
-            logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % proc[j].DeviceID)
-            return FAIL
-            
-        if len(procpool) < 1:
-            logger.error("No associated pool for %s" % proc[j].DeviceID)
-            return FAIL
+    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml, 'ProcessorPool', 
+                                           'ProcResourceAllocationSettingData')
+    if status != PASS:
+        return status
 
-        if procpool[0].keybindings['InstanceID'] != "ProcessorPool/0":
-            logger.error("ProcResourceAllocationSettingData association failed")
-            status = FAIL
+    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml, 'DiskPool', 
+                                           'DiskResourceAllocationSettingData')
+    if status != PASS:
+        return status
 
+    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml, 'NetworkPool', 
+                                           'NetResourceAllocationSettingData')
+    if status != PASS:
+        return status
+
+    cleanup_restore(server, virt)
+    vsxml.undefine(server)    
     return status
 
 if __name__ == "__main__":




More information about the Libvirt-cim mailing list