[Libvirt-cim] [PATCH] [TEST] #3 Adding new tc to verify fs storage pool creation

Deepti B. Kalakeri deeptik at linux.vnet.ibm.com
Tue Jun 30 13:53:55 UTC 2009


# HG changeset patch
# User Deepti B. Kalakeri<deeptik at linux.vnet.ibm.com>
# Date 1246370002 25200
# Node ID 479f287a17fd08e4e22ac37459393f6f8327315a
# Parent  fe9471d9dd3372b673da5596a18cc49e553b13fa
[TEST] #3 Adding new tc to verify fs storage pool creation.

Updates in patch 3:
-------------------
1) Used the pre_check from XenKvmLib.common_util
2) Imported PASS/FAIL from CimTest.ReturnCodes
3) Modified verify_pool()
4) Modified cleanup()
5) Fixed some minor typos

Update in patch 2:
------------------
1) Rearranged the import statements
2) Added a check to see if the CIM server is started
3) Added an option to clean the old log
4) Added an option to get the debug messages on stdout
5) Added LXC support
6) Moved the loop that sets the pool settings into a function
7) Renamed virt_type to virt and used KVM for the check when setting vuri



Patch 1:
--------
This tc will not be run in the batch mode of cimtest and hence needs to
be run individually using the command below.

python create_verify_storagepool.py -t 2  -d /dev/sda4 -m /tmp/mnt -n diskfs
         -v Xen -u <username> -p <passwd>
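
For KVM the invocation is identical apart from the -v flag, for example
(assuming /dev/sdb1 is a free partition on the test machine):

python create_verify_storagepool.py -t 2  -d /dev/sdb1 -m /tmp/mnt -n diskfs
         -v KVM -u <username> -p <passwd>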

Tested with Xen on RHEL with current sources for the fs type pool.
Will update the patch to include logical pool verification as well.
Signed-off-by: Deepti B. Kalakeri <deeptik at linux.vnet.ibm.com>

diff -r fe9471d9dd33 -r 479f287a17fd suites/libvirt-cim/misc_cimtests/create_verify_storagepool.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/misc_cimtests/create_verify_storagepool.py	Tue Jun 30 06:53:22 2009 -0700
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+#
+# Copyright 2009 IBM Corp.
+#
+# Authors:
+#    Deepti B. Kalakeri<dkalaker at in.ibm.com> 
+#    
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+#
+# This test case exercises the CreateChildResourcePool service
+# supplied by the RPCS provider.
+# This tc verifies the FileSystem type storage pool.
+#
+# The test case is not part of the batch run and needs to be run using
+# the following command:
+# python create_verify_storagepool.py -t 2  -d /dev/sda4 -m /tmp/mnt -n diskfs 
+#         -v Xen -u <username> -p <passwd>
+# 
+# Where -t can be:
+#       2 - FileSystem
+#       4 - Logical, etc.
+# 
+# 
+#                                                         Date : 27.06.2009
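+#
+# Overall flow: fetch the default DiskPool RASD templates via
+# get_pool_rasds(), customize one with the partition and mount point,
+# invoke CreateChildResourcePool() on the RPCS, verify the new pool via
+# EnumInstances() and finally remove it with DeleteResourcePool().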
+
+import os
+import sys
+from optparse import OptionParser
+from commands import getstatusoutput
+from pywbem import WBEMConnection, cim_types, CIMInstanceName
+sys.path.append('../../../lib')
+from CimTest import Globals
+from CimTest.Globals import logger, log_param
+from CimTest.ReturnCodes import PASS, FAIL
+sys.path.append('../lib')
+from XenKvmLib.classes import inst_to_mof, get_typed_class
+from XenKvmLib.pool import get_pool_rasds
+from XenKvmLib.common_util import pre_check
+from XenKvmLib.enumclass import EnumInstances
+
+TEST_LOG = "cimtest.log"
+
+supp_types = [ 'Xen', 'KVM' , 'LXC' ]
+pool_types = { 'DISK_POOL_FS' : 2 }
+
+def verify_cmd_options(options, parser):
+    try: 
+        if options.part_dev == None:
+            raise Exception("Free partition to be mounted not specified")
+
+        if options.mnt_pt == None:
+            raise Exception("Mount point to be used not specified")
+
+        if options.pool_name == None:
+            raise Exception("Must specify the Pool Name to be created")
+
+        if options.virt == None or options.virt not in supp_types:
+            raise Exception("Must specify virtualization type")
+
+        if options.pool_type == None:
+            raise Exception("Must specify pool type to be tested")
+
+        if options.username == None or options.password == None:
+            raise Exception("Must specify the CIMOM username and password")
+
+    except Exception, details:
+        print "FATAL: ", details
+        parser.print_help()
+        return FAIL
+
+    return PASS
+
+def env_setup(sysname, virt, clean, debug):
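+    # pre_check() returns None when the CIMOM and the environment are
+    # usable, otherwise a string describing what is wrong.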
+    env_ready = pre_check(sysname, virt)
+    if env_ready != None: 
+        print "\n%s.  Please check your environment.\n" % env_ready
+        return FAIL
+
+    if clean:
+        cmd = "rm -f %s" % (os.path.join(os.getcwd(), TEST_LOG))
+        status, output = getstatusoutput(cmd)
+
+    if debug:
+        dbg = "-d"
+    else:
+        dbg = ""
+
+    return PASS
+
+def get_pooltype(pooltype, virt):
+    if pooltype == "fs":
+        pool_type = pool_types['DISK_POOL_FS']
+    else:
+        logger.error("Invalid pool type '%s'", pooltype)
+        return FAIL, None
+
+    return PASS, pool_type
+
+def verify_inputs(part_dev, mount_pt):
+    del_dir = False   
+    cmd = "mount"
+    status, mount_info = getstatusoutput(cmd)
+    if status != PASS:
+        logger.error("Failed to get mount info.. ")
+        return FAIL, del_dir
+     
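+    # Each line of `mount` output has the form
+    #   /dev/sda1 on / type ext3 (rw)
+    # so fields [0] and [2] are the device and the mount point.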
+    for line in mount_info.split('\n'):
+        try:
+            # Check if the specified partition is mounted before using it 
+            part_name = line.split()[0]
+            if part_dev == part_name:
+                logger.error("[%s] already mounted", part_dev)
+                raise Exception("Please specify free partition other than " \
+                                "[%s]" % part_dev)
+
+            # Check if mount point is already used for mounting
+            mount_name = line.split()[2]
+            if mount_pt == mount_name:
+                logger.error("[%s] already mounted", mount_pt)
+                raise Exception("Please specify a dir other than [%s]" % mount_pt)
+
+        except Exception, details:
+            logger.error("%s", details)
+            return FAIL, del_dir
+
+    # Check if the specified mount point already exists; if not, create it.
+    if not os.path.exists(mount_pt):
+        os.mkdir(mount_pt)
+
+        # set del_dir to True so that we remove it before exiting from the tc.
+        del_dir = True 
+    else:
+        # Check if the mount point specified is a dir
+        if not os.path.isdir(mount_pt):
+            logger.error("The mount point [%s] should be a dir", mount_pt)
+            return FAIL, del_dir
+
+        files = os.listdir(mount_pt)
+        if len(files) != 0:
+            logger.info("The mount point [%s] given is not empty", mount_pt)
+
+    return PASS, del_dir
+
+def get_uri(virt):
+    if virt == 'Xen':
+        vuri = 'xen:///'
+    elif virt == 'KVM':
+        vuri = 'qemu:///system'
+    elif virt == 'LXC':
+        vuri = 'lxc:///'
+    return vuri
+
+def get_pool_settings(dp_rasds, pooltype, part_dev, mount_pt, pool_name):
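+    # get_pool_rasds() returns template DiskPoolRASDs; the one whose
+    # InstanceID is 'Default' is customized with the partition to mount
+    # and the mount point, and converted to MOF for the RPCS call.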
+    pool_settings = None
+    for dpool_rasd in dp_rasds:
+        if dpool_rasd['Type'] == pooltype and \
+           dpool_rasd['InstanceID'] == 'Default':
+            dpool_rasd['DevicePaths'] = [part_dev]
+            dpool_rasd['Path'] = mount_pt
+            dp_pid = "%s/%s" % ("DiskPool", pool_name)
+            dpool_rasd['PoolID'] = dpool_rasd['InstanceID'] = dp_pid
+            pool_settings = inst_to_mof(dpool_rasd)
+            break
+
+    return pool_settings
+
+
+def verify_pool(sysname, virt, pool_name, dp_cn):
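+    # Enumerate the DiskPool instances on the system and check whether
+    # one of them carries the expected InstanceID.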
+    try:
+        pool = EnumInstances(sysname, dp_cn)
+        for dpool in pool:
+            ret_pool = dpool.InstanceID
+            if pool_name == ret_pool: 
+               logger.info("Found the pool '%s'", pool_name)
+               return PASS
+    except Exception, details:
+        logger.error("Exception details: %s", details)
+
+    return FAIL
+
+def cleanup(virt, rpcs_conn, rpcs_cn, dp_cn, dp_id, 
+            pool_name, sysname, mount_pt, del_dir, res):
+
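+    # Delete the pool via RPCS only if CreateChildResourcePool() succeeded,
+    # then re-enumerate the DiskPool instances to make sure it is gone.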
+    if res == PASS:
+        pool_settings = CIMInstanceName(dp_cn, namespace=Globals.CIM_NS, 
+                                        keybindings = {'InstanceID': dp_id})
+        rpcs_conn.InvokeMethod("DeleteResourcePool",
+                               rpcs_cn,
+                               Pool = pool_settings)
+        pool = EnumInstances(sysname, dp_cn)
+        for dpool in pool:
+            ret_pool = dpool.InstanceID
+            if ret_pool == dp_id:
+                logger.error("Failed to delete diskpool '%s'", pool_name)
+                return FAIL
+
+    if del_dir == True:
+        cmd ="rm -rf %s"  % mount_pt
+        ret, out = getstatusoutput(cmd)
+        if ret != PASS:
+            logger.error("WARNING: '%s' was not removed", mount_pt)
+            logger.error("WARNING: Please remove %s manually", mount_pt)
+
+    return PASS
+
+
+def main():
+    usage = "usage: %prog [options] \nex: %prog -i localhost"
+    parser = OptionParser(usage)
+
+    parser.add_option("-i", "--host-url", dest="h_url", default="localhost:5988",
+                      help="URL of CIMOM to connect to (host:port)")
+    parser.add_option("-N", "--ns", dest="ns", default="root/virt",
+                      help="Namespace (default is root/virt)")
+    parser.add_option("-u", "--user", dest="username", default=None,
+                      help="Auth username for CIMOM on source system")
+    parser.add_option("-p", "--pass", dest="password", default=None,
+                      help="Auth password for CIMOM on source system")
+    parser.add_option("-v", "--virt-type", dest="virt", default=None,
+                      help="Virtualization type [ Xen | KVM ]")
+    parser.add_option("-t", "--pool-type", dest="pool_type", default=None,
+                      help="Pool type:[ fs | logical ]")
+    parser.add_option("-d", "--part-dev", dest="part_dev", default=None,
+                      help="specify the free partition to be used")
+    parser.add_option("-m", "--mnt_pt", dest="mnt_pt", default=None, 
+                      help="Mount point to be used")
+    parser.add_option("-n", "--pool-name", dest="pool_name", default=None, 
+                      help="Pool to be created")
+    parser.add_option("-c", "--clean-log",  
+                      action="store_true", dest="clean",
+                      help="Will remove existing log files before test run")
+    parser.add_option("-l", "--debug-output", action="store_true", dest="debug",
+                      help="Duplicate the output to stderr")
+
+    (options, args) = parser.parse_args()
+
+    # Verify command line options
+    status = verify_cmd_options(options, parser)
+    if status != PASS:
+       return status
+    
+    part_dev = options.part_dev
+    mount_pt = options.mnt_pt
+    pool_name = options.pool_name
+    virt = options.virt
+
+    if ":" in options.h_url:
+        (sysname, port) = options.h_url.split(":")
+    else:
+        sysname = options.h_url
+
+    # Verify if the CIMOM is running, clean cimtest.log if requested
+    # Set Debug option if requested
+    status = env_setup(sysname, virt, options.clean, options.debug)
+    if status != PASS:
+       return status
+
+    log_param(file_name=TEST_LOG)
+
+    print "Please check cimtest.log in the curr dir for debug log msgs..."
+
+    status, pooltype = get_pooltype(options.pool_type, virt)
+    if status != PASS:
+       return FAIL
+   
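+    # The RASD 'Type' property is a CIM uint16, so wrap the plain Python
+    # int before comparing it against the RASD templates.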
+    pooltype = cim_types.Uint16(pooltype)
+
+    status, del_dir = verify_inputs(part_dev, mount_pt)
+    if status != PASS:
+        if del_dir == True:
+            cmd ="rm -rf %s" % mount_pt
+            status, out = getstatusoutput(cmd)
+        logger.error("Input verification failed")
+        return status
+
+   
+    os.environ['CIM_NS'] = Globals.CIM_NS = options.ns
+    os.environ['CIM_USER'] = Globals.CIM_USER = options.username
+    os.environ['CIM_PASS'] = Globals.CIM_PASS = options.password
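+    # Build the virt-specific class names (e.g. Xen_DiskPool and
+    # Xen_ResourcePoolConfigurationService when -v Xen is used).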
+    cn = "DiskPool"
+    dp_cn = get_typed_class(virt, cn)
+    dp_id = "%s/%s" % (cn, pool_name) 
+    rpcs_cn = get_typed_class(virt, "ResourcePoolConfigurationService")
+
+    status = verify_pool(sysname, virt, dp_id, dp_cn)
+    if status == PASS:
+        logger.error("Pool --> '%s' already exists", pool_name)
+        logger.error("Specify some other pool name")
+        return FAIL
+
+    res = [FAIL]
+    try:
+        src_conn = WBEMConnection('http://%s' % sysname, (options.username, 
+                                   options.password), options.ns)
+   
+        # Get DiskPoolRASD's from SDC association with AC of DiskPool/0
+        status, dp_rasds = get_pool_rasds(sysname, virt, cn)
+        if status != PASS:
+           raise Exception("Failed to get DiskPool RASDs")
+
+        # Get the DiskPoolRASD mof with appropriate values of diskpool 
+        # to be created....
+        pool_settings = get_pool_settings(dp_rasds, pooltype, part_dev, 
+                                          mount_pt, pool_name)
+        if pool_settings == None:
+            raise Exception("Did not get the required pool settings ...")
+
+        # Create DiskPool..
+        res = src_conn.InvokeMethod("CreateChildResourcePool",
+                                    rpcs_cn,
+                                    Settings=[pool_settings],
+                                    ElementName=pool_name)
+
+    except Exception, details:
+        logger.error("In main(), exception '%s'", details)
+
+    # Verify if the desired pool was successfully created ..
+    if res[0] == PASS:
+        status = verify_pool(sysname, virt, dp_id, dp_cn)
+        if status != PASS:
+            logger.error("Failed to verify pool: %s " % pool_name)
+
+    # Clean up the pool and the mount dir that was created ...
+    status = cleanup(virt, src_conn, rpcs_cn, dp_cn, dp_id,
+                      pool_name, sysname, mount_pt, del_dir, res[0])
+
+    if res[0] == PASS and status == PASS:
+        logger.info("Pool %s was successfully verified for pool type %s", 
+                    pool_name , options.pool_type)
+
+        # Place holder to give a hint to the user the tc passed 
+        # otherwise the user will have to look into the cimtest.log in the 
+        # current dir.
+        print "Pool '", pool_name,"' was successfully verified for pool type "\
+              "'", options.pool_type , "'"
+        return PASS
+    else:
+        logger.error("Test Failed to verify '%s' pool creation ....", 
+                     options.pool_type)
+        return FAIL
+
+if __name__ == "__main__":
+    sys.exit(main())
+



