[Libvirt-cim] [PATCH] [TEST] #3 Update RAFP.01 for LXC support

Kaitlin Rupert kaitlin at linux.vnet.ibm.com
Thu Jun 19 15:33:41 UTC 2008


> +def setup_env(server, virt):
> +    destroy_and_undefine_all(server)
> +    vsxml = None
> +    if virt == "Xen":
> +        test_disk = "xvda"
> +    if virt == "XenFV" or virt == "KVM":
> +        test_disk = "hda"
> +
> +    virtxml = get_class(virt)
> +    if virt == 'LXC':
> +        vsxml = virtxml(test_dom)
> +    else:
> +        vsxml = virtxml(test_dom, mem=test_mem, vcpus = test_vcpus,
> +                        mac = test_mac, disk = test_disk)
> +    try:
> +        ret = vsxml.define(server)
> +        if not ret:
> +            logger.error("Failed to Define the domain: %s", test_dom)
> +            return FAIL, vsxml, test_disk

No need to return test_disk here.

> +
> +    except Exception, details:
> +        logger.error("Exception : %s", details)
> +        return FAIL, vsxml, test_disk

Same here.
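
Something like this (just a sketch) keeps the return signature
consistent with the success path:

    try:
        ret = vsxml.define(server)
        if not ret:
            logger.error("Failed to Define the domain: %s", test_dom)
            return FAIL, vsxml

    except Exception, details:
        logger.error("Exception : %s", details)
        return FAIL, vsxml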

> +
> +    return PASS, vsxml
> +
> +def get_instance(server, pool, list, virt='Xen'):
> +    try:
> +        inst = enumclass.getInstance(server,
> +                                     pool,
> +                                     list,
> +                                     virt)
> +    except Exception:
> +        logger.error(Globals.CIM_ERROR_GETINSTANCE  % pool)
> +        return FAIL
> +  
> +    return inst

Mismatched return types here -- the except path returns FAIL while the 
success path returns an instance.  I'd return both a status and an 
instance.
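
For example, something like:

    def get_instance(server, pool, list, virt='Xen'):
        inst = None
        try:
            inst = enumclass.getInstance(server,
                                         pool,
                                         list,
                                         virt)
        except Exception:
            logger.error(Globals.CIM_ERROR_GETINSTANCE  % pool)
            return FAIL, inst

        return PASS, inst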

> +
> +def verify_rasd(server, assoc_cn, cn, virt, list, rasd_id):
> +    try:
> +        data = assoc.AssociatorNames(server,
> +                                     assoc_cn,
> +                                     cn,
> +                                     virt,
> +                                     InstanceID=list)
> +    except Exception:
> +        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % cn)
> +        status = FAIL
> +
> +    if len(data) != 1:

You can't guarantee one instance back because the system may have more 
than one guest defined (Xen systems always have a Domain-0 defined).

Instead, you'll want to make sure you get an instance or instances that 
correspond to the domain you're trying to verify.

> +        logger.error("No associated rasd for %s", cn)
> +        return FAIL
> +    
> +    if data[0]['InstanceID'] not in rasd:
> +        logger.error("InstanceID Mismatch")
> +        logger.error("Returned %s error" % data[0]['InstanceID'])
> +        return FAIL
> +

Should return PASS if all goes well here.
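
Putting this and the previous comment together, verify_rasd() could 
look something like the sketch below.  Note I'm passing rasd in as a 
parameter rather than using a global (see my comment further down), and 
filtering the returned names down to the guest under test, since the 
RASD InstanceIDs are of the form "<domain>/<device>":

    def verify_rasd(server, assoc_cn, cn, virt, list, rasd_id, rasd):
        try:
            data = assoc.AssociatorNames(server,
                                         assoc_cn,
                                         cn,
                                         virt,
                                         InstanceID=list)
        except Exception:
            logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % cn)
            return FAIL

        # Keep only the instances that belong to the guest we defined
        names = [d['InstanceID'] for d in data
                 if d['InstanceID'].startswith("%s/" % test_dom)]

        if len(names) != 1:
            logger.error("No associated rasd for %s", cn)
            return FAIL

        if names[0] not in rasd:
            logger.error("InstanceID Mismatch")
            logger.error("Returned %s error" % names[0])
            return FAIL

        return PASS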

> +               
>  @do_main(sup_types)
>  def main():
>      options = main.options
>      status = PASS
> 
> -    try:
> -        key_list = { 'InstanceID' : "MemoryPool/0" }
> -        mempool = enumclass.getInstance(options.ip,
> -                                        "MemoryPool",
> -                                        key_list,
> -                                        options.virt)
> -    except Exception:
> -        logger.error(Globals.CIM_ERROR_GETINSTANCE  % "MemoryPool")
> -        return FAIL
> +    status, vsxml = setup_env(options.ip, options.virt)
> +    if status != PASS:
> +        return status
> +    
> +    status, diskid = create_diskpool_conf(options.ip, options.virt)
> +    if status != PASS:
> +        return status

> +    status, test_network = create_netpool_conf(options.ip, options.virt)
> +    if status != PASS:
> +        return status

If create_diskpool_conf() or create_netpool_conf() fails, you'll need to 
make sure you undefine the guest and call cleanup_restore().
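
Something along these lines for both calls:

    status, diskid = create_diskpool_conf(options.ip, options.virt)
    if status != PASS:
        cleanup_restore(options.ip, options.virt)
        vsxml.undefine(options.ip)
        return status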

> +    global rasd

Instead of using a global, pass rasd as an argument to verify_rasd() 
(see the sketch above).  I'd like to move away from using globals like 
this.


> +    if options.virt == 'LXC':
> +        pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"} }
> +        rasd = [ "%s/mem" % test_dom ]
> +    else:
> +        pool = { "MemoryPool"    : {'InstanceID' : "MemoryPool/0"},
> +                 "ProcessorPool" : {'InstanceID' : "ProcessorPool/0"},
> +                 "DiskPool"      : {'InstanceID' : diskid},
> +                 "NetworkPool"   : {'InstanceID' : "NetworkPool/%s" % test_network }}
> +        rasd = [ "%s/mem" % test_dom, 
> +                 "%s/proc" % test_dom, 
> +                 "%s/%s" %(test_dom, diskid), 
> +                 "%s/%s" % (test_dom, test_network) ]
> +
> +    for k, v in pool.iteritems():
> +        inst = get_instance(options.ip, k, v, options.virt) 

You'll want to verify this passes or fails.  If it fails, don't forget 
to clean up your environment before returning.
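
For example, assuming get_instance() returns a (status, instance) tuple 
as suggested above:

    for k, v in pool.iteritems():
        status, inst = get_instance(options.ip, k, v, options.virt)
        if status != PASS or inst is None:
            cleanup_restore(options.ip, options.virt)
            vsxml.undefine(options.ip)
            return FAIL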

> +
> +        verify_rasd(options.ip, "ResourceAllocationFromPool", 
> +                    k, options.virt, inst.InstanceID,
> +                    v['InstanceID'])

You'll need to capture the return of verify_rasd().  As the code is now, 
you're returning the status of the create_netpool_conf() call.
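
Something like the following, assuming the verify_rasd() signature 
sketched above; breaking out of the loop lets the cleanup calls below 
run before the status is returned:

        status = verify_rasd(options.ip, "ResourceAllocationFromPool",
                             k, options.virt, inst.InstanceID,
                             v['InstanceID'], rasd)
        if status != PASS:
            break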

> +
> +    cleanup_restore(options.ip, options.virt)
> +    vsxml.undefine(options.ip)
>      return status 
> -        
>          
>  if __name__ == "__main__":
>      sys.exit(main())

-- 
Kaitlin Rupert
IBM Linux Technology Center
kaitlin at linux.vnet.ibm.com



