From kaitlin at linux.vnet.ibm.com Fri May 1 16:33:23 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 01 May 2009 09:33:23 -0700 Subject: [Libvirt-cim] [PATCH] Change pools to pool in parse_diskpool_line() Message-ID: <43b5e7214d1b21256893.1241195603@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1241194039 25200 # Node ID 43b5e7214d1b212568936dd46fa669941e7e00ff # Parent 7a0de3babfcc4bd847081ed973b90f57dc1b2651 Change pools to pool in parse_diskpool_line() Signed-off-by: Kaitlin Rupert diff -r 7a0de3babfcc -r 43b5e7214d1b src/Virt_DevicePool.c --- a/src/Virt_DevicePool.c Thu Apr 30 16:09:30 2009 -0700 +++ b/src/Virt_DevicePool.c Fri May 01 09:07:19 2009 -0700 @@ -216,7 +216,7 @@ free(pool->tag); free(pool->path); } - pools->primordial = false; + pool->primordial = false; return (ret == 2); } From kaitlin at linux.vnet.ibm.com Fri May 1 20:02:36 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 01 May 2009 13:02:36 -0700 Subject: [Libvirt-cim] [PATCH 0 of 2] Add Network parent pool and NetPoolRASD templates Message-ID: This is similar to the support that was added for disk pools. From kaitlin at linux.vnet.ibm.com Fri May 1 20:02:37 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 01 May 2009 13:02:37 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] Create parent network pool In-Reply-To: References: Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1241124395 25200 # Node ID d392e94325eca1a937a1a6a7ab11809174987368 # Parent 3b8498fe6e2b575456486434056042047187ccb9 Create parent network pool. Signed-off-by: Kaitlin Rupert diff -r 3b8498fe6e2b -r d392e94325ec src/Virt_DevicePool.c --- a/src/Virt_DevicePool.c Fri May 01 13:02:07 2009 -0700 +++ b/src/Virt_DevicePool.c Thu Apr 30 13:46:35 2009 -0700 @@ -651,7 +651,8 @@ uint16_t type, const char *id, const char *units, - const char *caption) + const char *caption, + bool primordial) { CMSetProperty(inst, "InstanceID", (CMPIValue *)id, CMPI_chars); @@ -669,6 +670,9 @@ if (caption != NULL) CMSetProperty(inst, "Caption", (CMPIValue *)caption, CMPI_chars); + + CMSetProperty(inst, "Primordial", + (CMPIValue *)&primordial, CMPI_boolean); } static CMPIStatus mempool_instance(virConnectPtr conn, @@ -696,7 +700,7 @@ mempool_set_total(inst, conn); mempool_set_reserved(inst, conn); - set_params(inst, CIM_RES_TYPE_MEM, id, "KiloBytes", NULL); + set_params(inst, CIM_RES_TYPE_MEM, id, "KiloBytes", NULL, true); inst_list_add(list, inst); @@ -727,13 +731,51 @@ procpool_set_total(inst, conn); - set_params(inst, CIM_RES_TYPE_PROC, id, "Processors", NULL); + set_params(inst, CIM_RES_TYPE_PROC, id, "Processors", NULL, true); inst_list_add(list, inst); return s; } +static CMPIStatus _netpool_for_parent(struct inst_list *list, + const char *ns, + const char *refcn, + const CMPIBroker *broker) +{ + CMPIStatus s = {CMPI_RC_OK, NULL}; + char *id = NULL; + CMPIInstance *inst; + + inst = get_typed_instance(broker, + refcn, + "NetworkPool", + ns); + if (inst == NULL) { + CU_DEBUG("Unable to get instance: %s:%s_NetworkPool", + ns, refcn); + cu_statusf(broker, &s, + CMPI_RC_ERR_FAILED, + "Error getting pool instance"); + goto out; + } + + if (asprintf(&id, "NetworkPool/0") == -1) { + cu_statusf(broker, &s, + CMPI_RC_ERR_FAILED, + ""); + goto out; + } + + set_params(inst, CIM_RES_TYPE_NET, id, NULL, NULL, true); + free(id); + + inst_list_add(list, inst); + out: + + return s; +} + static CMPIStatus _netpool_for_network(struct inst_list *list, const char *ns, virConnectPtr conn, @@ 
-748,6 +790,9 @@ CMPIInstance *inst; virNetworkPtr network = NULL; + if (STREQC(netname, "0")) + return _netpool_for_parent(list, ns, refcn, broker); + CU_DEBUG("Looking up network `%s'", netname); network = virNetworkLookupByName(conn, netname); if (network == NULL) { @@ -787,7 +832,7 @@ goto out; } - set_params(inst, CIM_RES_TYPE_NET, id, NULL, cap); + set_params(inst, CIM_RES_TYPE_NET, id, NULL, cap, false); free(id); free(cap); free(bridge); @@ -839,6 +884,17 @@ nets = virConnectListNetworks(conn, netnames, nets); + nets++; + netnames = realloc(netnames, (nets) * (sizeof(*netnames))); + if (netnames == NULL) { + cu_statusf(broker, &s, + CMPI_RC_ERR_FAILED, + "Failed to allocate memory for %i net names", nets); + goto out; + } + + netnames[nets - 1] = "0"; + for (i = 0; i < nets; i++) { _netpool_for_network(list, ns, @@ -868,10 +924,12 @@ if (asprintf(&poolid, "DiskPool/%s", pool->tag) == -1) return NULL; - set_params(inst, CIM_RES_TYPE_DISK, poolid, "Megabytes", pool->tag); - - CMSetProperty(inst, "Primordial", - (CMPIValue *)&pool->primordial, CMPI_boolean); + set_params(inst, + CIM_RES_TYPE_DISK, + poolid, + "Megabytes", + pool->tag, + pool->primordial); if (!diskpool_set_capacity(conn, inst, pool)) CU_DEBUG("Failed to set capacity for disk pool: %s", @@ -951,7 +1009,7 @@ return s; } - set_params(inst, CIM_RES_TYPE_GRAPHICS, id, NULL, NULL); + set_params(inst, CIM_RES_TYPE_GRAPHICS, id, NULL, NULL, true); inst_list_add(list, inst); @@ -987,7 +1045,7 @@ return s; } - set_params(inst, CIM_RES_TYPE_INPUT, id, NULL, NULL); + set_params(inst, CIM_RES_TYPE_INPUT, id, NULL, NULL, true); inst_list_add(list, inst); From kaitlin at linux.vnet.ibm.com Fri May 1 20:02:38 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 01 May 2009 13:02:38 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] Expose template NetPoolRASDs In-Reply-To: References: Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1241208154 25200 # Node ID aa3094d471dd82b0d682f76342d33d48a29e29cb # Parent d392e94325eca1a937a1a6a7ab11809174987368 Expose template NetPoolRASDs. 
Signed-off-by: Kaitlin Rupert diff -r d392e94325ec -r aa3094d471dd src/Virt_RASD.c --- a/src/Virt_RASD.c Thu Apr 30 13:46:35 2009 -0700 +++ b/src/Virt_RASD.c Fri May 01 13:02:34 2009 -0700 @@ -598,6 +598,9 @@ case CIM_RES_TYPE_DISK: *classname = "DiskPoolResourceAllocationSettingData"; break; + case CIM_RES_TYPE_NET: + *classname = "NetPoolResourceAllocationSettingData"; + break; default: rc = CMPI_RC_ERR_FAILED; } diff -r d392e94325ec -r aa3094d471dd src/Virt_SettingsDefineCapabilities.c --- a/src/Virt_SettingsDefineCapabilities.c Thu Apr 30 13:46:35 2009 -0700 +++ b/src/Virt_SettingsDefineCapabilities.c Fri May 01 13:02:34 2009 -0700 @@ -609,6 +609,143 @@ return s; } +static CMPIStatus set_net_pool_props(const CMPIObjectPath *ref, + const char *id, + uint16_t pool_type, + struct inst_list *list) +{ + CMPIStatus s = {CMPI_RC_OK, NULL}; + CMPIInstance *inst; + const char *addr = "192.168.122.1"; + const char *netmask = "255.255.255.0"; + const char *ip_start = "192.168.122.2"; + const char *ip_stop = "192.168.122.254"; + int dev_count; + int i; + + /* Isolated network pools don't have a forward device */ + if (pool_type == NETPOOL_FORWARD_NONE) + dev_count = 1; + else + dev_count = 2; + + for (i = 0; i < dev_count; i++) { + inst = sdc_rasd_inst(&s, ref, CIM_RES_TYPE_NET, POOL_RASD); + if ((inst == NULL) || (s.rc != CMPI_RC_OK)) + goto out; + + CMSetProperty(inst, "InstanceID", (CMPIValue *)id, CMPI_chars); + + CMSetProperty(inst, "Address", + (CMPIValue *)addr, CMPI_chars); + + CMSetProperty(inst, "Netmask", + (CMPIValue *)netmask, CMPI_chars); + + CMSetProperty(inst, "IPRangeStart", + (CMPIValue *)ip_start, CMPI_chars); + + CMSetProperty(inst, "IPRangeEnd", + (CMPIValue *)ip_stop, CMPI_chars); + + CMSetProperty(inst, "ForwardMode", + (CMPIValue *)&pool_type, CMPI_uint16); + + if (i == 1) { + CMSetProperty(inst, "ForwardDevice", + (CMPIValue *)"eth0", CMPI_chars); + } + + inst_list_add(list, inst); + } + + out: + return s; +} + +static CMPIStatus net_pool_template(const CMPIObjectPath *ref, + int template_type, + struct inst_list *list) +{ + const char *id; + CMPIStatus s = {CMPI_RC_OK, NULL}; + int type[3] = {NETPOOL_FORWARD_NONE, + NETPOOL_FORWARD_NAT, + NETPOOL_FORWARD_ROUTED}; + int pool_types = 3; + int i; + + switch (template_type) { + case SDC_RASD_MIN: + id = "Minimum"; + break; + case SDC_RASD_MAX: + id = "Maximum"; + break; + case SDC_RASD_INC: + id = "Increment"; + break; + case SDC_RASD_DEF: + id = "Default"; + break; + default: + cu_statusf(_BROKER, &s, + CMPI_RC_ERR_FAILED, + "Unsupported sdc_rasd type"); + goto out; + } + + for (i = 0; i < pool_types; i++) { + s = set_net_pool_props(ref, id, type[i], list); + if (s.rc != CMPI_RC_OK) + goto out; + } + + out: + return s; +} + +static CMPIStatus net_dev_or_pool_template(const CMPIObjectPath *ref, + int template_type, + struct inst_list *list) +{ + CMPIStatus s = {CMPI_RC_OK, NULL}; + CMPIInstance *inst; + const char *poolid; + bool val; + + if (cu_get_str_path(ref, "InstanceID", &poolid) != CMPI_RC_OK) { + cu_statusf(_BROKER, &s, + CMPI_RC_ERR_FAILED, + "Missing InstanceID"); + goto out; + } + + s = get_pool_by_name(_BROKER, ref, poolid, &inst); + if (s.rc != CMPI_RC_OK) { + cu_statusf(_BROKER, &s, + CMPI_RC_ERR_FAILED, + "Unable to get pool instance from capabilities"); + goto out; + } + + if (cu_get_bool_prop(inst, "Primordial", &val) != CMPI_RC_OK) { + cu_statusf(_BROKER, &s, + CMPI_RC_ERR_FAILED, + "Unable to determine pool type"); + goto out; + } + + if (val) + s = net_pool_template(ref, template_type, list); + else + s = 
net_template(ref, template_type, list); + + out: + + return s; +} + static CMPIStatus set_disk_props(int type, const CMPIObjectPath *ref, const char *id, @@ -1390,7 +1527,7 @@ else if (type == CIM_RES_TYPE_PROC) s = proc_template(ref, i, list); else if (type == CIM_RES_TYPE_NET) - s = net_template(ref, i, list); + s = net_dev_or_pool_template(ref, i, list); else if (type == CIM_RES_TYPE_DISK) s = disk_dev_or_pool_template(ref, i, list); else if (type == CIM_RES_TYPE_GRAPHICS) From deeptik at linux.vnet.ibm.com Mon May 4 10:49:44 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Mon, 04 May 2009 03:49:44 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Fixing xmt-makefv.sh typo Message-ID: <92caf252c2fa8c8a7a9b.1241434184@localhost.localdomain> # HG changeset patch # User Deepti B. Kalakeri # Date 1241434172 25200 # Node ID 92caf252c2fa8c8a7a9b70548d12b03c52f3935c # Parent e8dc06eefada41252ba8d27b08fcef8ef6604251 [TEST] Fixing xmt-makefv.sh typo. Signed-off-by: Deepti B. Kalakeri diff -r e8dc06eefada -r 92caf252c2fa suites/libvirt-cim/images/xmt-makefv.sh --- a/suites/libvirt-cim/images/xmt-makefv.sh Tue Apr 21 17:08:06 2009 -0700 +++ b/suites/libvirt-cim/images/xmt-makefv.sh Mon May 04 03:49:32 2009 -0700 @@ -25,7 +25,7 @@ fi if [ -z $QEMU_FILE ]; then - CU_QEMU_VER=0 + CUR_QEMU_VER=0 else CUR_QEMU_VER=`strings $QEMU_FILE | awk '/version [0-9]/ { print $5; }' | sed 's/,//' | sed 's/\.//g'` fi From rmaciel at linux.vnet.ibm.com Mon May 4 22:14:27 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Mon, 04 May 2009 19:14:27 -0300 Subject: [Libvirt-cim] [PATCH] Add ForwardDevice attribute for network pools In-Reply-To: <327098c60da19f0f3c2c.1241132914@localhost.localdomain> References: <327098c60da19f0f3c2c.1241132914@localhost.localdomain> Message-ID: <49FF68C3.7000801@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1241132900 25200 > # Node ID 327098c60da19f0f3c2c0a3ec69955503a16602a > # Parent 94b1108c2a8be28472a171d9a09d08a2580cc53f > Add ForwardDevice attribute for network pools > > libvirt allows you to restrict the firewall rules of a pool so that traffic > is forwarded through a specific device. > > Also, fix a bug where the value for ForwardMode specified by the user wasn't > being read in properly. Also change FowardMode to a value map attribute > instead of a string. > > Fix a bug where the value for IPRangeStart is read in twice - IPRangeEnd wasn't > being read in at all. > > Fix a bug in xmlgen code for the network pool forwarding device. 
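For reference, a minimal sketch of the network definition this maps to on the libvirt side (the pool name, the device name "eth0", and the bridge are illustrative only; the addresses mirror the defaults used elsewhere in the series, not values mandated by this patch):

  <network>
    <name>cimtest-netpool</name>
    <!-- pool->forward_dev becomes the dev attribute; eth0 is only an example -->
    <forward mode='nat' dev='eth0'/>
    <bridge name='virbr1'/>
    <ip address='192.168.122.1' netmask='255.255.255.0'>
      <dhcp>
        <range start='192.168.122.2' end='192.168.122.254'/>
      </dhcp>
    </ip>
  </network>

Omitting the <forward> element entirely gives an isolated network, which is why the "None" forward mode below translates to a NULL forward_mode rather than a string.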
> > Signed-off-by: Kaitlin Rupert > > diff -r 94b1108c2a8b -r 327098c60da1 libxkutil/xmlgen.c > --- a/libxkutil/xmlgen.c Thu Apr 30 13:46:35 2009 -0700 > +++ b/libxkutil/xmlgen.c Thu Apr 30 16:08:20 2009 -0700 > @@ -785,7 +785,7 @@ > if (xmlNewProp(forward, > BAD_CAST "dev", > BAD_CAST pool->forward_dev) == NULL) > - goto out; > + goto out; > } > } > > diff -r 94b1108c2a8b -r 327098c60da1 schema/ResourceAllocationSettingData.mof > --- a/schema/ResourceAllocationSettingData.mof Thu Apr 30 13:46:35 2009 -0700 > +++ b/schema/ResourceAllocationSettingData.mof Thu Apr 30 16:08:20 2009 -0700 > @@ -171,7 +171,12 @@ > string Netmask; > string IPRangeStart; > string IPRangeEnd; > - string ForwardMode; > + string ForwardDevice; > + > + [Description ("Network pool forwarding mode"), > + ValueMap {"0", "1", "2"}, > + Values {"None", "NAT", "Routed"}] > + uint16 ForwardMode; > }; > > [Description ("KVM virtual network pool settings"), > @@ -182,7 +187,12 @@ > string Netmask; > string IPRangeStart; > string IPRangeEnd; > - string ForwardMode; > + string ForwardDevice; > + > + [Description ("Network pool forwarding mode"), > + ValueMap {"0", "1", "2"}, > + Values {"None", "NAT", "Routed"}] > + uint16 ForwardMode; > }; > > [Description ("LXC virtual network pool settings"), > @@ -193,7 +203,12 @@ > string Netmask; > string IPRangeStart; > string IPRangeEnd; > - string ForwardMode; > + string ForwardDevice; > + > + [Description ("Network pool forwarding mode"), > + ValueMap {"0", "1", "2"}, > + Values {"None", "NAT", "Routed"}] > + uint16 ForwardMode; > }; > > [Description ("Xen virtual disk pool settings"), > diff -r 94b1108c2a8b -r 327098c60da1 src/Virt_ResourcePoolConfigurationService.c > --- a/src/Virt_ResourcePoolConfigurationService.c Thu Apr 30 13:46:35 2009 -0700 > +++ b/src/Virt_ResourcePoolConfigurationService.c Thu Apr 30 16:08:20 2009 -0700 > @@ -82,6 +82,7 @@ > { > const char *val = NULL; > const char *msg = NULL; > + uint16_t type; > > /*FIXME: Need to add validation of addresses if user specified */ > > @@ -103,12 +104,37 @@ > free(pool->pool_info.net.ip_start); > pool->pool_info.net.ip_start = strdup(val); > > - if (cu_get_str_prop(inst, "IPRangeStart", &val) != CMPI_RC_OK) > + if (cu_get_str_prop(inst, "IPRangeEnd", &val) != CMPI_RC_OK) > val = "192.168.122.254"; > > free(pool->pool_info.net.ip_end); > pool->pool_info.net.ip_end = strdup(val); > > + if (cu_get_u16_prop(inst, "ForwardMode", &type) != CMPI_RC_OK) { > + pool->pool_info.net.forward_mode = strdup("nat"); > + } else { > + free(pool->pool_info.net.forward_mode); > + > + switch (type) { > + case NETPOOL_FORWARD_NONE: > + pool->pool_info.net.forward_mode = NULL; > + break; > + case NETPOOL_FORWARD_NAT: > + pool->pool_info.net.forward_mode = strdup("nat"); > + break; > + case NETPOOL_FORWARD_ROUTED: > + pool->pool_info.net.forward_mode = strdup("route"); > + break; > + default: > + return "Storage pool type not supported"; > + } > + } > + > + if (cu_get_str_prop(inst, "ForwardDevice", &val) == CMPI_RC_OK) { > + free(pool->pool_info.net.forward_dev); > + pool->pool_info.net.forward_dev = strdup(val); > + } > + > return msg; > > } > diff -r 94b1108c2a8b -r 327098c60da1 src/svpc_types.h > --- a/src/svpc_types.h Thu Apr 30 13:46:35 2009 -0700 > +++ b/src/svpc_types.h Thu Apr 30 16:08:20 2009 -0700 > @@ -66,6 +66,10 @@ > #define CIM_SAP_INACTIVE_STATE 3 > #define CIM_SAP_AVAILABLE_STATE 6 > > +#define NETPOOL_FORWARD_NONE 0 > +#define NETPOOL_FORWARD_NAT 1 > +#define NETPOOL_FORWARD_ROUTED 2 > + > #include > #include > > > 
_______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Mon May 4 23:05:49 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Mon, 04 May 2009 20:05:49 -0300 Subject: [Libvirt-cim] [PATCH] Change pools to pool in parse_diskpool_line() In-Reply-To: <43b5e7214d1b21256893.1241195603@localhost.localdomain> References: <43b5e7214d1b21256893.1241195603@localhost.localdomain> Message-ID: <49FF74CD.4030705@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1241194039 25200 > # Node ID 43b5e7214d1b212568936dd46fa669941e7e00ff > # Parent 7a0de3babfcc4bd847081ed973b90f57dc1b2651 > Change pools to pool in parse_diskpool_line() > > Signed-off-by: Kaitlin Rupert > > diff -r 7a0de3babfcc -r 43b5e7214d1b src/Virt_DevicePool.c > --- a/src/Virt_DevicePool.c Thu Apr 30 16:09:30 2009 -0700 > +++ b/src/Virt_DevicePool.c Fri May 01 09:07:19 2009 -0700 > @@ -216,7 +216,7 @@ > free(pool->tag); > free(pool->path); > } > - pools->primordial = false; > + pool->primordial = false; > > return (ret == 2); > } > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Mon May 4 23:47:10 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Mon, 04 May 2009 20:47:10 -0300 Subject: [Libvirt-cim] [PATCH 0 of 2] Add Network parent pool and NetPoolRASD templates In-Reply-To: References: Message-ID: <49FF7E7E.2040505@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > This is similar to the support that was added for disk pools. 
> > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From yunguol at cn.ibm.com Tue May 5 02:59:04 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Tue, 5 May 2009 10:59:04 +0800 Subject: [Libvirt-cim] Test Run Summary (May 05 2009): KVM on Fedora release 10 (Cambridge) with sfcb Message-ID: ================================================= Test Run Summary (May 05 2009): KVM on Fedora release 10 (Cambridge) with sfcb ================================================= Distro: Fedora release 10 (Cambridge) Kernel: 2.6.27.15-170.2.24.fc10.x86_64 libvirt: 0.4.5 Hypervisor: QEMU 0.9.1 CIMOM: sfcb sfcbd 1.3.4preview Libvirt-cim revision: 863 Libvirt-cim changeset: 68e42eb53c40 Cimtest revision: 675 Cimtest changeset: 92caf252c2fa ================================================= FAIL : 5 XFAIL : 4 SKIP : 9 PASS : 133 ----------------- Total : 151 ================================================= FAIL Test Summary: ComputerSystemIndication - 01_created_indication.py: FAIL HostSystem - 03_hs_to_settdefcap.py: FAIL ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL SettingsDefineCapabilities - 01_forward.py: FAIL ================================================= XFAIL Test Summary: ComputerSystem - 32_start_reboot.py: XFAIL ComputerSystem - 33_suspend_reboot.py: XFAIL VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL VirtualSystemManagementService - 16_removeresource.py: XFAIL ================================================= SKIP Test Summary: ComputerSystem - 02_nosystems.py: SKIP LogicalDisk - 02_nodevs.py: SKIP VSSD - 02_bootldr.py: SKIP VirtualSystemMigrationService - 01_migratable_host.py: SKIP VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP ================================================= Full report: -------------------------------------------------------------------- AllocationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- AllocationCapabilities - 02_alloccap_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 01_enum.py: PASS -------------------------------------------------------------------- ComputerSystem - 02_nosystems.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- ComputerSystem - 03_defineVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 04_defineStartVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 05_activate_defined_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 06_paused_active_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 22_define_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 23_pause_pause.py: PASS 
-------------------------------------------------------------------- ComputerSystem - 27_define_pause_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 32_start_reboot.py: XFAIL ERROR - Got CIM error Unable to reboot domain: this function is not supported by the hypervisor: virDomainReboot with return code 1 ERROR - Exception: Unable reboot dom 'cs_test_domain' InvokeMethod(RequestStateChange): Unable to reboot domain: this function is not supported by the hypervisor: virDomainReboot Bug:<00005> -------------------------------------------------------------------- ComputerSystem - 33_suspend_reboot.py: XFAIL ERROR - Got CIM error State not supported with return code 7 ERROR - Exception: Unable Suspend dom 'test_domain' InvokeMethod(RequestStateChange): State not supported Bug:<00012> -------------------------------------------------------------------- ComputerSystem - 35_start_reset.py: PASS -------------------------------------------------------------------- ComputerSystem - 40_RSC_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 41_cs_to_settingdefinestate.py: PASS -------------------------------------------------------------------- ComputerSystem - 42_cs_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystemIndication - 01_created_indication.py: FAIL ERROR - Exception : Request Failed: 200 Traceback (most recent call last): File "./lib/XenKvmLib/const.py", line 139, in do_try File "01_created_indication.py", line 146, in main sub_list, ind_names, dict = sub_ind(ip, virt) File "01_created_indication.py", line 60, in sub_ind sub.subscribe(dict['default_url'], dict['default_auth']) File "/data/users/daisy/cimtest/suites/libvirt-cim/lib/XenKvmLib/indication_tester.py", line 345, in subscribe "CreateInstance", auth_hdr) File "/data/users/daisy/cimtest/suites/libvirt-cim/lib/XenKvmLib/indication_tester.py", line 330, in __do_cimpost (resp.status, resp.reason)) Exception: Request Failed: 200 ERROR - None -------------------------------------------------------------------- ElementAllocatedFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 03_reverse_errs.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 04_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 01_forward.py: PASS -------------------------------------------------------------------- ElementCapabilities - 02_reverse.py: PASS -------------------------------------------------------------------- ElementCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 05_hostsystem_cap.py: PASS -------------------------------------------------------------------- ElementConforms - 01_forward.py: PASS -------------------------------------------------------------------- ElementConforms - 02_reverse.py: PASS -------------------------------------------------------------------- ElementConforms - 03_ectp_fwd_errs.py: PASS -------------------------------------------------------------------- ElementConforms - 
04_ectp_rev_errs.py: PASS -------------------------------------------------------------------- ElementSettingData - 01_forward.py: PASS -------------------------------------------------------------------- ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 01_enum.py: PASS -------------------------------------------------------------------- HostSystem - 02_hostsystem_to_rasd.py: PASS -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - 'KVM_SettingsDefineCapabilities' returned 8 RASD objects instead of 4 Class not found -------------------------------------------------------------------- HostSystem - 04_hs_to_EAPF.py: PASS -------------------------------------------------------------------- HostSystem - 05_hs_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 06_hs_to_vsms.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 01_forward.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 01_forward.py: PASS -------------------------------------------------------------------- HostedDependency - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 03_enabledstate.py: PASS -------------------------------------------------------------------- HostedDependency - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 01_forward.py: PASS -------------------------------------------------------------------- HostedResourcePool - 02_reverse.py: PASS -------------------------------------------------------------------- HostedResourcePool - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedService - 01_forward.py: PASS -------------------------------------------------------------------- HostedService - 02_reverse.py: PASS -------------------------------------------------------------------- HostedService - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedService - 04_reverse_errs.py: PASS -------------------------------------------------------------------- KVMRedirectionSAP - 01_enum_KVMredSAP.py: PASS -------------------------------------------------------------------- LogicalDisk - 01_disk.py: PASS -------------------------------------------------------------------- LogicalDisk - 02_nodevs.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- LogicalDisk - 03_ld_gi_errs.py: PASS -------------------------------------------------------------------- Memory - 01_memory.py: PASS -------------------------------------------------------------------- Memory - 02_defgetmem.py: PASS -------------------------------------------------------------------- Memory - 
03_mem_gi_errs.py: PASS -------------------------------------------------------------------- NetworkPort - 01_netport.py: PASS -------------------------------------------------------------------- NetworkPort - 02_np_gi_errors.py: PASS -------------------------------------------------------------------- NetworkPort - 03_user_netport.py: PASS -------------------------------------------------------------------- Processor - 01_processor.py: PASS -------------------------------------------------------------------- Processor - 02_definesys_get_procs.py: PASS -------------------------------------------------------------------- Processor - 03_proc_gi_errs.py: PASS -------------------------------------------------------------------- Profile - 01_enum.py: PASS -------------------------------------------------------------------- Profile - 02_profile_to_elec.py: PASS -------------------------------------------------------------------- Profile - 03_rprofile_gi_errs.py: PASS -------------------------------------------------------------------- RASD - 01_verify_rasd_fields.py: PASS -------------------------------------------------------------------- RASD - 02_enum.py: PASS -------------------------------------------------------------------- RASD - 03_rasd_errs.py: PASS -------------------------------------------------------------------- RASD - 04_disk_rasd_size.py: PASS -------------------------------------------------------------------- RASD - 05_disk_rasd_emu_type.py: PASS -------------------------------------------------------------------- RedirectionService - 01_enum_crs.py: PASS -------------------------------------------------------------------- RedirectionService - 02_enum_crscap.py: PASS -------------------------------------------------------------------- RedirectionService - 03_RedirectionSAP_errs.py: PASS -------------------------------------------------------------------- ReferencedProfile - 01_verify_refprof.py: PASS -------------------------------------------------------------------- ReferencedProfile - 02_refprofile_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 03_forward_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 05_RAPF_err.py: PASS -------------------------------------------------------------------- ResourcePool - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePool - 02_rp_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS 
-------------------------------------------------------------------- ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description One or more parameter values passed to the method were invalid InvokeMethod(CreateChildResourcePool): One or more parameter values passed to the method were invalid -------------------------------------------------------------------- ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description One or more parameter values passed to the method were invalid InvokeMethod(DeleteResourcePool): One or more parameter values passed to the method were invalid -------------------------------------------------------------------- ServiceAccessBySAP - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAccessBySAP - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 01_forward.py: PASS -------------------------------------------------------------------- SettingsDefine - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 03_sds_fwd_errs.py: PASS -------------------------------------------------------------------- SettingsDefine - 04_sds_rev_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 01_forward.py: FAIL ERROR - KVM_SettingsDefineCapabilities returned 8 ResourcePool objects instead of 4 -------------------------------------------------------------------- SettingsDefineCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS -------------------------------------------------------------------- SystemDevice - 01_forward.py: PASS -------------------------------------------------------------------- SystemDevice - 02_reverse.py: PASS -------------------------------------------------------------------- SystemDevice - 03_fwderrs.py: PASS -------------------------------------------------------------------- VSSD - 01_enum.py: PASS -------------------------------------------------------------------- VSSD - 02_bootldr.py: SKIP -------------------------------------------------------------------- VSSD - 03_vssd_gi_errs.py: PASS -------------------------------------------------------------------- VSSD - 04_vssd_to_rasd.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 01_definesystem_name.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 02_destroysystem.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 
03_definesystem_ess.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 04_definesystem_ers.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 05_destroysystem_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 06_addresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 07_addresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 08_modifyresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL -------------------------------------------------------------------- VirtualSystemManagementService - 10_hv_version.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 11_define_memrasdunits.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 12_referenced_config.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 13_refconfig_additional_devs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 14_define_sys_disk.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 15_mod_system_settings.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 16_removeresource.py: XFAIL ERROR - 0 RASD insts for domain/mouse:ps2 No such instance (no device domain/mouse:ps2) Bug:<00014> -------------------------------------------------------------------- VirtualSystemManagementService - 17_removeresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationService - 01_migratable_host.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 02_vsmsd_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 01_forward.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 02_reverse.py: PASS 
-------------------------------------------------------------------- VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py: PASS -------------------------------------------------------------------- -------------- next part -------------- An HTML attachment was scrubbed... URL: From yunguol at cn.ibm.com Tue May 5 06:40:11 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Tue, 5 May 2009 14:40:11 +0800 Subject: [Libvirt-cim] Test Run Summary (May 05 2009): KVM on Fedora release 10.90 (Rawhide) with Pegasus Message-ID: ================================================= Test Run Summary (May 05 2009): KVM on Fedora release 10.90 (Rawhide) with Pegasus ================================================= Distro: Fedora release 10.90 (Rawhide) Kernel: 2.6.29-0.24.rc0.git13.fc11.x86_64 libvirt: 0.6.3 Hypervisor: QEMU 0.10.1 CIMOM: Pegasus 2.7.2 Libvirt-cim revision: 863 Libvirt-cim changeset: 68e42eb53c40 Cimtest revision: 675 Cimtest changeset: 92caf252c2fa ================================================= FAIL : 6 XFAIL : 3 SKIP : 9 PASS : 133 ----------------- Total : 151 ================================================= FAIL Test Summary: HostSystem - 03_hs_to_settdefcap.py: FAIL RASD - 05_disk_rasd_emu_type.py: FAIL ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL SettingsDefineCapabilities - 01_forward.py: FAIL SystemDevice - 01_forward.py: FAIL ================================================= XFAIL Test Summary: ComputerSystem - 33_suspend_reboot.py: XFAIL VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL VirtualSystemManagementService - 16_removeresource.py: XFAIL ================================================= SKIP Test Summary: ComputerSystem - 02_nosystems.py: SKIP LogicalDisk - 02_nodevs.py: SKIP VSSD - 02_bootldr.py: SKIP VirtualSystemMigrationService - 01_migratable_host.py: SKIP VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP ================================================= Full report: -------------------------------------------------------------------- AllocationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- AllocationCapabilities - 02_alloccap_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 01_enum.py: PASS -------------------------------------------------------------------- ComputerSystem - 02_nosystems.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- 
ComputerSystem - 03_defineVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 04_defineStartVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 05_activate_defined_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 06_paused_active_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 22_define_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 23_pause_pause.py: PASS -------------------------------------------------------------------- ComputerSystem - 27_define_pause_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 32_start_reboot.py: PASS -------------------------------------------------------------------- ComputerSystem - 33_suspend_reboot.py: XFAIL ERROR - Got CIM error CIM_ERR_NOT_SUPPORTED: State not supported with return code 7 ERROR - Exception: Unable Suspend dom 'test_domain' InvokeMethod(RequestStateChange): CIM_ERR_NOT_SUPPORTED: State not supported Bug:<00012> -------------------------------------------------------------------- ComputerSystem - 35_start_reset.py: PASS -------------------------------------------------------------------- ComputerSystem - 40_RSC_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 41_cs_to_settingdefinestate.py: PASS -------------------------------------------------------------------- ComputerSystem - 42_cs_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystemIndication - 01_created_indication.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 03_reverse_errs.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 04_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 01_forward.py: PASS -------------------------------------------------------------------- ElementCapabilities - 02_reverse.py: PASS -------------------------------------------------------------------- ElementCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 05_hostsystem_cap.py: PASS -------------------------------------------------------------------- ElementConforms - 01_forward.py: PASS -------------------------------------------------------------------- ElementConforms - 02_reverse.py: PASS -------------------------------------------------------------------- ElementConforms - 03_ectp_fwd_errs.py: PASS -------------------------------------------------------------------- ElementConforms - 04_ectp_rev_errs.py: PASS -------------------------------------------------------------------- ElementSettingData - 01_forward.py: PASS -------------------------------------------------------------------- ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS -------------------------------------------------------------------- 
EnabledLogicalElementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 01_enum.py: PASS -------------------------------------------------------------------- HostSystem - 02_hostsystem_to_rasd.py: PASS -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - 'KVM_SettingsDefineCapabilities' returned 8 RASD objects instead of 4 CIM_ERR_INVALID_CLASS: Linux_ComputerSystem -------------------------------------------------------------------- HostSystem - 04_hs_to_EAPF.py: PASS -------------------------------------------------------------------- HostSystem - 05_hs_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 06_hs_to_vsms.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 01_forward.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 01_forward.py: PASS -------------------------------------------------------------------- HostedDependency - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 03_enabledstate.py: PASS -------------------------------------------------------------------- HostedDependency - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 01_forward.py: PASS -------------------------------------------------------------------- HostedResourcePool - 02_reverse.py: PASS -------------------------------------------------------------------- HostedResourcePool - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedService - 01_forward.py: PASS -------------------------------------------------------------------- HostedService - 02_reverse.py: PASS -------------------------------------------------------------------- HostedService - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedService - 04_reverse_errs.py: PASS -------------------------------------------------------------------- KVMRedirectionSAP - 01_enum_KVMredSAP.py: PASS -------------------------------------------------------------------- LogicalDisk - 01_disk.py: PASS -------------------------------------------------------------------- LogicalDisk - 02_nodevs.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- LogicalDisk - 03_ld_gi_errs.py: PASS -------------------------------------------------------------------- Memory - 01_memory.py: PASS -------------------------------------------------------------------- Memory - 02_defgetmem.py: PASS -------------------------------------------------------------------- Memory - 03_mem_gi_errs.py: PASS -------------------------------------------------------------------- NetworkPort - 01_netport.py: PASS -------------------------------------------------------------------- NetworkPort - 02_np_gi_errors.py: PASS -------------------------------------------------------------------- NetworkPort - 
03_user_netport.py: PASS -------------------------------------------------------------------- Processor - 01_processor.py: PASS -------------------------------------------------------------------- Processor - 02_definesys_get_procs.py: PASS -------------------------------------------------------------------- Processor - 03_proc_gi_errs.py: PASS -------------------------------------------------------------------- Profile - 01_enum.py: PASS -------------------------------------------------------------------- Profile - 02_profile_to_elec.py: PASS -------------------------------------------------------------------- Profile - 03_rprofile_gi_errs.py: PASS -------------------------------------------------------------------- RASD - 01_verify_rasd_fields.py: PASS -------------------------------------------------------------------- RASD - 02_enum.py: PASS -------------------------------------------------------------------- RASD - 03_rasd_errs.py: PASS -------------------------------------------------------------------- RASD - 04_disk_rasd_size.py: PASS -------------------------------------------------------------------- RASD - 05_disk_rasd_emu_type.py: FAIL ERROR - Exception: EmulatedType Mismatch: got 0,expected 1 -------------------------------------------------------------------- RedirectionService - 01_enum_crs.py: PASS -------------------------------------------------------------------- RedirectionService - 02_enum_crscap.py: PASS -------------------------------------------------------------------- RedirectionService - 03_RedirectionSAP_errs.py: PASS -------------------------------------------------------------------- ReferencedProfile - 01_verify_refprof.py: PASS -------------------------------------------------------------------- ReferencedProfile - 02_refprofile_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 03_forward_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 05_RAPF_err.py: PASS -------------------------------------------------------------------- ResourcePool - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePool - 02_rp_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description CIM_ERR_INVALID_PARAMETER InvokeMethod(CreateChildResourcePool): CIM_ERR_INVALID_PARAMETER 
-------------------------------------------------------------------- ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description CIM_ERR_INVALID_PARAMETER InvokeMethod(DeleteResourcePool): CIM_ERR_INVALID_PARAMETER -------------------------------------------------------------------- ServiceAccessBySAP - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAccessBySAP - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 01_forward.py: PASS -------------------------------------------------------------------- SettingsDefine - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 03_sds_fwd_errs.py: PASS -------------------------------------------------------------------- SettingsDefine - 04_sds_rev_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 01_forward.py: FAIL ERROR - KVM_SettingsDefineCapabilities returned 8 ResourcePool objects instead of 4 -------------------------------------------------------------------- SettingsDefineCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS -------------------------------------------------------------------- SystemDevice - 01_forward.py: FAIL 01_forward.py:29: DeprecationWarning: the sets module is deprecated from sets import Set ERROR - DeviceID mismatch ERROR - Exception Expected DeviceID: ['test_domain/0', 'test_domain/1', 'test_domain/2'] Got: [u'test_domain/0'] -------------------------------------------------------------------- SystemDevice - 02_reverse.py: PASS -------------------------------------------------------------------- SystemDevice - 03_fwderrs.py: PASS -------------------------------------------------------------------- VSSD - 01_enum.py: PASS -------------------------------------------------------------------- VSSD - 02_bootldr.py: SKIP -------------------------------------------------------------------- VSSD - 03_vssd_gi_errs.py: PASS -------------------------------------------------------------------- VSSD - 04_vssd_to_rasd.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 01_definesystem_name.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 02_destroysystem.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 03_definesystem_ess.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 04_definesystem_ers.py: PASS 
-------------------------------------------------------------------- VirtualSystemManagementService - 05_destroysystem_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 06_addresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 07_addresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 08_modifyresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL -------------------------------------------------------------------- VirtualSystemManagementService - 10_hv_version.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 11_define_memrasdunits.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 12_referenced_config.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 13_refconfig_additional_devs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 14_define_sys_disk.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 15_mod_system_settings.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 16_removeresource.py: XFAIL ERROR - 0 RASD insts for domain/mouse:ps2 CIM_ERR_NOT_FOUND: No such instance (no device domain/mouse:ps2) Bug:<00014> -------------------------------------------------------------------- VirtualSystemManagementService - 17_removeresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationService - 01_migratable_host.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 02_vsmsd_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 01_forward.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 02_reverse.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS 
-------------------------------------------------------------------- VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py: PASS -------------------------------------------------------------------- -------------- next part -------------- An HTML attachment was scrubbed... URL: From rmaciel at linux.vnet.ibm.com Tue May 5 20:22:03 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Tue, 05 May 2009 17:22:03 -0300 Subject: [Libvirt-cim] [PATCH] Work around for sfcb's lack of association handling In-Reply-To: <1cb3975921d590d4dda4.1241031321@localhost.localdomain> References: <1cb3975921d590d4dda4.1241031321@localhost.localdomain> Message-ID: <4A009FEB.1060200@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1240873892 25200 > # Node ID 1cb3975921d590d4dda4de197a4dc687e45d1840 > # Parent d4a9e07d6738f76780bcb1ada5b7c0dbc57e4e0f > Work around for sfcb's lack of association handling > > If a association provider has multiple classes listed, and a user queries with > the super class for the classes listed in the registration, the provider is > not called each time for each class. Instead, the provider is called once. > > This logic detects whether the association classname specified for by the user > is an exact match of one of the classnames in the registration. If it is, > the provider is only called once. If the classname is a super class, then the > provider is called once for each class listed in the registration. 
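As a rough sketch of the dispatch rule described above (illustrative Python, not the provider's C code), the decision reduces to: call the handler once when the requested association class exactly matches a registered classname, otherwise fan out over every registered classname:

# Illustrative model of the dispatch rule; 'requested' is the association
# class named by the client, 'registered' the provider's registered names.
def classes_to_call(requested, registered):
    if requested is not None and requested in registered:
        # Exact match of a registered classname: single handler call.
        return [requested]
    # No class given, or a superclass: one handler call per registered name.
    return list(registered)

# A query by the superclass fans out to all three registered classes:
print(classes_to_call("CIM_ElementAllocatedFromPool",
                      ["Xen_ElementAllocatedFromPool",
                       "KVM_ElementAllocatedFromPool",
                       "LXC_ElementAllocatedFromPool"]))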
> > Signed-off-by: Kaitlin Rupert > > diff -r d4a9e07d6738 -r 1cb3975921d5 std_association.c > --- a/std_association.c Thu Jan 22 11:33:20 2009 -0800 > +++ b/std_association.c Mon Apr 27 16:11:32 2009 -0700 > @@ -274,6 +274,56 @@ > return s; > } > > +static bool do_generic_assoc_call(struct std_assoc_info *info, > + struct std_assoc *handler) > +{ > + int i; > + > + if (info->assoc_class == NULL) { > + return true; > + } else { > + for (i = 0; handler->assoc_class[i]; i++) { > + if (STREQ(info->assoc_class, handler->assoc_class[i])) > + return false; > + } > + } > + > + return true; > +} > + > +static CMPIStatus handle_assoc(struct std_assoc_info *info, > + const CMPIObjectPath *ref, > + struct std_assoc *handler, > + struct inst_list *list) > +{ > + CMPIStatus s = {CMPI_RC_OK, NULL}; > + int i; > + > + if (do_generic_assoc_call(info, handler)) { > + for (i = 0; handler->assoc_class[i]; i++) { > + info->assoc_class = handler->assoc_class[i]; > + > + CU_DEBUG("Calling handler ..."); > + s = handler->handler(ref, info, list); > + if (s.rc != CMPI_RC_OK) { > + CU_DEBUG("Handler did not return CMPI_RC_OK."); > + goto out; > + } > + } > + } else { > + CU_DEBUG("Calling handler ..."); > + s = handler->handler(ref, info, list); > + if (s.rc != CMPI_RC_OK) { > + CU_DEBUG("Handler did not return CMPI_RC_OK."); > + goto out; > + } > + } > + CU_DEBUG("Handler returned CMPI_RC_OK."); > + > + out: > + return s; > +} > + > static CMPIStatus do_assoc(struct std_assoc_ctx *ctx, > struct std_assoc_info *info, > const CMPIResult *results, > @@ -284,6 +334,7 @@ > CMPIStatus s = {CMPI_RC_OK, NULL}; > struct inst_list list; > struct std_assoc *handler; > + int i; > > CU_DEBUG("Getting handler ..."); > handler = std_assoc_get_handler(ctx, info, ref); > @@ -295,13 +346,23 @@ > > inst_list_init(&list); > > - CU_DEBUG("Calling handler ..."); > - s = handler->handler(ref, info, &list); > - if (s.rc != CMPI_RC_OK) { > - CU_DEBUG("Handler did not return CMPI_RC_OK."); > - goto out; > + if (do_generic_assoc_call(info, handler)) { > + for (i = 0; handler->assoc_class[i]; i++) { > + info->assoc_class = handler->assoc_class[i]; > + > + s = handle_assoc(info, ref, handler, &list); > + if (s.rc != CMPI_RC_OK) { > + CU_DEBUG("Failed to handle association"); > + goto out; > + } > + } > + } else { > + s = handle_assoc(info, ref, handler, &list); > + if (s.rc != CMPI_RC_OK) { > + CU_DEBUG("Failed to handle association"); > + goto out; > + } > } > - CU_DEBUG("Handler returned CMPI_RC_OK."); > > /* References and ReferenceNames */ > if (ref_rslt) > @@ -320,6 +381,7 @@ > CU_DEBUG("Prepare return list did not return CMPI_RC_OK."); > goto out; > } > + > CU_DEBUG("Returned %u instance(s).", list.cur); > > if (names_only) > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Wed May 6 01:24:37 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Tue, 05 May 2009 18:24:37 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] Add XML generation for netfs disk pools In-Reply-To: References: Message-ID: <0bbc6afa622ccea1b57b.1241573077@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1241572138 25200 # Node ID 0bbc6afa622ccea1b57b5a92dbf228a61b6aee6b # Parent aa607e00fcf92c40f6fa3e5ca311f68433395661 Add XML generation for netfs disk pools Signed-off-by: Kaitlin Rupert diff -r aa607e00fcf9 -r 
0bbc6afa622c libxkutil/pool_parsing.c --- a/libxkutil/pool_parsing.c Fri May 01 13:02:34 2009 -0700 +++ b/libxkutil/pool_parsing.c Tue May 05 18:08:58 2009 -0700 @@ -55,6 +55,8 @@ static void cleanup_disk_pool(struct disk_pool pool) { free(pool.path); free(pool.device_path); + free(pool.host); + free(pool.src_dir); } void cleanup_virt_pool(struct virt_pool **pool) diff -r aa607e00fcf9 -r 0bbc6afa622c libxkutil/pool_parsing.h --- a/libxkutil/pool_parsing.h Fri May 01 13:02:34 2009 -0700 +++ b/libxkutil/pool_parsing.h Tue May 05 18:08:58 2009 -0700 @@ -46,6 +46,8 @@ DISK_POOL_LOGICAL} pool_type; char *path; char *device_path; + char *host; + char *src_dir; }; struct virt_pool { diff -r aa607e00fcf9 -r 0bbc6afa622c libxkutil/xmlgen.c --- a/libxkutil/xmlgen.c Fri May 01 13:02:34 2009 -0700 +++ b/libxkutil/xmlgen.c Tue May 05 18:08:58 2009 -0700 @@ -847,16 +847,61 @@ return NULL; } +static const char *set_disk_pool_source(xmlNodePtr disk, + struct disk_pool *pool) +{ + xmlNodePtr src; + xmlNodePtr tmp; + + src = xmlNewChild(disk, NULL, BAD_CAST "source", NULL); + if (src == NULL) + return XML_ERROR; + + if (pool->device_path != NULL) { + tmp = xmlNewChild(src, NULL, BAD_CAST "device", BAD_CAST NULL); + if (tmp == NULL) + return XML_ERROR; + + if (xmlNewProp(tmp, + BAD_CAST "path", + BAD_CAST pool->device_path) == NULL) + return XML_ERROR; + } + + if (pool->host != NULL) { + tmp = xmlNewChild(src, NULL, BAD_CAST "host", BAD_CAST NULL); + if (tmp == NULL) + return XML_ERROR; + + if (xmlNewProp(tmp, + BAD_CAST "name", + BAD_CAST pool->host) == NULL) + return XML_ERROR; + } + + if (pool->src_dir != NULL) { + tmp = xmlNewChild(src, NULL, BAD_CAST "dir", BAD_CAST NULL); + if (tmp == NULL) + return XML_ERROR; + + if (xmlNewProp(tmp, + BAD_CAST "path", + BAD_CAST pool->src_dir) == NULL) + return XML_ERROR; + } + + return NULL; +} + static const char *disk_pool_xml(xmlNodePtr root, struct virt_pool *_pool) { xmlNodePtr disk = NULL; xmlNodePtr name = NULL; - xmlNodePtr src = NULL; - xmlNodePtr dev = NULL; xmlNodePtr target = NULL; xmlNodePtr path = NULL; const char *type = NULL; + const char *msg = NULL; struct disk_pool *pool = &_pool->pool_info.disk; type = disk_pool_type_to_str(pool->pool_type); @@ -874,19 +919,10 @@ if (name == NULL) goto out; - if (pool->device_path != NULL) { - src = xmlNewChild(disk, NULL, BAD_CAST "source", NULL); - if (src == NULL) - goto out; - - dev = xmlNewChild(src, NULL, BAD_CAST "device", BAD_CAST NULL); - if (dev == NULL) - goto out; - - if (xmlNewProp(dev, - BAD_CAST "path", - BAD_CAST pool->device_path) == NULL) - goto out; + if (pool->pool_type != DISK_POOL_DIR) { + msg = set_disk_pool_source(disk, pool); + if (msg != NULL) + return msg; } target = xmlNewChild(disk, NULL, BAD_CAST "target", NULL); From kaitlin at linux.vnet.ibm.com Wed May 6 01:24:36 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Tue, 05 May 2009 18:24:36 -0700 Subject: [Libvirt-cim] [PATCH 0 of 2] Add netfs storage pool support Message-ID: This adds support for the netfs type storage pool. 
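For reference, a minimal Python sketch of the <source> element shape the new set_disk_pool_source() code produces for a netfs pool. It uses xml.etree rather than the provider's libxml2 calls, and the pool name, host, and paths are made-up values; element and attribute names follow the diff above.

# Minimal sketch of the netfs pool XML generated by the patch; the
# host/dir/path values here are illustrative only.
import xml.etree.ElementTree as ET

pool = ET.Element("pool", type="netfs")
ET.SubElement(pool, "name").text = "netfs_pool"

src = ET.SubElement(pool, "source")
ET.SubElement(src, "host", name="nfs.example.com")    # pool->host
ET.SubElement(src, "dir", path="/var/lib/images")     # pool->src_dir

target = ET.SubElement(pool, "target")
ET.SubElement(target, "path").text = "/mnt/netfs_pool"

print(ET.tostring(pool).decode())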
From kaitlin at linux.vnet.ibm.com Wed May 6 01:24:38 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Tue, 05 May 2009 18:24:38 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] Add netfs disk pool attributes to schema, RPCS support, and template RASDs In-Reply-To: References: Message-ID: <570c3507c7b2c3e55680.1241573078@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1241572138 25200 # Node ID 570c3507c7b2c3e55680e9a70f4889accb9a1cf7 # Parent 0bbc6afa622ccea1b57b5a92dbf228a61b6aee6b Add netfs disk pool attributes to schema, RPCS support, and template RASDs Signed-off-by: Kaitlin Rupert diff -r 0bbc6afa622c -r 570c3507c7b2 schema/ResourceAllocationSettingData.mof --- a/schema/ResourceAllocationSettingData.mof Tue May 05 18:08:58 2009 -0700 +++ b/schema/ResourceAllocationSettingData.mof Tue May 05 18:08:58 2009 -0700 @@ -223,6 +223,8 @@ uint16 Type; string Path; string DevicePath; + string Host; + string SourceDirectory; }; [Description ("KVM virtual disk pool settings"), @@ -237,6 +239,8 @@ uint16 Type; string Path; string DevicePath; + string Host; + string SourceDirectory; }; [Description ("LXC virtual disk pool settings"), @@ -251,5 +255,7 @@ uint16 Type; string Path; string DevicePath; + string Host; + string SourceDirectory; }; diff -r 0bbc6afa622c -r 570c3507c7b2 src/Virt_ResourcePoolConfigurationService.c --- a/src/Virt_ResourcePoolConfigurationService.c Tue May 05 18:08:58 2009 -0700 +++ b/src/Virt_ResourcePoolConfigurationService.c Tue May 05 18:08:58 2009 -0700 @@ -144,6 +144,8 @@ { pool->pool_info.disk.device_path = NULL; pool->pool_info.disk.path = NULL; + pool->pool_info.disk.host = NULL; + pool->pool_info.disk.src_dir = NULL; } static const char *disk_fs_pool(CMPIInstance *inst, @@ -159,6 +161,24 @@ return NULL; } +static const char *disk_netfs_pool(CMPIInstance *inst, + struct virt_pool *pool) +{ + const char *val = NULL; + + if (cu_get_str_prop(inst, "Host", &val) != CMPI_RC_OK) + return "Missing `Host' property"; + + pool->pool_info.disk.host = strdup(val); + + if (cu_get_str_prop(inst, "SourceDirectory", &val) != CMPI_RC_OK) + return "Missing `SourceDirectory' property"; + + pool->pool_info.disk.src_dir = strdup(val); + + return NULL; +} + static const char *disk_rasd_to_pool(CMPIInstance *inst, struct virt_pool *pool) { @@ -179,6 +199,11 @@ if (msg != NULL) goto out; break; + case DISK_POOL_NETFS: + msg = disk_netfs_pool(inst, pool); + if (msg != NULL) + goto out; + break; default: return "Storage pool type not supported"; } diff -r 0bbc6afa622c -r 570c3507c7b2 src/Virt_SettingsDefineCapabilities.c --- a/src/Virt_SettingsDefineCapabilities.c Tue May 05 18:08:58 2009 -0700 +++ b/src/Virt_SettingsDefineCapabilities.c Tue May 05 18:08:58 2009 -0700 @@ -1240,8 +1240,10 @@ CMPIStatus s = {CMPI_RC_OK, NULL}; const char *path = "/dev/null"; const char *dev_path; - int type[2] = {DISK_POOL_DIR, DISK_POOL_FS}; - int pool_types = 2; + const char *host; + const char *src_dir; + int type[3] = {DISK_POOL_DIR, DISK_POOL_FS, DISK_POOL_NETFS}; + int pool_types = 3; int i; switch (template_type) { @@ -1277,6 +1279,15 @@ CMSetProperty(inst, "DevicePath", (CMPIValue *)dev_path, CMPI_chars); break; + case DISK_POOL_NETFS: + host = "host_sys.domain.com"; + CMSetProperty(inst, "Host", + (CMPIValue *)host, CMPI_chars); + + src_dir = "/var/lib/images"; + CMSetProperty(inst, "SourceDirectory", + (CMPIValue *)src_dir, CMPI_chars); + break; default: break; } From kaitlin at linux.vnet.ibm.com Thu May 7 00:37:04 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin 
Rupert) Date: Wed, 06 May 2009 17:37:04 -0700 Subject: [Libvirt-cim] [PATCH 0 of 2] Parent pool <--> child pool support for EAFP Message-ID: From kaitlin at linux.vnet.ibm.com Thu May 7 00:37:06 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 06 May 2009 17:37:06 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] EAFP between parent and child pools In-Reply-To: References: Message-ID: <5608b9455cd32fccbc32.1241656626@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1240877148 25200 # Node ID 5608b9455cd32fccbc324cd540c509d7230a113f # Parent e132a8e94381d218d39eb2fffc94b27a438abb32 EAFP between parent and child pools. Signed-off-by: Kaitlin Rupert diff -r e132a8e94381 -r 5608b9455cd3 src/Virt_ElementAllocatedFromPool.c --- a/src/Virt_ElementAllocatedFromPool.c Mon Apr 27 17:05:48 2009 -0700 +++ b/src/Virt_ElementAllocatedFromPool.c Mon Apr 27 17:05:48 2009 -0700 @@ -89,18 +89,27 @@ free(poolid); return s; + } -static int filter_by_pool(struct inst_list *dest, - struct inst_list *src, - const uint16_t type, - const char *_poolid) +static CMPIStatus get_dev_from_pool(const CMPIObjectPath *ref, + const uint16_t type, + const char *_poolid, + struct inst_list *list) { + CMPIStatus s = {CMPI_RC_OK, NULL}; + char *poolid = NULL; + struct inst_list tmp; int i; - char *poolid = NULL; - for (i = 0; i < src->cur; i++) { - CMPIInstance *inst = src->list[i]; + inst_list_init(&tmp); + + s = enum_devices(_BROKER, ref, NULL, type, &tmp); + if (s.rc != CMPI_RC_OK) + goto out; + + for (i = 0; i < tmp.cur; i++) { + CMPIInstance *inst = tmp.list[i]; const char *cn = NULL; const char *dev_id = NULL; @@ -112,21 +121,76 @@ poolid = pool_member_of(_BROKER, cn, type, dev_id); if (poolid && STREQ(poolid, _poolid)) - inst_list_add(dest, inst); + inst_list_add(list, inst); } - return dest->cur; + inst_list_free(&tmp); + + out: + + return s; } -static CMPIStatus pool_to_vdev(const CMPIObjectPath *ref, - struct std_assoc_info *info, - struct inst_list *list) +static CMPIStatus get_pools(const CMPIObjectPath *ref, + const uint16_t type, + const char *poolid, + CMPIInstance *pool_inst, + struct inst_list *list) +{ + CMPIStatus s = {CMPI_RC_OK, NULL}; + CMPIInstance *pool = NULL; + bool val; + + if (cu_get_bool_prop(pool_inst, "Primordial", &val) != CMPI_RC_OK) { + cu_statusf(_BROKER, &s, + CMPI_RC_ERR_FAILED, + "Unable to determine pool type"); + goto out; + } + + /* If Primordial is true, the pool is a parent pool. Need to return + all other pools. Otherwise, just return the parent pool. 
*/ + if (val) { + struct inst_list tmp; + int i; + + inst_list_init(&tmp); + + s = enum_pools(_BROKER, ref, type, &tmp); + if (s.rc != CMPI_RC_OK) + goto out; + + for (i = 0; i < tmp.cur; i++) { + CMPIInstance *inst = tmp.list[i]; + const char *id = NULL; + + cu_get_str_prop(inst, "InstanceID", &id); + + if (!STREQC(id, poolid)) + inst_list_add(list, inst); + } + + inst_list_free(&tmp); + } else { + pool = parent_device_pool(_BROKER, ref, type, &s); + if (s.rc != CMPI_RC_OK) + goto out; + + inst_list_add(list, pool); + } + + out: + return s; +} + +static CMPIStatus pool_to_vdev_or_pool(const CMPIObjectPath *ref, + struct std_assoc_info *info, + struct inst_list *list) { const char *poolid; CMPIStatus s = {CMPI_RC_OK, NULL}; uint16_t type; CMPIInstance *inst = NULL; - struct inst_list tmp; if (!match_hypervisor_prefix(ref, info)) return s; @@ -150,15 +214,11 @@ goto out; } - inst_list_init(&tmp); - - s = enum_devices(_BROKER, ref, NULL, type, &tmp); + s = get_dev_from_pool(ref, type, poolid, list); if (s.rc != CMPI_RC_OK) goto out; - filter_by_pool(list, &tmp, type, poolid); - - inst_list_free(&tmp); + s = get_pools(ref, type, poolid, inst, list); out: return s; @@ -166,7 +226,7 @@ LIBVIRT_CIM_DEFAULT_MAKEREF() -static char* antecedent[] = { +static char* pool[] = { "Xen_ProcessorPool", "Xen_MemoryPool", "Xen_NetworkPool", @@ -188,7 +248,7 @@ NULL }; -static char* dependent[] = { +static char* device[] = { "Xen_Processor", "Xen_Memory", "Xen_NetworkPort", @@ -210,6 +270,46 @@ NULL }; +static char* device_or_pool[] = { + "Xen_Processor", + "Xen_Memory", + "Xen_NetworkPort", + "Xen_LogicalDisk", + "Xen_DisplayController", + "Xen_PointingDevice", + "KVM_Processor", + "KVM_Memory", + "KVM_NetworkPort", + "KVM_LogicalDisk", + "KVM_DisplayController", + "KVM_PointingDevice", + "LXC_Processor", + "LXC_Memory", + "LXC_NetworkPort", + "LXC_LogicalDisk", + "LXC_DisplayController", + "LXC_PointingDevice", + "Xen_ProcessorPool", + "Xen_MemoryPool", + "Xen_NetworkPool", + "Xen_DiskPool", + "Xen_GraphicsPool", + "Xen_InputPool", + "KVM_ProcessorPool", + "KVM_MemoryPool", + "KVM_NetworkPool", + "KVM_DiskPool", + "KVM_GraphicsPool", + "KVM_InputPool", + "LXC_ProcessorPool", + "LXC_MemoryPool", + "LXC_NetworkPool", + "LXC_DiskPool", + "LXC_GraphicsPool", + "LXC_InputPool", + NULL +}; + static char* assoc_classname[] = { "Xen_ElementAllocatedFromPool", "KVM_ElementAllocatedFromPool", @@ -218,10 +318,10 @@ }; static struct std_assoc _vdev_to_pool = { - .source_class = (char**)&dependent, + .source_class = (char**)&device, .source_prop = "Dependent", - .target_class = (char**)&antecedent, + .target_class = (char**)&pool, .target_prop = "Antecedent", .assoc_class = (char**)&assoc_classname, @@ -230,22 +330,22 @@ .make_ref = make_ref }; -static struct std_assoc _pool_to_vdev = { - .source_class = (char**)&antecedent, +static struct std_assoc _pool_to_vdev_or_pool = { + .source_class = (char**)&pool, .source_prop = "Antecedent", - .target_class = (char**)&dependent, + .target_class = (char**)&device_or_pool, .target_prop = "Dependent", .assoc_class = (char**)&assoc_classname, - .handler = pool_to_vdev, + .handler = pool_to_vdev_or_pool, .make_ref = make_ref }; static struct std_assoc *handlers[] = { &_vdev_to_pool, - &_pool_to_vdev, + &_pool_to_vdev_or_pool, NULL }; From kaitlin at linux.vnet.ibm.com Thu May 7 00:37:05 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 06 May 2009 17:37:05 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] Add parent_device_pool() which returns the parent pool for a 
given device type In-Reply-To: References: Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1240877148 25200 # Node ID e132a8e94381d218d39eb2fffc94b27a438abb32 # Parent 570c3507c7b2c3e55680e9a70f4889accb9a1cf7 Add parent_device_pool() which returns the parent pool for a given device type Signed-off-by: Kaitlin Rupert diff -r 570c3507c7b2 -r e132a8e94381 src/Virt_DevicePool.c --- a/src/Virt_DevicePool.c Tue May 05 18:08:58 2009 -0700 +++ b/src/Virt_DevicePool.c Mon Apr 27 17:05:48 2009 -0700 @@ -77,7 +77,7 @@ goto out; } - pools[count].tag = strdup("Parent"); + pools[count].tag = strdup("0"); pools[count].path = NULL; pools[count].primordial = true; count++; @@ -1234,6 +1234,45 @@ return _get_pools(broker, reference, type, NULL, list); } +CMPIInstance *parent_device_pool(const CMPIBroker *broker, + const CMPIObjectPath *reference, + uint16_t type, + CMPIStatus *s) +{ + CMPIInstance *inst = NULL; + const char *id = NULL; + + if (type == CIM_RES_TYPE_MEM) { + id = "MemoryPool/0"; + } else if (type == CIM_RES_TYPE_PROC) { + id = "ProcessorPool/0"; + } else if (type == CIM_RES_TYPE_DISK) { + id = "DiskPool/0"; + } else if (type == CIM_RES_TYPE_NET) { + id = "NetworkPool/0"; + } else if (type == CIM_RES_TYPE_GRAPHICS) { + id = "GraphicsPool/0"; + } else if (type == CIM_RES_TYPE_INPUT) { + id = "InputPool/0"; + } else { + cu_statusf(broker, s, + CMPI_RC_ERR_INVALID_PARAMETER, + "No such device type `%s'", type); + goto out; + } + + *s = get_pool_by_name(broker, reference, id, &inst); + if (inst == NULL) { + cu_statusf(broker, s, + CMPI_RC_ERR_FAILED, + "No default pool found for type %hi", type); + } + + out: + + return inst; +} + CMPIInstance *default_device_pool(const CMPIBroker *broker, const CMPIObjectPath *reference, uint16_t type, @@ -1241,43 +1280,38 @@ { CMPIInstance *inst = NULL; struct inst_list list; + bool val; - inst_list_init(&list); + if ((type == CIM_RES_TYPE_DISK) || (type == CIM_RES_TYPE_NET)) { + int i = 0; + CMPIrc rc; - if (type == CIM_RES_TYPE_MEM) { - *s = get_pool_by_name(broker, reference, "MemoryPool/0", &inst); - } else if (type == CIM_RES_TYPE_PROC) { - *s = get_pool_by_name(broker, reference, "ProcessorPool/0", &inst); - } else if (type == CIM_RES_TYPE_DISK) { + inst_list_init(&list); + *s = enum_pools(broker, reference, type, &list); - if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) - inst = list.list[0]; - } else if (type == CIM_RES_TYPE_NET) { - *s = enum_pools(broker, reference, type, &list); - if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) - inst = list.list[0]; - } else if (type == CIM_RES_TYPE_GRAPHICS) { - *s = get_pool_by_name(broker, - reference, - "GraphicsPool/0", - &inst); - } else if (type == CIM_RES_TYPE_INPUT) { - *s = get_pool_by_name(broker, - reference, - "InputPool/0", - &inst); + if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) { + + for (i = 0; i < list.cur; i++) { + rc = cu_get_bool_prop(list.list[i], + "Primordial", + &val); + if ((rc != CMPI_RC_OK) || (val)) + continue; + + inst = list.list[i]; + break; + } + } + + inst_list_free(&list); + + if (inst == NULL) { + cu_statusf(broker, s, + CMPI_RC_ERR_FAILED, + "No default pool found for type %hi", type); + } } else { - cu_statusf(broker, s, - CMPI_RC_ERR_INVALID_PARAMETER, - "No such device type `%s'", type); - } - - inst_list_free(&list); - - if (inst == NULL) { - cu_statusf(broker, s, - CMPI_RC_ERR_FAILED, - "No default pool found for type %hi", type); + inst = parent_device_pool(broker, reference, type, s); } return inst; diff -r 570c3507c7b2 -r e132a8e94381 src/Virt_DevicePool.h --- 
a/src/Virt_DevicePool.h Tue May 05 18:08:58 2009 -0700 +++ b/src/Virt_DevicePool.h Mon Apr 27 17:05:48 2009 -0700 @@ -106,6 +106,20 @@ CMPIInstance **_inst); /** + * Get the parent pool for a given device type + * + * @param broker A pointer to the current broker + * @param ref The object path containing namespace and prefix info + * @param type The device type in question + * @param status The returned status + * @returns Parent pool instance + */ +CMPIInstance *parent_device_pool(const CMPIBroker *broker, + const CMPIObjectPath *reference, + uint16_t type, + CMPIStatus *s); + +/** * Get the default pool for a given device type * * @param broker A pointer to the current broker From yunguol at cn.ibm.com Thu May 7 05:05:16 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Wed, 06 May 2009 22:05:16 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #5 Update RPCS/04 to validate that the Network child pool can be created through the providers Message-ID: # HG changeset patch # User Guolian Yun # Date 1241671689 25200 # Node ID af273b2ad41c2a42f999155b23122c18a5c5ee8e # Parent 92caf252c2fa8c8a7a9b70548d12b03c52f3935c [TEST] #5 Update RPCS/04 to validate that the Network child pool can be created through the providers Updates from 4 to 5: 1) Move common functions to pool.py Updates from 3 to 4: 1) Move general net function to common.py 2) move the CIM_NS import stmt along with the logger Updates from 2 to 3: 1) Use CIM_NS from const.py instead of hardcoding 2) Check if the IP is already used on the system before setting 3) Rewrite try, except... blocks Updates from 1 to 2: Test all types of networkpool including routed network, nat based network and isolated network Tested for KVM with current sources Signed-off-by: Guolian Yun diff -r 92caf252c2fa -r af273b2ad41c suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Mon May 04 03:49:32 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Wed May 06 21:48:09 2009 -0700 @@ -39,45 +39,47 @@ # OUT -- Error -- String -- Encoded error instance if the operation # failed and did not return a job # -# REVISIT : -# -------- -# As of now the CreateChildResourcePool() simply throws an Exception. -# We must improve this tc once the service is implemented. -# -# -Date: 20.02.2008 - +# Exception details before Revision 837 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 837, the service is implemented +# +# -Date: 20.02.2008 import sys -import pywbem -from XenKvmLib import rpcs_service from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class +from XenKvmLib.common_util import destroy_netpool +from XenKvmLib.pool import undefine_netpool, create_verify_netpool -cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED -cim_mname = "CreateChildResourcePool" +test_pool = ["routedpool", "natpool", "isolatedpool"] @do_main(platform_sup) def main(): + status = PASS options = main.options - rpcs_conn = eval("rpcs_service." 
+ get_typed_class(options.virt, \ - "ResourcePoolConfigurationService"))(options.ip) - try: - rpcs_conn.CreateChildResourcePool() - except pywbem.CIMError, (err_no, desc): - if err_no == cim_errno : - logger.info("Got expected exception for '%s' service", cim_mname) - logger.info("Errno is '%s' ", err_no) - logger.info("Error string is '%s'", desc) - return PASS - else: - logger.error("Unexpected rc code %s and description %s\n", - err_no, desc) + + np = get_typed_class(options.virt, 'NetworkPool') + for i in range(0, len(test_pool)): + status = create_verify_netpool(options.ip, options.virt, test_pool[i]) + if status != PASS: + logger.error("Error in networkpool creation and verification") return FAIL - - logger.error("The execution should not have reached here!!") - return FAIL + + status = destroy_netpool(options.ip, options.virt, test_pool[i]) + if status != PASS: + logger.error("Unable to destroy networkpool %s", test_pool[i]) + status = FAIL + + status = undefine_netpool(options.ip, options.virt, test_pool[i]) + if status != PASS: + logger.error("Unable to undefine networkpool %s", test_pool[i]) + status = FAIL + + return status + if __name__ == "__main__": sys.exit(main()) - diff -r 92caf252c2fa -r af273b2ad41c suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Mon May 04 03:49:32 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed May 06 21:48:09 2009 -0700 @@ -21,15 +21,21 @@ # import sys -from CimTest.Globals import logger +from CimTest.Globals import logger, CIM_NS from CimTest.ReturnCodes import PASS, FAIL from XenKvmLib.classes import get_typed_class from XenKvmLib.const import get_provider_version, default_pool_name from XenKvmLib.enumclass import EnumInstances from VirtLib.utils import run_remote -from XenKvmLib.xm_virt_util import virt2uri +from XenKvmLib.xm_virt_util import virt2uri, net_list +from XenKvmLib import rpcs_service +from pywbem.cim_obj import CIMInstance, CIMInstanceName +import pywbem +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED +cim_mname = "CreateChildResourcePool" input_graphics_pool_rev = 757 +libvirt_cim_child_pool_rev = 837 def pool_cn_to_rasd_cn(pool_cn, virt): if pool_cn.find('ProcessorPool') >= 0: @@ -97,3 +103,94 @@ return volume +def net_undefine(network, server, virt="Xen"): + """Function undefine a given virtual network""" + + cmd = "virsh -c %s net-undefine %s" % (virt2uri(virt), network) + ret, out = run_remote(server, cmd) + + return ret + +def undefine_netpool(server, virt, net_name): + if net_name == None: + return FAIL + + ret = net_undefine(net_name, server, virt) + if ret != 0: + logger.error("Failed to undefine Virtual Network '%s'", net_name) + return FAIL + + return PASS + +def create_verify_netpool(server, virt, test_pool): + status = FAIL + rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") + rpcs_conn = eval("rpcs_service." 
+ rpcs)(server) + curr_cim_rev, changeset = get_provider_version(virt, server) + if curr_cim_rev < libvirt_cim_child_pool_rev: + try: + rpcs_conn.CreateChildResourcePool() + except pywbem.CIMError, (err_no, desc): + if err_no == cim_errno : + logger.info("Got expected exception for '%s'service", cim_mname) + logger.info("Errno is '%s' ", err_no) + logger.info("Error string is '%s'", desc) + return PASS + else: + logger.error("Unexpected rc code %s and description %s\n", + err_no, desc) + return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + nprasd = get_typed_class(virt, + 'NetPoolResourceAllocationSettingData') + addr = "192.168.0.9" + n_list = net_list(server, virt) + for _net_name in n_list: + cmd = "virsh net-dumpxml %s | awk '/ip address/ {print}' | \ + cut -d ' ' -f 4 | sed 's/address=//'" % _net_name + s, in_use_addr = run_remote(server, cmd) + in_use_addr = in_use_addr.strip("'") + if in_use_addr == addr: + logger.error("IP address is in use by a different network") + return FAIL + np_prop = { + "Address" : addr, + "Netmask" : "255.255.255.0", + "IPRangeStart" : "192.168.0.31", + "IPRangeEnd" : "192.168.0.57", + } + np_id = 'NetworkPool/%s' % test_pool + iname = CIMInstanceName(nprasd, + namespace = CIM_NS, + keybindings = {'InstanceID':np_id}) + if test_pool == "routedpool": + np_prop["ForwardMode"] = "route eth1" + elif test_pool == "natpool": + np_prop["ForwardMode"] = "nat" + + nrasd = CIMInstance(nprasd, path = iname, properties = np_prop) + try: + rpcs_conn.CreateChildResourcePool(ElementName=test_pool, + Settings=[nrasd.tomof()]) + except Exception, details: + logger.error("Error in childpool creation") + logger.error(details) + return FAIL + + networkpool = get_typed_class(virt, 'NetworkPool') + pool_list = EnumInstances(server, networkpool) + if len(pool_list) < 1: + logger.error("Return %i instances, expected at least one instance", + len(pool_list)) + return FAIL + + for i in range(0, len(pool_list)): + ret_pool = pool_list[i].InstanceID + if ret_pool == np_id: + status = PASS + break + elif ret_pool != poolname and i == len(pool_list)-1: + logger.error("Can not find expected pool") + return FAIL + + return status From kaitlin at linux.vnet.ibm.com Thu May 7 05:41:59 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 06 May 2009 22:41:59 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #5 Update RPCS/04 to validate that theNetwork child pool can be created through the providers In-Reply-To: References: Message-ID: <4A0274A7.1020507@linux.vnet.ibm.com> > - logger.error("The execution should not have reached here!!") > - return FAIL > + > + status = destroy_netpool(options.ip, options.virt, test_pool[i]) > + if status != PASS: > + logger.error("Unable to destroy networkpool %s", test_pool[i]) > + status = FAIL Should return here. Otherwise, if you're able to clean up the 2nd and 3rd pool, but not the first, the test will return PASS instead of FAIL. > + > + status = undefine_netpool(options.ip, options.virt, test_pool[i]) > + if status != PASS: > + logger.error("Unable to undefine networkpool %s", test_pool[i]) > + status = FAIL Same issue here. 
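A minimal sketch of the fail-fast cleanup being asked for here, reusing the helpers the test already imports (a fragment of main(), not a standalone script): return on the first cleanup failure so a later successful iteration cannot mask it as an overall PASS.

# Sketch of the requested pattern: bail out on the first cleanup failure.
for pool_name in test_pool:
    if destroy_netpool(options.ip, options.virt, pool_name) != PASS:
        logger.error("Unable to destroy networkpool %s", pool_name)
        return FAIL

    if undefine_netpool(options.ip, options.virt, pool_name) != PASS:
        logger.error("Unable to undefine networkpool %s", pool_name)
        return FAIL

return PASS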
> + > + return status > + > if __name__ == "__main__": > sys.exit(main()) > - > diff -r 92caf252c2fa -r af273b2ad41c suites/libvirt-cim/lib/XenKvmLib/pool.py > --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Mon May 04 03:49:32 2009 -0700 > +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed May 06 21:48:09 2009 -0700 > @@ -21,15 +21,21 @@ > # > > import sys > -from CimTest.Globals import logger > +from CimTest.Globals import logger, CIM_NS > from CimTest.ReturnCodes import PASS, FAIL > from XenKvmLib.classes import get_typed_class > from XenKvmLib.const import get_provider_version, default_pool_name > from XenKvmLib.enumclass import EnumInstances > from VirtLib.utils import run_remote > -from XenKvmLib.xm_virt_util import virt2uri > +from XenKvmLib.xm_virt_util import virt2uri, net_list > +from XenKvmLib import rpcs_service > +from pywbem.cim_obj import CIMInstance, CIMInstanceName > +import pywbem > > +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > +cim_mname = "CreateChildResourcePool" > input_graphics_pool_rev = 757 > +libvirt_cim_child_pool_rev = 837 > > def pool_cn_to_rasd_cn(pool_cn, virt): > if pool_cn.find('ProcessorPool') >= 0: > @@ -97,3 +103,94 @@ > > return volume > > +def net_undefine(network, server, virt="Xen"): > + """Function undefine a given virtual network""" > + > + cmd = "virsh -c %s net-undefine %s" % (virt2uri(virt), network) > + ret, out = run_remote(server, cmd) > + > + return ret > + > +def undefine_netpool(server, virt, net_name): > + if net_name == None: > + return FAIL > + > + ret = net_undefine(net_name, server, virt) > + if ret != 0: > + logger.error("Failed to undefine Virtual Network '%s'", net_name) > + return FAIL > + > + return PASS > + > +def create_verify_netpool(server, virt, test_pool): This is a long function. I would break this up so there is a function for creating the network pool and different function for verifying. > + elif curr_cim_rev >= libvirt_cim_child_pool_rev: > + nprasd = get_typed_class(virt, > + 'NetPoolResourceAllocationSettingData') > + addr = "192.168.0.9" > + n_list = net_list(server, virt) > + for _net_name in n_list: > + cmd = "virsh net-dumpxml %s | awk '/ip address/ {print}' | \ > + cut -d ' ' -f 4 | sed 's/address=//'" % _net_name > + s, in_use_addr = run_remote(server, cmd) > + in_use_addr = in_use_addr.strip("'") > + if in_use_addr == addr: > + logger.error("IP address is in use by a different network") > + return FAIL > + np_prop = { > + "Address" : addr, > + "Netmask" : "255.255.255.0", > + "IPRangeStart" : "192.168.0.31", > + "IPRangeEnd" : "192.168.0.57", > + } > + np_id = 'NetworkPool/%s' % test_pool > + iname = CIMInstanceName(nprasd, > + namespace = CIM_NS, > + keybindings = {'InstanceID':np_id}) > + if test_pool == "routedpool": > + np_prop["ForwardMode"] = "route eth1" > + elif test_pool == "natpool": > + np_prop["ForwardMode"] = "nat" The template NetPoolRASDs are now available in the providers. It's probably better to use those than to hand build the instances. 
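For example, the template RASDs can be pulled through the SettingsDefineCapabilities association and then adjusted, instead of hand-building the instance. A hedged pywbem sketch; the host, credentials, and the property overrides at the end are illustrative, while the class names and InstanceIDs match the providers' advertised templates.

# Hedged sketch: fetch the template NetPoolRASDs advertised by the
# providers, then override only the fields the test cares about.
import pywbem

conn = pywbem.WBEMConnection("http://localhost:5988",        # illustrative
                             ("root", "password"),
                             default_namespace="root/virt")

ac = pywbem.CIMInstanceName("KVM_AllocationCapabilities",
                            keybindings={"InstanceID": "NetworkPool/0"},
                            namespace="root/virt")

templates = conn.Associators(ac,
                             AssocClass="KVM_SettingsDefineCapabilities",
                             ResultClass="KVM_NetPoolResourceAllocationSettingData")

default = [t for t in templates if t["InstanceID"] == "Default"][0]
default["Address"] = "192.168.0.9"      # test-specific overrides (illustrative)
default["ForwardMode"] = "nat"
print(default.tomof())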
> + > + nrasd = CIMInstance(nprasd, path = iname, properties = np_prop) > + try: > + rpcs_conn.CreateChildResourcePool(ElementName=test_pool, > + Settings=[nrasd.tomof()]) > + except Exception, details: > + logger.error("Error in childpool creation") > + logger.error(details) > + return FAIL > + > + networkpool = get_typed_class(virt, 'NetworkPool') > + pool_list = EnumInstances(server, networkpool) > + if len(pool_list) < 1: > + logger.error("Return %i instances, expected at least one instance", > + len(pool_list)) > + return FAIL > + > + for i in range(0, len(pool_list)): > + ret_pool = pool_list[i].InstanceID > + if ret_pool == np_id: > + status = PASS > + break > + elif ret_pool != poolname and i == len(pool_list)-1: > + logger.error("Can not find expected pool") > + return FAIL You only verify the InstanceID - you don't verify that the pool was created as expected. Ideally, this would also verify the NetPoolRASD that represents the current configuration of the NetworkPool. But I haven't implemented that piece yet. > + > + return status > -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From yunguol at cn.ibm.com Thu May 7 08:56:47 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Thu, 07 May 2009 01:56:47 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Add new functions to pool.py for pool verificaiton through providers Message-ID: <0456e939b4326d1fa111.1241686607@elm3b197.beaverton.ibm.com> # HG changeset patch # User Guolian Yun # Date 1241686602 25200 # Node ID 0456e939b4326d1fa1110f4be8281fbb54af2dc9 # Parent 92caf252c2fa8c8a7a9b70548d12b03c52f3935c [TEST] Add new functions to pool.py for pool verificaiton through providers Tested for KVM with current sources Signed-off-by: Guolian Yun diff -r 92caf252c2fa -r 0456e939b432 suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Mon May 04 03:49:32 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Thu May 07 01:56:42 2009 -0700 @@ -21,15 +21,21 @@ # import sys -from CimTest.Globals import logger +from CimTest.Globals import logger, CIM_NS from CimTest.ReturnCodes import PASS, FAIL from XenKvmLib.classes import get_typed_class from XenKvmLib.const import get_provider_version, default_pool_name from XenKvmLib.enumclass import EnumInstances from VirtLib.utils import run_remote -from XenKvmLib.xm_virt_util import virt2uri +from XenKvmLib.xm_virt_util import virt2uri, net_list +from XenKvmLib import rpcs_service +import pywbem +from CimTest.CimExt import CIMClassMOF +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED +cim_mname = "CreateChildResourcePool" input_graphics_pool_rev = 757 +libvirt_cim_child_pool_rev = 837 def pool_cn_to_rasd_cn(pool_cn, virt): if pool_cn.find('ProcessorPool') >= 0: @@ -97,3 +103,119 @@ return volume +def net_undefine(network, server, virt="Xen"): + """Function undefine a given virtual network""" + + cmd = "virsh -c %s net-undefine %s" % (virt2uri(virt), network) + ret, out = run_remote(server, cmd) + + return ret + +def undefine_netpool(server, virt, net_name): + if net_name == None: + return FAIL + + ret = net_undefine(net_name, server, virt) + if ret != 0: + logger.error("Failed to undefine Virtual Network '%s'", net_name) + return FAIL + + return PASS + +class CIM_NetPoolResourceAllocationSettingData(CIMClassMOF): + def __init__(self, addr, netmask, ipstart, ipend, mode): + if addr != None: + self.Address = addr + if netmask != None: + self.NetMask = netmask + if ipstart != None: + self.IPRangeStart = ipstart + if ipend != None: + 
self.IPRangeEnd = ipend + if mode != None: + self.ForwardMode = mode + +class Xen_NetPoolResourceAllocationSettingData(CIM_NetPoolResourceAllocationSettingData): + pass + +class KVM_NetPoolResourceAllocationSettingData(CIM_NetPoolResourceAllocationSettingData): + pass + +class LXC_NetPoolResourceAllocationSettingData(CIM_NetPoolResourceAllocationSettingData): + pass + +def dump_netxml(server, netname): + cmd = "virsh net-dumpxml %s | awk '/ip address/ {print}' | \ + cut -d ' ' -f 4 | sed 's/address=//'" % netname + s, addr = run_remote(server, cmd) + addr = addr.strip("'") + + return addr + +def create_netpool(server, virt, test_pool, address, mode): + status = PASS + rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") + rpcs_conn = eval("rpcs_service." + rpcs)(server) + curr_cim_rev, changeset = get_provider_version(virt, server) + if curr_cim_rev < libvirt_cim_child_pool_rev: + try: + rpcs_conn.CreateChildResourcePool() + except pywbem.CIMError, (err_no, desc): + if err_no == cim_errno : + logger.info("Got expected exception for '%s'service", cim_mname) + logger.info("Errno is '%s' ", err_no) + logger.info("Error string is '%s'", desc) + return PASS + else: + logger.error("Unexpected rc code %s and description %s\n", + err_no, desc) + return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + nprasd = get_typed_class(virt, + 'NetPoolResourceAllocationSettingData') + n_list = net_list(server, virt) + for _net_name in n_list: + in_use_addr = dump_netxml(server, _net_name) + if in_use_addr == address: + logger.error("IP address is in use by a different network") + return FAIL + + class_nprasd = eval(nprasd) + nprasd = class_nprasd(addr = address, + netmask = "255.255.255.0", + ipstart = "192.168.0.31", + ipend = "192.168.0.57", + mode = mode) + try: + rpcs_conn.CreateChildResourcePool(ElementName=test_pool, + Settings=[nprasd.mof()]) + except Exception, details: + logger.error("Error in childpool creation") + logger.error(details) + return FAIL + + return status + +def verify_pool(server, pooltype, poolid, address): + status = FAIL + pool_list = EnumInstances(server, pooltype) + if len(pool_list) < 1: + logger.error("Return %i instances, expected at least one instance", + len(pool_list)) + return FAIL + + for i in range(0, len(pool_list)): + ret_pool = pool_list[i].InstanceID + ret_pool_name = ret_pool.split("/")[1] + if ret_pool == poolid: + ret_addr = dump_netxml(server, ret_pool_name) + if ret_addr == address: + status = PASS + break + else: + logger.error('Return address is %s, but expect is %s', + ret_addr, address) + elif ret_pool != poolid and i == len(pool_list)-1: + logger.error("The created pool can not be found") + + return status From yunguol at cn.ibm.com Thu May 7 08:44:21 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Thu, 7 May 2009 16:44:21 +0800 Subject: [Libvirt-cim] [PATCH] [TEST] #5 Update RPCS/04 to validate that theNetwork child pool can be created through the providers In-Reply-To: <4A0274A7.1020507@linux.vnet.ibm.com> Message-ID: libvirt-cim-bounces at redhat.com wrote on 2009-05-07 13:41:59: > > > - logger.error("The execution should not have reached here!!") > > - return FAIL > > + > > + status = destroy_netpool(options.ip, options.virt, test_pool[i]) > > + if status != PASS: > > + logger.error("Unable to destroy networkpool %s", test_pool[i]) > > + status = FAIL > > Should return here. Otherwise, if you're able to clean up the 2nd and > 3rd pool, but not the first, the test will return PASS instead of FAIL. 
> > > + > > + status = undefine_netpool(options.ip, options.virt, test_pool[i]) > > + if status != PASS: > > + logger.error("Unable to undefine networkpool %s", test_pool[i]) > > + status = FAIL > > Same issue here. > > > + > > + return status > > + > > if __name__ == "__main__": > > sys.exit(main()) > > - > > diff -r 92caf252c2fa -r af273b2ad41c suites/libvirt- > cim/lib/XenKvmLib/pool.py > > --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Mon May 04 03:49: > 32 2009 -0700 > > +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed May 06 21:48: > 09 2009 -0700 > > @@ -21,15 +21,21 @@ > > # > > > > import sys > > -from CimTest.Globals import logger > > +from CimTest.Globals import logger, CIM_NS > > from CimTest.ReturnCodes import PASS, FAIL > > from XenKvmLib.classes import get_typed_class > > from XenKvmLib.const import get_provider_version, default_pool_name > > from XenKvmLib.enumclass import EnumInstances > > from VirtLib.utils import run_remote > > -from XenKvmLib.xm_virt_util import virt2uri > > +from XenKvmLib.xm_virt_util import virt2uri, net_list > > +from XenKvmLib import rpcs_service > > +from pywbem.cim_obj import CIMInstance, CIMInstanceName > > +import pywbem > > > > +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > > +cim_mname = "CreateChildResourcePool" > > input_graphics_pool_rev = 757 > > +libvirt_cim_child_pool_rev = 837 > > > > def pool_cn_to_rasd_cn(pool_cn, virt): > > if pool_cn.find('ProcessorPool') >= 0: > > @@ -97,3 +103,94 @@ > > > > return volume > > > > +def net_undefine(network, server, virt="Xen"): > > + """Function undefine a given virtual network""" > > + > > + cmd = "virsh -c %s net-undefine %s" % (virt2uri(virt), network) > > + ret, out = run_remote(server, cmd) > > + > > + return ret > > + > > +def undefine_netpool(server, virt, net_name): > > + if net_name == None: > > + return FAIL > > + > > + ret = net_undefine(net_name, server, virt) > > + if ret != 0: > > + logger.error("Failed to undefine Virtual Network '%s'", net_name) > > + return FAIL > > + > > + return PASS > > + > > +def create_verify_netpool(server, virt, test_pool): > > This is a long function. I would break this up so there is a function > for creating the network pool and different function for verifying. > > > > + elif curr_cim_rev >= libvirt_cim_child_pool_rev: > > + nprasd = get_typed_class(virt, > > + 'NetPoolResourceAllocationSettingData') > > + addr = "192.168.0.9" > > + n_list = net_list(server, virt) > > + for _net_name in n_list: > > + cmd = "virsh net-dumpxml %s | awk '/ip address/ {print}' | \ > > + cut -d ' ' -f 4 | sed 's/address=//'" % _net_name > > + s, in_use_addr = run_remote(server, cmd) > > + in_use_addr = in_use_addr.strip("'") > > + if in_use_addr == addr: > > + logger.error("IP address is in use by a differentnetwork") > > + return FAIL > > + np_prop = { > > + "Address" : addr, > > + "Netmask" : "255.255.255.0", > > + "IPRangeStart" : "192.168.0.31", > > + "IPRangeEnd" : "192.168.0.57", > > + } > > + np_id = 'NetworkPool/%s' % test_pool > > + iname = CIMInstanceName(nprasd, > > + namespace = CIM_NS, > > + keybindings = {'InstanceID':np_id}) > > + if test_pool == "routedpool": > > + np_prop["ForwardMode"] = "route eth1" > > + elif test_pool == "natpool": > > + np_prop["ForwardMode"] = "nat" > > The template NetPoolRASDs are now available in the providers. It's > probably better to use those than to hand build the instances. > Kaitlin - I'm not sure if I quite understand you. Due to the network break down, all your response to this question on IRC is lost. 
I tried to send a new patch for these general functions. I have to log the discussion messages on IRC next time=) > > + > > + nrasd = CIMInstance(nprasd, path = iname, properties = np_prop) > > + try: > > + rpcs_conn.CreateChildResourcePool(ElementName=test_pool, > > + Settings=[nrasd.tomof()]) > > + except Exception, details: > > + logger.error("Error in childpool creation") > > + logger.error(details) > > + return FAIL > > + > > + networkpool = get_typed_class(virt, 'NetworkPool') > > + pool_list = EnumInstances(server, networkpool) > > + if len(pool_list) < 1: > > + logger.error("Return %i instances, expected at least > one instance", > > + len(pool_list)) > > + return FAIL > > + > > + for i in range(0, len(pool_list)): > > + ret_pool = pool_list[i].InstanceID > > + if ret_pool == np_id: > > + status = PASS > > + break > > + elif ret_pool != poolname and i == len(pool_list)-1: > > + logger.error("Can not find expected pool") > > + return FAIL > > You only verify the InstanceID - you don't verify that the pool was > created as expected. > > Ideally, this would also verify the NetPoolRASD that represents the > current configuration of the NetworkPool. But I haven't implemented > that piece yet. > > > + > > + return status > > > > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -------------- next part -------------- An HTML attachment was scrubbed... URL: From yunguol at cn.ibm.com Thu May 7 09:04:55 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Thu, 07 May 2009 02:04:55 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Simplify RPCS/04 for networkpool creation and validation through provider Message-ID: <5bf7603206d39d26feef.1241687095@elm3b197.beaverton.ibm.com> # HG changeset patch # User Guolian Yun # Date 1241687089 25200 # Node ID 5bf7603206d39d26feef19c91121b6e17e52dbe8 # Parent 92caf252c2fa8c8a7a9b70548d12b03c52f3935c [TEST] Simplify RPCS/04 for networkpool creation and validation through provider Tested for KVM with current sources Signed-off-by: Guolian Yun diff -r 92caf252c2fa -r 5bf7603206d3 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Mon May 04 03:49:32 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Thu May 07 02:04:49 2009 -0700 @@ -39,45 +39,56 @@ # OUT -- Error -- String -- Encoded error instance if the operation # failed and did not return a job # -# REVISIT : -# -------- -# As of now the CreateChildResourcePool() simply throws an Exception. -# We must improve this tc once the service is implemented. 
-# -# -Date: 20.02.2008 - +# Exception details before Revision 837 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 837, the service is implemented +# +# -Date: 20.02.2008 import sys -import pywbem -from XenKvmLib import rpcs_service from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class +from XenKvmLib.common_util import destroy_netpool +from XenKvmLib.pool import undefine_netpool, create_netpool, verify_pool -cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED -cim_mname = "CreateChildResourcePool" +test_pool = "testpool" +test_mode = ["None", "route eth1", "nat"] @do_main(platform_sup) def main(): + status = PASS options = main.options - rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \ - "ResourcePoolConfigurationService"))(options.ip) - try: - rpcs_conn.CreateChildResourcePool() - except pywbem.CIMError, (err_no, desc): - if err_no == cim_errno : - logger.info("Got expected exception for '%s' service", cim_mname) - logger.info("Errno is '%s' ", err_no) - logger.info("Error string is '%s'", desc) - return PASS - else: - logger.error("Unexpected rc code %s and description %s\n", - err_no, desc) + + np = get_typed_class(options.virt, 'NetworkPool') + np_id = "NetworkPool/%s" % test_pool + addr = "192.168.0.8" + for i in range(0, len(test_mode)): + status = create_netpool(options.ip, options.virt, test_pool, + addr, test_mode[i]) + if status != PASS: + logger.error("Error in networkpool creation") return FAIL - - logger.error("The execution should not have reached here!!") - return FAIL + + status = verify_pool(options.ip, np, np_id, addr) + if status != PASS: + logger.error("Error in networkpool verification") + return FAIL + + status = destroy_netpool(options.ip, options.virt, test_pool) + if status != PASS: + logger.error("Unable to destroy networkpool %s", test_pool) + return FAIL + + status = undefine_netpool(options.ip, options.virt, test_pool) + if status != PASS: + logger.error("Unable to undefine networkpool %s", test_pool) + return FAIL + + return status + if __name__ == "__main__": sys.exit(main()) - From yunguol at cn.ibm.com Thu May 7 09:31:00 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Thu, 07 May 2009 02:31:00 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Make RPCS/07 to delete networkpool through providers Message-ID: <18fc8178c29ac3002727.1241688660@elm3b197.beaverton.ibm.com> # HG changeset patch # User Guolian Yun # Date 1241688655 25200 # Node ID 18fc8178c29ac30027272af9afe5b3fd01b1380c # Parent 92caf252c2fa8c8a7a9b70548d12b03c52f3935c [TEST] Make RPCS/07 to delete networkpool through providers Tested for KVM with current sources Signed-off-by: Guolian Yun diff -r 92caf252c2fa -r 18fc8178c29a suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Mon May 04 03:49:32 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Thu May 07 02:30:55 2009 -0700 @@ -33,10 +33,12 @@ # OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started # OUT -- Error-- String -- Encoded error instance if the operation # failed and did not return a job. -# REVISIT : -# -------- -# As of now the DeleteResourcePool() simply throws an Exception. -# We must improve this tc once the service is implemented. 
+# +# Exception details before Revision 841 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 841, the service is implemented # # -Date: 20.02.2008 @@ -46,32 +48,64 @@ from XenKvmLib import rpcs_service from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS -from XenKvmLib.const import do_main, platform_sup +from XenKvmLib.const import do_main, platform_sup, get_provider_version +from XenKvmLib.enumclass import EnumInstances from XenKvmLib.classes import get_typed_class +from XenKvmLib.pool import create_netpool, verify_pool +from pywbem.cim_obj import CIMInstanceName cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED cim_mname = "DeleteResourcePool" +libvirt_cim_child_pool_rev = 841 +test_pool = "test_pool" @do_main(platform_sup) def main(): + status = PASS options = main.options - rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \ - "ResourcePoolConfigurationService"))(options.ip) - try: - rpcs_conn.DeleteResourcePool() - except pywbem.CIMError, (err_no, desc): - if err_no == cim_errno : - logger.info("Got expected exception for '%s' service", cim_mname) - logger.info("Errno is '%s' ", err_no) - logger.info("Error string is '%s'", desc) - return PASS - else: - logger.error("Unexpected rc code %s and description %s\n", - err_no, desc) + rpcs = get_typed_class(options.virt, "ResourcePoolConfigurationService") + rpcs_conn = eval("rpcs_service." + rpcs)(options.ip) + curr_cim_rev, changeset = get_provider_version(options.virt, options.ip) + if curr_cim_rev < libvirt_cim_child_pool_rev: + try: + rpcs_conn.DeleteResourcePool() + except pywbem.CIMError, (err_no, desc): + if err_no == cim_errno : + logger.info("Got expected exception for '%s' service", cim_mname) + logger.info("Errno is '%s' ", err_no) + logger.info("Error string is '%s'", desc) + return PASS + else: + logger.error("Unexpected rc code %s and description %s\n", + err_no, desc) + return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + addr = "192.168.0.100" + status = create_netpool(options.ip, options.virt, test_pool, + addr, None) + if status != PASS: + logger.error("Error in networkpool creation") return FAIL - - logger.error("The execution should not have reached here!!") - return FAIL + + np = get_typed_class(options.virt, "NetworkPool") + np_id = "NetworkPool/%s" %test_pool + status = verify_pool(options.ip, np, np_id, addr) + if status != PASS: + logger.error("Error in networkpool verification") + return FAIL + + pool = CIMInstanceName(np, keybindings = {'InstanceID':np_id}) + try: + rpcs_conn.DeleteResourcePool(Pool = pool) + netpool = EnumInstances(options.ip, np) + for i in range(0, len(netpool)): + ret_pool = netpool[i].InstanceID + if ret_pool == np_id: + raise Exception("Failed to delete %s" % test_pool) + except Exception, details: + logger.error(details) + status = FAIL + + return status if __name__ == "__main__": sys.exit(main()) - From deeptik at linux.vnet.ibm.com Thu May 7 10:55:24 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Thu, 07 May 2009 16:25:24 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] #5 Update RPCS/04 to validate thattheNetwork child pool can be created through the providers In-Reply-To: References: Message-ID: <4A02BE1C.2080805@linux.vnet.ibm.com> Guo Lian Yun wrote: > > libvirt-cim-bounces at redhat.com wrote on 2009-05-07 13:41:59: > > > > > > - logger.error("The execution should not have reached here!!") > > > - return FAIL > > > + > > > + status = destroy_netpool(options.ip, options.virt, > test_pool[i]) > > > + if 
status != PASS: > > > + logger.error("Unable to destroy networkpool %s", > test_pool[i]) > > > + status = FAIL > > > > Should return here. Otherwise, if you're able to clean up the 2nd and > > 3rd pool, but not the first, the test will return PASS instead of FAIL. > > > > > + > > > + status = undefine_netpool(options.ip, options.virt, > test_pool[i]) > > > + if status != PASS: > > > + logger.error("Unable to undefine networkpool %s", > test_pool[i]) > > > + status = FAIL > > > > Same issue here. > > > > > + > > > + return status > > > + > > > if __name__ == "__main__": > > > sys.exit(main()) > > > - > > > diff -r 92caf252c2fa -r af273b2ad41c suites/libvirt- > > cim/lib/XenKvmLib/pool.py > > > --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Mon May 04 03:49: > > 32 2009 -0700 > > > +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed May 06 21:48: > > 09 2009 -0700 > > > @@ -21,15 +21,21 @@ > > > # > > > > > > import sys > > > -from CimTest.Globals import logger > > > +from CimTest.Globals import logger, CIM_NS > > > from CimTest.ReturnCodes import PASS, FAIL > > > from XenKvmLib.classes import get_typed_class > > > from XenKvmLib.const import get_provider_version, default_pool_name > > > from XenKvmLib.enumclass import EnumInstances > > > from VirtLib.utils import run_remote > > > -from XenKvmLib.xm_virt_util import virt2uri > > > +from XenKvmLib.xm_virt_util import virt2uri, net_list > > > +from XenKvmLib import rpcs_service > > > +from pywbem.cim_obj import CIMInstance, CIMInstanceName > > > +import pywbem > > > > > > +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > > > +cim_mname = "CreateChildResourcePool" > > > input_graphics_pool_rev = 757 > > > +libvirt_cim_child_pool_rev = 837 > > > > > > def pool_cn_to_rasd_cn(pool_cn, virt): > > > if pool_cn.find('ProcessorPool') >= 0: > > > @@ -97,3 +103,94 @@ > > > > > > return volume > > > > > > +def net_undefine(network, server, virt="Xen"): > > > + """Function undefine a given virtual network""" > > > + > > > + cmd = "virsh -c %s net-undefine %s" % (virt2uri(virt), network) > > > + ret, out = run_remote(server, cmd) > > > + > > > + return ret > > > + > > > +def undefine_netpool(server, virt, net_name): > > > + if net_name == None: > > > + return FAIL > > > + > > > + ret = net_undefine(net_name, server, virt) > > > + if ret != 0: > > > + logger.error("Failed to undefine Virtual Network '%s'", > net_name) > > > + return FAIL > > > + > > > + return PASS > > > + > > > +def create_verify_netpool(server, virt, test_pool): > > > > This is a long function. I would break this up so there is a function > > for creating the network pool and different function for verifying. 
> > > > > > > > + elif curr_cim_rev >= libvirt_cim_child_pool_rev: > > > + nprasd = get_typed_class(virt, > > > + > 'NetPoolResourceAllocationSettingData') > > > + addr = "192.168.0.9" > > > + n_list = net_list(server, virt) > > > + for _net_name in n_list: > > > + cmd = "virsh net-dumpxml %s | awk '/ip address/ > {print}' | \ > > > + cut -d ' ' -f 4 | sed 's/address=//'" % _net_name > > > + s, in_use_addr = run_remote(server, cmd) > > > + in_use_addr = in_use_addr.strip("'") > > > + if in_use_addr == addr: > > > + logger.error("IP address is in use by a > differentnetwork") > > > + return FAIL > > > + np_prop = { > > > + "Address" : addr, > > > + "Netmask" : "255.255.255.0", > > > + "IPRangeStart" : "192.168.0.31", > > > + "IPRangeEnd" : "192.168.0.57", > > > + } > > > + np_id = 'NetworkPool/%s' % test_pool > > > + iname = CIMInstanceName(nprasd, > > > + namespace = CIM_NS, > > > + keybindings = {'InstanceID':np_id}) > > > + if test_pool == "routedpool": > > > + np_prop["ForwardMode"] = "route eth1" > > > + elif test_pool == "natpool": > > > + np_prop["ForwardMode"] = "nat" > > > > The template NetPoolRASDs are now available in the providers. It's > > probably better to use those than to hand build the instances. > > > Kaitlin - I'm not sure if I quite understand you. Due to the network > break down, > all your response to this question on IRC is lost. I tried to send a > new patch > for these general functions. I have to log the discussion messages > on IRC next time=) Daisy you have not missed much. Kaitlin wanted you to use O/p from SettingDefineCapabilities like the the following: wbemcli ain -ac KVM_SettingsDefineCapabilities 'http://root:password at localhost/root/virt:KVM_AllocationCapabilities.InstanceID="NetworkPool/0"' -nl localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Minimum" localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Maximum" localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Increment" You could use the default o/p from the above query. > > > > + > > > + nrasd = CIMInstance(nprasd, path = iname, properties = > np_prop) > > > + try: > > > + rpcs_conn.CreateChildResourcePool(ElementName=test_pool, > > > + > Settings=[nrasd.tomof()]) > > > + except Exception, details: > > > + logger.error("Error in childpool creation") > > > + logger.error(details) > > > + return FAIL > > > + > > > + networkpool = get_typed_class(virt, 'NetworkPool') > > > + pool_list = EnumInstances(server, networkpool) > > > + if len(pool_list) < 1: > > > + logger.error("Return %i instances, expected at least > > one instance", > > > + len(pool_list)) > > > + return FAIL > > > + > > > + for i in range(0, len(pool_list)): > > > + ret_pool = pool_list[i].InstanceID > > > + if ret_pool == np_id: > > > + status = PASS > > > + break > > > + elif ret_pool != poolname and i == len(pool_list)-1: > > > + logger.error("Can not find expected pool") > > > + return FAIL > > > > You only verify the InstanceID - you don't verify that the pool was > > created as expected. > > > > Ideally, this would also verify the NetPoolRASD that represents the > > current configuration of the NetworkPool. But I haven't implemented > > that piece yet. 
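As a rough sketch of that suggestion (not part of the posted patch), the query and the customization could look something like the plain-pywbem code below; the credentials, the root/virt namespace, and the property names (taken from the hand-built np_prop above) are assumptions for illustration only:

import pywbem

def default_netpool_settings(host, virt_prefix, address):
    # Assumed credentials and namespace, for illustration only
    conn = pywbem.WBEMConnection("http://%s" % host,
                                 ("root", "password"),
                                 default_namespace="root/virt")

    # Reference to the parent NetworkPool's AllocationCapabilities instance
    ac_ref = pywbem.CIMInstanceName("%s_AllocationCapabilities" % virt_prefix,
                                    namespace="root/virt",
                                    keybindings={"InstanceID": "NetworkPool/0"})

    # The SettingsDefineCapabilities association on the parent pool returns
    # the Default/Minimum/Maximum/Increment template NetPoolRASDs
    templates = conn.Associators(ac_ref,
                                 AssocClass="%s_SettingsDefineCapabilities" % virt_prefix,
                                 ResultClass="%s_NetPoolResourceAllocationSettingData" % virt_prefix)

    defaults = [t for t in templates if t["InstanceID"] == "Default"]
    if not defaults:
        return None

    # Start from the Default template and override only what the test needs,
    # instead of hardcoding every property
    rasd = defaults[0]
    rasd["Address"] = address
    rasd["Netmask"] = "255.255.255.0"
    rasd["IPRangeStart"] = "192.168.0.31"
    rasd["IPRangeEnd"] = "192.168.0.57"

    # The MOF string can then be passed as the Settings parameter of
    # CreateChildResourcePool()
    return [rasd.tomof()]

The same list could also feed the Settings argument in the pool creation helper, so the NetPoolRASD values are no longer built by hand.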
> > > > > + > > > + return status > > > > > > > > > -- > > Kaitlin Rupert > > IBM Linux Technology Center > > kaitlin at linux.vnet.ibm.com > > > > _______________________________________________ > > Libvirt-cim mailing list > > Libvirt-cim at redhat.com > > https://www.redhat.com/mailman/listinfo/libvirt-cim > ------------------------------------------------------------------------ > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Thu May 7 11:21:08 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Thu, 07 May 2009 16:51:08 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] Add new functions to pool.py for poolverificaiton through providers In-Reply-To: <0456e939b4326d1fa111.1241686607@elm3b197.beaverton.ibm.com> References: <0456e939b4326d1fa111.1241686607@elm3b197.beaverton.ibm.com> Message-ID: <4A02C424.8050303@linux.vnet.ibm.com> yunguol at cn.ibm.com wrote: > # HG changeset patch > # User Guolian Yun > # Date 1241686602 25200 > # Node ID 0456e939b4326d1fa1110f4be8281fbb54af2dc9 > # Parent 92caf252c2fa8c8a7a9b70548d12b03c52f3935c > [TEST] Add new functions to pool.py for pool verificaiton through providers > > > Tested for KVM with current sources > Signed-off-by: Guolian Yun > > diff -r 92caf252c2fa -r 0456e939b432 suites/libvirt-cim/lib/XenKvmLib/pool.py > --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Mon May 04 03:49:32 2009 -0700 > +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Thu May 07 01:56:42 2009 -0700 > @@ -21,15 +21,21 @@ > # > > import sys > -from CimTest.Globals import logger > +from CimTest.Globals import logger, CIM_NS > from CimTest.ReturnCodes import PASS, FAIL > from XenKvmLib.classes import get_typed_class > from XenKvmLib.const import get_provider_version, default_pool_name > from XenKvmLib.enumclass import EnumInstances > from VirtLib.utils import run_remote > -from XenKvmLib.xm_virt_util import virt2uri > +from XenKvmLib.xm_virt_util import virt2uri, net_list > +from XenKvmLib import rpcs_service > +import pywbem > +from CimTest.CimExt import CIMClassMOF > > +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > +cim_mname = "CreateChildResourcePool" > input_graphics_pool_rev = 757 > +libvirt_cim_child_pool_rev = 837 > > def pool_cn_to_rasd_cn(pool_cn, virt): > if pool_cn.find('ProcessorPool') >= 0: > @@ -97,3 +103,119 @@ > > return volume > > +def net_undefine(network, server, virt="Xen"): > + """Function undefine a given virtual network""" > + > + cmd = "virsh -c %s net-undefine %s" % (virt2uri(virt), network) > + ret, out = run_remote(server, cmd) > + > + return ret > + > +def undefine_netpool(server, virt, net_name): > + if net_name == None: > + return FAIL > + > + ret = net_undefine(net_name, server, virt) > + if ret != 0: > + logger.error("Failed to undefine Virtual Network '%s'", net_name) > + return FAIL > + > + return PASS > + > +class CIM_NetPoolResourceAllocationSettingData(CIMClassMOF): > + def __init__(self, addr, netmask, ipstart, ipend, mode): > + if addr != None: > + self.Address = addr > + if netmask != None: > + self.NetMask = netmask > + if ipstart != None: > + self.IPRangeStart = ipstart > + if ipend != None: > + self.IPRangeEnd = ipend > + if mode != None: > + self.ForwardMode = mode > + > +class Xen_NetPoolResourceAllocationSettingData(CIM_NetPoolResourceAllocationSettingData): > + pass > + > +class 
KVM_NetPoolResourceAllocationSettingData(CIM_NetPoolResourceAllocationSettingData): > + pass > + > +class LXC_NetPoolResourceAllocationSettingData(CIM_NetPoolResourceAllocationSettingData): > + pass > + > +def dump_netxml(server, netname): > + cmd = "virsh net-dumpxml %s | awk '/ip address/ {print}' | \ > + cut -d ' ' -f 4 | sed 's/address=//'" % netname > + s, addr = run_remote(server, cmd) > + addr = addr.strip("'") > + > + return addr > + > +def create_netpool(server, virt, test_pool, address, mode): > + status = PASS > status = PASS value is used only in one place, hence you can remove this variable definition and use return PASS instead. > + rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") > + rpcs_conn = eval("rpcs_service." + rpcs)(server) > + curr_cim_rev, changeset = get_provider_version(virt, server) > + if curr_cim_rev < libvirt_cim_child_pool_rev: > + try: > + rpcs_conn.CreateChildResourcePool() > + except pywbem.CIMError, (err_no, desc): > + if err_no == cim_errno : > + logger.info("Got expected exception for '%s'service", cim_mname) > + logger.info("Errno is '%s' ", err_no) > + logger.info("Error string is '%s'", desc) > + return PASS > + else: > + logger.error("Unexpected rc code %s and description %s\n", > + err_no, desc) > + return FAIL > + elif curr_cim_rev >= libvirt_cim_child_pool_rev: > + nprasd = get_typed_class(virt, > + 'NetPoolResourceAllocationSettingData') > + n_list = net_list(server, virt) > + for _net_name in n_list: > + in_use_addr = dump_netxml(server, _net_name) > + if in_use_addr == address: > + logger.error("IP address is in use by a different network") > + return FAIL > + > + class_nprasd = eval(nprasd) > + nprasd = class_nprasd(addr = address, > + netmask = "255.255.255.0", > + ipstart = "192.168.0.31", > + ipend = "192.168.0.57", > + mode = mode) > Do you think using the following here will avoid the harcoding of the NetPoolRASD setting here ? wbemcli ain -ac KVM_SettingsDefineCapabilities 'http://root:password at localhost/root/virt:KVM_AllocationCapabilities.InstanceID="NetworkPool/0"' -nl localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Minimum" localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Maximum" localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Increment" You could use the default o/p from the above query. 
> + try: > + rpcs_conn.CreateChildResourcePool(ElementName=test_pool, > + Settings=[nprasd.mof()]) > + except Exception, details: > + logger.error("Error in childpool creation") > + logger.error(details) > + return FAIL > + > + return status > + > +def verify_pool(server, pooltype, poolid, address): > + status = FAIL > + pool_list = EnumInstances(server, pooltype) > + if len(pool_list) < 1: > + logger.error("Return %i instances, expected at least one instance", > + len(pool_list)) > + return FAIL > + > + for i in range(0, len(pool_list)): > + ret_pool = pool_list[i].InstanceID > + ret_pool_name = ret_pool.split("/")[1] > + if ret_pool == poolid: > + ret_addr = dump_netxml(server, ret_pool_name) > + if ret_addr == address: > + status = PASS > + break > + else: > + logger.error('Return address is %s, but expect is %s', > + ret_addr, address) > + elif ret_pool != poolid and i == len(pool_list)-1: > + logger.error("The created pool can not be found") > + > + return status > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim > -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Thu May 7 11:28:28 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Thu, 07 May 2009 16:58:28 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] Simplify RPCS/04 for networkpoolcreation and validation through provider In-Reply-To: <5bf7603206d39d26feef.1241687095@elm3b197.beaverton.ibm.com> References: <5bf7603206d39d26feef.1241687095@elm3b197.beaverton.ibm.com> Message-ID: <4A02C5DC.1080109@linux.vnet.ibm.com> +1 -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Thu May 7 11:33:44 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Thu, 07 May 2009 17:03:44 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] Make RPCS/07 to delete networkpoolthrough providers In-Reply-To: <18fc8178c29ac3002727.1241688660@elm3b197.beaverton.ibm.com> References: <18fc8178c29ac3002727.1241688660@elm3b197.beaverton.ibm.com> Message-ID: <4A02C718.2060101@linux.vnet.ibm.com> yunguol at cn.ibm.com wrote: > # HG changeset patch > # User Guolian Yun > # Date 1241688655 25200 > # Node ID 18fc8178c29ac30027272af9afe5b3fd01b1380c > # Parent 92caf252c2fa8c8a7a9b70548d12b03c52f3935c > [TEST] Make RPCS/07 to delete networkpool through providers > > > Tested for KVM with current sources > Signed-off-by: Guolian Yun > > diff -r 92caf252c2fa -r 18fc8178c29a suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py > --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Mon May 04 03:49:32 2009 -0700 > +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Thu May 07 02:30:55 2009 -0700 > @@ -33,10 +33,12 @@ > # OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started > # OUT -- Error-- String -- Encoded error instance if the operation > # failed and did not return a job. > -# REVISIT : > -# -------- > -# As of now the DeleteResourcePool() simply throws an Exception. > -# We must improve this tc once the service is implemented. 
> +# > +# Exception details before Revision 841 > +# ----- > +# Error code: CIM_ERR_NOT_SUPPORTED > +# > +# After revision 841, the service is implemented > # > # -Date: 20.02.2008 > > @@ -46,32 +48,64 @@ > from XenKvmLib import rpcs_service > from CimTest.Globals import logger > from CimTest.ReturnCodes import FAIL, PASS > -from XenKvmLib.const import do_main, platform_sup > +from XenKvmLib.const import do_main, platform_sup, get_provider_version > +from XenKvmLib.enumclass import EnumInstances > from XenKvmLib.classes import get_typed_class > +from XenKvmLib.pool import create_netpool, verify_pool > +from pywbem.cim_obj import CIMInstanceName > > cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > cim_mname = "DeleteResourcePool" > +libvirt_cim_child_pool_rev = 841 > +test_pool = "test_pool" > > @do_main(platform_sup) > def main(): > + status = PASS > options = main.options > - rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \ > - "ResourcePoolConfigurationService"))(options.ip) > - try: > - rpcs_conn.DeleteResourcePool() > - except pywbem.CIMError, (err_no, desc): > - if err_no == cim_errno : > - logger.info("Got expected exception for '%s' service", cim_mname) > - logger.info("Errno is '%s' ", err_no) > - logger.info("Error string is '%s'", desc) > - return PASS > - else: > - logger.error("Unexpected rc code %s and description %s\n", > - err_no, desc) > + rpcs = get_typed_class(options.virt, "ResourcePoolConfigurationService") > + rpcs_conn = eval("rpcs_service." + rpcs)(options.ip) > + curr_cim_rev, changeset = get_provider_version(options.virt, options.ip) > + if curr_cim_rev < libvirt_cim_child_pool_rev: > + try: > + rpcs_conn.DeleteResourcePool() > + except pywbem.CIMError, (err_no, desc): > + if err_no == cim_errno : > + logger.info("Got expected exception for '%s' service", cim_mname) > + logger.info("Errno is '%s' ", err_no) > + logger.info("Error string is '%s'", desc) > + return PASS > + else: > + logger.error("Unexpected rc code %s and description %s\n", > + err_no, desc) > + return FAIL > + elif curr_cim_rev >= libvirt_cim_child_pool_rev: > + addr = "192.168.0.100" > + status = create_netpool(options.ip, options.virt, test_pool, > + addr, None) > + if status != PASS: > + logger.error("Error in networkpool creation") > return FAIL > - > - logger.error("The execution should not have reached here!!") > - return FAIL > + > + np = get_typed_class(options.virt, "NetworkPool") > + np_id = "NetworkPool/%s" %test_pool > + status = verify_pool(options.ip, np, np_id, addr) > + if status != PASS: > + logger.error("Error in networkpool verification") > + return FAIL > You need to destroy/undefine the netpool here before returning. > + > + pool = CIMInstanceName(np, keybindings = {'InstanceID':np_id}) > + try: > + rpcs_conn.DeleteResourcePool(Pool = pool) > + netpool = EnumInstances(options.ip, np) > + for i in range(0, len(netpool)): > + ret_pool = netpool[i].InstanceID > + if ret_pool == np_id: > + raise Exception("Failed to delete %s" % test_pool) > + except Exception, details: > + logger.error(details) > + status = FAIL > You need to delete the networkpool when it fails to delete it using the DeleteResourcePool() > + > + return status > > if __name__ == "__main__": > sys.exit(main()) > - > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim > -- Thanks and Regards, Deepti B. 
Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 7 17:13:54 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 07 May 2009 10:13:54 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Add new functions to pool.py forpoolverificaiton through providers In-Reply-To: <4A02C424.8050303@linux.vnet.ibm.com> References: <0456e939b4326d1fa111.1241686607@elm3b197.beaverton.ibm.com> <4A02C424.8050303@linux.vnet.ibm.com> Message-ID: <4A0316D2.20606@linux.vnet.ibm.com> >> + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + nprasd >> = get_typed_class(virt, + >> 'NetPoolResourceAllocationSettingData') >> + n_list = net_list(server, virt) >> + for _net_name in n_list: >> + in_use_addr = dump_netxml(server, _net_name) >> + if in_use_addr == address: >> + logger.error("IP address is in use by a different >> network") >> + return FAIL >> + + class_nprasd = eval(nprasd) >> + nprasd = class_nprasd(addr = address, >> + netmask = "255.255.255.0", >> + ipstart = "192.168.0.31", >> + ipend = "192.168.0.57", >> + mode = mode) >> > Do you think using the following here will avoid the harcoding of the > NetPoolRASD setting here ? > wbemcli ain -ac KVM_SettingsDefineCapabilities > 'http://root:password at localhost/root/virt:KVM_AllocationCapabilities.InstanceID="NetworkPool/0"' > -nl > > localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" > > localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Minimum" > > localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Maximum" > > localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Increment" > > > You could use the default o/p from the above query. > Thanks Deepti - this is what I meant by the template NetPoolRASDs. Instead of building the instances of NetPoolRASDs by hand, you can use the instances returned by SDC. You'll probably want a function that does this. You can look at the get_default_rasd_mofs() in rasd.py - this gets the template RASDs for the non-pool related RASDs. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 7 17:28:09 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 07 May 2009 10:28:09 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Add new functions to pool.py for poolverificaiton through providers In-Reply-To: <0456e939b4326d1fa111.1241686607@elm3b197.beaverton.ibm.com> References: <0456e939b4326d1fa111.1241686607@elm3b197.beaverton.ibm.com> Message-ID: <4A031A29.704@linux.vnet.ibm.com> > +def dump_netxml(server, netname): > + cmd = "virsh net-dumpxml %s | awk '/ip address/ {print}' | \ > + cut -d ' ' -f 4 | sed 's/address=//'" % netname > + s, addr = run_remote(server, cmd) > + addr = addr.strip("'") > + > + return addr This only checks the IP, it doesn't check the - the netmask, ip start, ip end, etc. I would try to leverage the NetXML class here. You can have a function similar to dumpxml. You can also have functions similar to xml_get_net_mac(), xml_get_vcpus() etc. This will allow you to verify all the values of the XML, not just the IP. 
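A minimal sketch of that idea (this is not the actual NetXML class, whose helpers may be organized differently) would parse the <ip> element once and hand back everything the caller wants to compare; it reuses run_remote()/virt2uri() as in the patch above, and the returned field names are just for illustration:

import xml.etree.ElementTree as ET
from VirtLib.utils import run_remote
from XenKvmLib.xm_virt_util import virt2uri

def net_ip_config(server, virt, netname):
    # Dump the whole network definition once instead of grepping one field
    cmd = "virsh -c %s net-dumpxml %s" % (virt2uri(virt), netname)
    rc, xml_str = run_remote(server, cmd)
    if rc != 0:
        return None

    # Standard libvirt network XML: <ip address=.. netmask=..><dhcp><range ../>
    ip = ET.fromstring(xml_str.strip()).find("ip")
    if ip is None:
        return None

    start = end = None
    rng = ip.find("dhcp/range")
    if rng is not None:
        start = rng.get("start")
        end = rng.get("end")

    return {"address":  ip.get("address"),
            "netmask":  ip.get("netmask"),
            "ip_start": start,
            "ip_end":   end}

verify_pool() could then compare each of these fields against the values passed to CreateChildResourcePool(), not just the address.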
> +def verify_pool(server, pooltype, poolid, address): > + status = FAIL > + pool_list = EnumInstances(server, pooltype) > + if len(pool_list) < 1: > + logger.error("Return %i instances, expected at least one instance", > + len(pool_list)) > + return FAIL > + > + for i in range(0, len(pool_list)): > + ret_pool = pool_list[i].InstanceID > + ret_pool_name = ret_pool.split("/")[1] You can use the parse_instance_id() function for this. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 7 17:44:14 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 07 May 2009 10:44:14 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Make RPCS/07 to delete networkpoolthrough providers In-Reply-To: <18fc8178c29ac3002727.1241688660@elm3b197.beaverton.ibm.com> References: <18fc8178c29ac3002727.1241688660@elm3b197.beaverton.ibm.com> Message-ID: <4A031DEE.1030908@linux.vnet.ibm.com> > - logger.info("Got expected exception for '%s' service", cim_mname) This line is longer than 80 characters. > + pool = CIMInstanceName(np, keybindings = {'InstanceID':np_id}) Enumerate the network pools and the get pool reference that way. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From yunguol at cn.ibm.com Fri May 8 06:50:04 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Fri, 8 May 2009 14:50:04 +0800 Subject: [Libvirt-cim] [PATCH] [TEST] Add new functions to pool.py forpoolverificaiton through providers In-Reply-To: <4A0316D2.20606@linux.vnet.ibm.com> Message-ID: libvirt-cim-bounces at redhat.com wrote on 2009-05-08 01:13:54: > >> + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + nprasd > >> = get_typed_class(virt, + > >> 'NetPoolResourceAllocationSettingData') > >> + n_list = net_list(server, virt) > >> + for _net_name in n_list: > >> + in_use_addr = dump_netxml(server, _net_name) > >> + if in_use_addr == address: > >> + logger.error("IP address is in use by a different > >> network") > >> + return FAIL > >> + + class_nprasd = eval(nprasd) > >> + nprasd = class_nprasd(addr = address, > >> + netmask = "255.255.255.0", > >> + ipstart = "192.168.0.31", > >> + ipend = "192.168.0.57", > >> + mode = mode) > >> > > Do you think using the following here will avoid the harcoding of the > > NetPoolRASD setting here ? > > wbemcli ain -ac KVM_SettingsDefineCapabilities > > 'http://root:password at localhost/root/virt: > KVM_AllocationCapabilities.InstanceID="NetworkPool/0"' > > -nl > > > > localhost/root/virt:KVM_NetPoolResourceAllocationSettingData. > InstanceID="Default" > > > > localhost/root/virt:KVM_NetPoolResourceAllocationSettingData. > InstanceID="Minimum" > > > > localhost/root/virt:KVM_NetPoolResourceAllocationSettingData. > InstanceID="Maximum" > > > > localhost/root/virt:KVM_NetPoolResourceAllocationSettingData. > InstanceID="Increment" > > > > > > You could use the default o/p from the above query. > > > > Thanks Deepti - this is what I meant by the template NetPoolRASDs. > Instead of building the instances of NetPoolRASDs by hand, you can use > the instances returned by SDC. I got NetRASDs instead of NetPoolRASDs through this association with latest provider. 
[root at elm3b197 cimtest]# wbemcli ain -ac KVM_SettingsDefineCapabilities http://root:elm3b197 at localhost/root/virt:KVM_AllocationCapabilities.InstanceID="NetworkPool/cimtest-networkpool" localhost/root/virt:KVM_NetResourceAllocationSettingData.InstanceID="Minimum" localhost/root/virt:KVM_NetResourceAllocationSettingData.InstanceID="Minimum" localhost/root/virt:KVM_NetResourceAllocationSettingData.InstanceID="Maximum" localhost/root/virt:KVM_NetResourceAllocationSettingData.InstanceID="Maximum" localhost/root/virt:KVM_NetResourceAllocationSettingData.InstanceID="Default" localhost/root/virt:KVM_NetResourceAllocationSettingData.InstanceID="Default" localhost/root/virt:KVM_NetResourceAllocationSettingData.InstanceID="Increment" localhost/root/virt:KVM_NetResourceAllocationSettingData.InstanceID="Increment" > > You'll probably want a function that does this. You can look at the > get_default_rasd_mofs() in rasd.py - this gets the template RASDs for > the non-pool related RASDs. > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -------------- next part -------------- An HTML attachment was scrubbed... URL: From rmaciel at linux.vnet.ibm.com Fri May 8 19:44:51 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Fri, 08 May 2009 16:44:51 -0300 Subject: [Libvirt-cim] [PATCH 0 of 2] Add netfs storage pool support In-Reply-To: References: Message-ID: <4A048BB3.6080806@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > This adds support for the netfs type storage pool. > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Fri May 8 20:37:16 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Fri, 08 May 2009 17:37:16 -0300 Subject: [Libvirt-cim] [PATCH 1 of 2] Add parent_device_pool() which returnsthe parent pool for a given device type In-Reply-To: References: Message-ID: <4A0497FC.9090005@linux.vnet.ibm.com> Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1240877148 25200 > # Node ID e132a8e94381d218d39eb2fffc94b27a438abb32 > # Parent 570c3507c7b2c3e55680e9a70f4889accb9a1cf7 > Add parent_device_pool() which returns the parent pool for a given device type > > Signed-off-by: Kaitlin Rupert > > diff -r 570c3507c7b2 -r e132a8e94381 src/Virt_DevicePool.c > --- a/src/Virt_DevicePool.c Tue May 05 18:08:58 2009 -0700 > +++ b/src/Virt_DevicePool.c Mon Apr 27 17:05:48 2009 -0700 > @@ -77,7 +77,7 @@ > goto out; > } > > - pools[count].tag = strdup("Parent"); > + pools[count].tag = strdup("0"); > pools[count].path = NULL; > pools[count].primordial = true; > count++; > @@ -1234,6 +1234,45 @@ > return _get_pools(broker, reference, type, NULL, list); > } > > +CMPIInstance *parent_device_pool(const CMPIBroker *broker, > + const CMPIObjectPath *reference, > + uint16_t type, > + CMPIStatus *s) > +{ > + CMPIInstance *inst = NULL; > + const char *id = NULL; > + > + if (type == CIM_RES_TYPE_MEM) { > + id = "MemoryPool/0"; > + } else if (type == CIM_RES_TYPE_PROC) { > + id = "ProcessorPool/0"; > + } else if (type == CIM_RES_TYPE_DISK) { > + id = "DiskPool/0"; > + } else if (type == CIM_RES_TYPE_NET) { > + id = "NetworkPool/0"; > + } else if (type == 
CIM_RES_TYPE_GRAPHICS) { > + id = "GraphicsPool/0"; > + } else if (type == CIM_RES_TYPE_INPUT) { > + id = "InputPool/0"; > + } else { > + cu_statusf(broker, s, > + CMPI_RC_ERR_INVALID_PARAMETER, > + "No such device type `%s'", type); > + goto out; > + } > + > + *s = get_pool_by_name(broker, reference, id, &inst); > + if (inst == NULL) { > + cu_statusf(broker, s, > + CMPI_RC_ERR_FAILED, > + "No default pool found for type %hi", type); > + } > + > + out: > + > + return inst; > +} > + > CMPIInstance *default_device_pool(const CMPIBroker *broker, > const CMPIObjectPath *reference, > uint16_t type, > @@ -1241,43 +1280,38 @@ > { > CMPIInstance *inst = NULL; > struct inst_list list; > + bool val; > > - inst_list_init(&list); > + if ((type == CIM_RES_TYPE_DISK) || (type == CIM_RES_TYPE_NET)) { > + int i = 0; > + CMPIrc rc; > > - if (type == CIM_RES_TYPE_MEM) { > - *s = get_pool_by_name(broker, reference, "MemoryPool/0", &inst); > - } else if (type == CIM_RES_TYPE_PROC) { > - *s = get_pool_by_name(broker, reference, "ProcessorPool/0", &inst); > - } else if (type == CIM_RES_TYPE_DISK) { > + inst_list_init(&list); > + > *s = enum_pools(broker, reference, type, &list); > - if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) You probably should insert a CU_DEBUG here just to help debug > - inst = list.list[0]; > - } else if (type == CIM_RES_TYPE_NET) { > - *s = enum_pools(broker, reference, type, &list); > - if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) > - inst = list.list[0]; Same here > - } else if (type == CIM_RES_TYPE_GRAPHICS) { > - *s = get_pool_by_name(broker, > - reference, > - "GraphicsPool/0", > - &inst); Same here > - } else if (type == CIM_RES_TYPE_INPUT) { > - *s = get_pool_by_name(broker, > - reference, > - "InputPool/0", > - &inst); > + if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) { > + > + for (i = 0; i < list.cur; i++) { > + rc = cu_get_bool_prop(list.list[i], > + "Primordial", > + &val); > + if ((rc != CMPI_RC_OK) || (val)) > + continue; > + > + inst = list.list[i]; > + break; > + } Same here > + } > + > + inst_list_free(&list); > + > + if (inst == NULL) { > + cu_statusf(broker, s, > + CMPI_RC_ERR_FAILED, > + "No default pool found for type %hi", type); > + } > } else { > - cu_statusf(broker, s, > - CMPI_RC_ERR_INVALID_PARAMETER, > - "No such device type `%s'", type); > - } > - > - inst_list_free(&list); > - > - if (inst == NULL) { > - cu_statusf(broker, s, > - CMPI_RC_ERR_FAILED, > - "No default pool found for type %hi", type); > + inst = parent_device_pool(broker, reference, type, s); > } > > return inst; > diff -r 570c3507c7b2 -r e132a8e94381 src/Virt_DevicePool.h > --- a/src/Virt_DevicePool.h Tue May 05 18:08:58 2009 -0700 > +++ b/src/Virt_DevicePool.h Mon Apr 27 17:05:48 2009 -0700 > @@ -106,6 +106,20 @@ > CMPIInstance **_inst); > > /** > + * Get the parent pool for a given device type > + * > + * @param broker A pointer to the current broker > + * @param ref The object path containing namespace and prefix info > + * @param type The device type in question > + * @param status The returned status > + * @returns Parent pool instance > + */ > +CMPIInstance *parent_device_pool(const CMPIBroker *broker, > + const CMPIObjectPath *reference, > + uint16_t type, > + CMPIStatus *s); > + > +/** > * Get the default pool for a given device type > * > * @param broker A pointer to the current broker > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology 
Center rmaciel at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Fri May 8 20:39:20 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Fri, 08 May 2009 17:39:20 -0300 Subject: [Libvirt-cim] [PATCH 2 of 2] EAFP between parent and child pools In-Reply-To: <5608b9455cd32fccbc32.1241656626@localhost.localdomain> References: <5608b9455cd32fccbc32.1241656626@localhost.localdomain> Message-ID: <4A049878.3030608@linux.vnet.ibm.com> Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1240877148 25200 > # Node ID 5608b9455cd32fccbc324cd540c509d7230a113f > # Parent e132a8e94381d218d39eb2fffc94b27a438abb32 > EAFP between parent and child pools. > > Signed-off-by: Kaitlin Rupert > > diff -r e132a8e94381 -r 5608b9455cd3 src/Virt_ElementAllocatedFromPool.c > --- a/src/Virt_ElementAllocatedFromPool.c Mon Apr 27 17:05:48 2009 -0700 > +++ b/src/Virt_ElementAllocatedFromPool.c Mon Apr 27 17:05:48 2009 -0700 > @@ -89,18 +89,27 @@ > free(poolid); > > return s; > + > } > > -static int filter_by_pool(struct inst_list *dest, > - struct inst_list *src, > - const uint16_t type, > - const char *_poolid) > +static CMPIStatus get_dev_from_pool(const CMPIObjectPath *ref, > + const uint16_t type, > + const char *_poolid, > + struct inst_list *list) > { > + CMPIStatus s = {CMPI_RC_OK, NULL}; > + char *poolid = NULL; > + struct inst_list tmp; > int i; > - char *poolid = NULL; > > - for (i = 0; i < src->cur; i++) { > - CMPIInstance *inst = src->list[i]; > + inst_list_init(&tmp); > + > + s = enum_devices(_BROKER, ref, NULL, type, &tmp); > + if (s.rc != CMPI_RC_OK) Same problem here. Use a CU_DEBUG to register in the log that enum_devices actually failed > + goto out; > + > + for (i = 0; i < tmp.cur; i++) { > + CMPIInstance *inst = tmp.list[i]; > const char *cn = NULL; > const char *dev_id = NULL; > > @@ -112,21 +121,76 @@ > > poolid = pool_member_of(_BROKER, cn, type, dev_id); > if (poolid && STREQ(poolid, _poolid)) > - inst_list_add(dest, inst); > + inst_list_add(list, inst); > } > > - return dest->cur; > + inst_list_free(&tmp); > + > + out: > + > + return s; > } > > -static CMPIStatus pool_to_vdev(const CMPIObjectPath *ref, > - struct std_assoc_info *info, > - struct inst_list *list) > +static CMPIStatus get_pools(const CMPIObjectPath *ref, > + const uint16_t type, > + const char *poolid, > + CMPIInstance *pool_inst, > + struct inst_list *list) > +{ > + CMPIStatus s = {CMPI_RC_OK, NULL}; > + CMPIInstance *pool = NULL; > + bool val; > + > + if (cu_get_bool_prop(pool_inst, "Primordial", &val) != CMPI_RC_OK) { > + cu_statusf(_BROKER, &s, > + CMPI_RC_ERR_FAILED, > + "Unable to determine pool type"); > + goto out; > + } > + > + /* If Primordial is true, the pool is a parent pool. Need to return > + all other pools. Otherwise, just return the parent pool. 
*/ > + if (val) { > + struct inst_list tmp; > + int i; > + > + inst_list_init(&tmp); > + > + s = enum_pools(_BROKER, ref, type, &tmp); > + if (s.rc != CMPI_RC_OK) Add CU_DEBUG notification here > + goto out; > + > + for (i = 0; i < tmp.cur; i++) { > + CMPIInstance *inst = tmp.list[i]; > + const char *id = NULL; > + > + cu_get_str_prop(inst, "InstanceID", &id); > + > + if (!STREQC(id, poolid)) > + inst_list_add(list, inst); > + } > + > + inst_list_free(&tmp); > + } else { > + pool = parent_device_pool(_BROKER, ref, type, &s); > + if (s.rc != CMPI_RC_OK) Add CU_DEBUG notification here > + goto out; > + > + inst_list_add(list, pool); > + } > + > + out: > + return s; > +} > + > +static CMPIStatus pool_to_vdev_or_pool(const CMPIObjectPath *ref, > + struct std_assoc_info *info, > + struct inst_list *list) > { > const char *poolid; > CMPIStatus s = {CMPI_RC_OK, NULL}; > uint16_t type; > CMPIInstance *inst = NULL; > - struct inst_list tmp; > > if (!match_hypervisor_prefix(ref, info)) > return s; > @@ -150,15 +214,11 @@ Add CU_DEBUG notification here > goto out; > } > > - inst_list_init(&tmp); > - > - s = enum_devices(_BROKER, ref, NULL, type, &tmp); > + s = get_dev_from_pool(ref, type, poolid, list); > if (s.rc != CMPI_RC_OK) Add CU_DEBUG notification here > goto out; > > - filter_by_pool(list, &tmp, type, poolid); > - > - inst_list_free(&tmp); > + s = get_pools(ref, type, poolid, inst, list); > > out: > return s; > @@ -166,7 +226,7 @@ > > LIBVIRT_CIM_DEFAULT_MAKEREF() > > -static char* antecedent[] = { > +static char* pool[] = { > "Xen_ProcessorPool", > "Xen_MemoryPool", > "Xen_NetworkPool", > @@ -188,7 +248,7 @@ > NULL > }; > > -static char* dependent[] = { > +static char* device[] = { > "Xen_Processor", > "Xen_Memory", > "Xen_NetworkPort", > @@ -210,6 +270,46 @@ > NULL > }; > > +static char* device_or_pool[] = { > + "Xen_Processor", > + "Xen_Memory", > + "Xen_NetworkPort", > + "Xen_LogicalDisk", > + "Xen_DisplayController", > + "Xen_PointingDevice", > + "KVM_Processor", > + "KVM_Memory", > + "KVM_NetworkPort", > + "KVM_LogicalDisk", > + "KVM_DisplayController", > + "KVM_PointingDevice", > + "LXC_Processor", > + "LXC_Memory", > + "LXC_NetworkPort", > + "LXC_LogicalDisk", > + "LXC_DisplayController", > + "LXC_PointingDevice", > + "Xen_ProcessorPool", > + "Xen_MemoryPool", > + "Xen_NetworkPool", > + "Xen_DiskPool", > + "Xen_GraphicsPool", > + "Xen_InputPool", > + "KVM_ProcessorPool", > + "KVM_MemoryPool", > + "KVM_NetworkPool", > + "KVM_DiskPool", > + "KVM_GraphicsPool", > + "KVM_InputPool", > + "LXC_ProcessorPool", > + "LXC_MemoryPool", > + "LXC_NetworkPool", > + "LXC_DiskPool", > + "LXC_GraphicsPool", > + "LXC_InputPool", > + NULL > +}; > + > static char* assoc_classname[] = { > "Xen_ElementAllocatedFromPool", > "KVM_ElementAllocatedFromPool", > @@ -218,10 +318,10 @@ > }; > > static struct std_assoc _vdev_to_pool = { > - .source_class = (char**)&dependent, > + .source_class = (char**)&device, > .source_prop = "Dependent", > > - .target_class = (char**)&antecedent, > + .target_class = (char**)&pool, > .target_prop = "Antecedent", > > .assoc_class = (char**)&assoc_classname, > @@ -230,22 +330,22 @@ > .make_ref = make_ref > }; > > -static struct std_assoc _pool_to_vdev = { > - .source_class = (char**)&antecedent, > +static struct std_assoc _pool_to_vdev_or_pool = { > + .source_class = (char**)&pool, > .source_prop = "Antecedent", > > - .target_class = (char**)&dependent, > + .target_class = (char**)&device_or_pool, > .target_prop = "Dependent", > > .assoc_class = (char**)&assoc_classname, > > - 
.handler = pool_to_vdev, > + .handler = pool_to_vdev_or_pool, > .make_ref = make_ref > }; > > static struct std_assoc *handlers[] = { > &_vdev_to_pool, > - &_pool_to_vdev, > + &_pool_to_vdev_or_pool, > NULL > }; > > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 8 23:45:56 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 08 May 2009 16:45:56 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Add new functions to pool.pyforpoolverificaiton through providers In-Reply-To: References: Message-ID: <4A04C434.6000601@linux.vnet.ibm.com> > > > > Thanks Deepti - this is what I meant by the template NetPoolRASDs. > > Instead of building the instances of NetPoolRASDs by hand, you can use > > the instances returned by SDC. > > I got NetRASDs instead of NetPoolRASDs through this association with > latest provider. > > [root at elm3b197 cimtest]# wbemcli ain -ac KVM_SettingsDefineCapabilities > http://localhost/root/virt:KVM_AllocationCapabilities.InstanceID="NetworkPool/cimtest-networkpool" You want to create a child pool from the parent pool. The parent pool is just a placeholder - all network pools that exist on the system will be children of the parent pool. # wbemcli ain -ac KVM_SettingsDefineCapabilities http://localhost/root/virt:KVM_AllocationCapabilities.InstanceID="NetworkPool/0" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Minimum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Minimum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Minimum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Minimum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Minimum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Maximum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Maximum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Maximum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Maximum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Maximum" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Increment" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Increment" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Increment" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Increment" elm3b41/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Increment" -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From yunguol at cn.ibm.com Mon May 11 02:58:53 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Mon, 11 May 2009 10:58:53 +0800 Subject: [Libvirt-cim] Test Run Summary (May 11 2009): KVM on Fedora release 10.90 (Rawhide) with Pegasus Message-ID: ================================================= Test Run Summary (May 
11 2009): KVM on Fedora release 10.90 (Rawhide) with Pegasus ================================================= Distro: Fedora release 10.90 (Rawhide) Kernel: 2.6.29-0.24.rc0.git13.fc11.x86_64 libvirt: 0.6.3 Hypervisor: QEMU 0.10.1 CIMOM: Pegasus 2.7.2 Libvirt-cim revision: 867 Libvirt-cim changeset: aa607e00fcf9 Cimtest revision: 675 Cimtest changeset: 92caf252c2fa ================================================= FAIL : 6 XFAIL : 3 SKIP : 9 PASS : 133 ----------------- Total : 151 ================================================= FAIL Test Summary: HostSystem - 03_hs_to_settdefcap.py: FAIL RASD - 05_disk_rasd_emu_type.py: FAIL ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL SettingsDefineCapabilities - 01_forward.py: FAIL SystemDevice - 01_forward.py: FAIL ================================================= XFAIL Test Summary: ComputerSystem - 33_suspend_reboot.py: XFAIL VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL VirtualSystemManagementService - 16_removeresource.py: XFAIL ================================================= SKIP Test Summary: ComputerSystem - 02_nosystems.py: SKIP LogicalDisk - 02_nodevs.py: SKIP VSSD - 02_bootldr.py: SKIP VirtualSystemMigrationService - 01_migratable_host.py: SKIP VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP ================================================= Full report: -------------------------------------------------------------------- AllocationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- AllocationCapabilities - 02_alloccap_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 01_enum.py: PASS -------------------------------------------------------------------- ComputerSystem - 02_nosystems.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- ComputerSystem - 03_defineVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 04_defineStartVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 05_activate_defined_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 06_paused_active_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 22_define_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 23_pause_pause.py: PASS -------------------------------------------------------------------- ComputerSystem - 27_define_pause_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 32_start_reboot.py: PASS -------------------------------------------------------------------- ComputerSystem - 33_suspend_reboot.py: XFAIL ERROR - Got CIM error CIM_ERR_NOT_SUPPORTED: State not supported with return code 7 ERROR - Exception: Unable Suspend dom 'test_domain' InvokeMethod(RequestStateChange): CIM_ERR_NOT_SUPPORTED: State not supported Bug:<00012> -------------------------------------------------------------------- ComputerSystem - 
35_start_reset.py: PASS -------------------------------------------------------------------- ComputerSystem - 40_RSC_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 41_cs_to_settingdefinestate.py: PASS -------------------------------------------------------------------- ComputerSystem - 42_cs_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystemIndication - 01_created_indication.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 03_reverse_errs.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 04_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 01_forward.py: PASS -------------------------------------------------------------------- ElementCapabilities - 02_reverse.py: PASS -------------------------------------------------------------------- ElementCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 05_hostsystem_cap.py: PASS -------------------------------------------------------------------- ElementConforms - 01_forward.py: PASS -------------------------------------------------------------------- ElementConforms - 02_reverse.py: PASS -------------------------------------------------------------------- ElementConforms - 03_ectp_fwd_errs.py: PASS -------------------------------------------------------------------- ElementConforms - 04_ectp_rev_errs.py: PASS -------------------------------------------------------------------- ElementSettingData - 01_forward.py: PASS -------------------------------------------------------------------- ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 01_enum.py: PASS -------------------------------------------------------------------- HostSystem - 02_hostsystem_to_rasd.py: PASS -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - 'KVM_SettingsDefineCapabilities' returned 8 RASD objects instead of 4 CIM_ERR_INVALID_CLASS: Linux_ComputerSystem -------------------------------------------------------------------- HostSystem - 04_hs_to_EAPF.py: PASS -------------------------------------------------------------------- HostSystem - 05_hs_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 06_hs_to_vsms.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 01_forward.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 01_forward.py: PASS 
-------------------------------------------------------------------- HostedDependency - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 03_enabledstate.py: PASS -------------------------------------------------------------------- HostedDependency - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 01_forward.py: PASS -------------------------------------------------------------------- HostedResourcePool - 02_reverse.py: PASS -------------------------------------------------------------------- HostedResourcePool - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedService - 01_forward.py: PASS -------------------------------------------------------------------- HostedService - 02_reverse.py: PASS -------------------------------------------------------------------- HostedService - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedService - 04_reverse_errs.py: PASS -------------------------------------------------------------------- KVMRedirectionSAP - 01_enum_KVMredSAP.py: PASS -------------------------------------------------------------------- LogicalDisk - 01_disk.py: PASS -------------------------------------------------------------------- LogicalDisk - 02_nodevs.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- LogicalDisk - 03_ld_gi_errs.py: PASS -------------------------------------------------------------------- Memory - 01_memory.py: PASS -------------------------------------------------------------------- Memory - 02_defgetmem.py: PASS -------------------------------------------------------------------- Memory - 03_mem_gi_errs.py: PASS -------------------------------------------------------------------- NetworkPort - 01_netport.py: PASS -------------------------------------------------------------------- NetworkPort - 02_np_gi_errors.py: PASS -------------------------------------------------------------------- NetworkPort - 03_user_netport.py: PASS -------------------------------------------------------------------- Processor - 01_processor.py: PASS -------------------------------------------------------------------- Processor - 02_definesys_get_procs.py: PASS -------------------------------------------------------------------- Processor - 03_proc_gi_errs.py: PASS -------------------------------------------------------------------- Profile - 01_enum.py: PASS -------------------------------------------------------------------- Profile - 02_profile_to_elec.py: PASS -------------------------------------------------------------------- Profile - 03_rprofile_gi_errs.py: PASS -------------------------------------------------------------------- RASD - 01_verify_rasd_fields.py: PASS -------------------------------------------------------------------- RASD - 02_enum.py: PASS -------------------------------------------------------------------- RASD - 03_rasd_errs.py: PASS -------------------------------------------------------------------- RASD - 04_disk_rasd_size.py: PASS -------------------------------------------------------------------- RASD - 05_disk_rasd_emu_type.py: FAIL ERROR - Exception: EmulatedType Mismatch: got 0,expected 1 
-------------------------------------------------------------------- RedirectionService - 01_enum_crs.py: PASS -------------------------------------------------------------------- RedirectionService - 02_enum_crscap.py: PASS -------------------------------------------------------------------- RedirectionService - 03_RedirectionSAP_errs.py: PASS -------------------------------------------------------------------- ReferencedProfile - 01_verify_refprof.py: PASS -------------------------------------------------------------------- ReferencedProfile - 02_refprofile_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 03_forward_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 05_RAPF_err.py: PASS -------------------------------------------------------------------- ResourcePool - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePool - 02_rp_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description CIM_ERR_INVALID_PARAMETER InvokeMethod(CreateChildResourcePool): CIM_ERR_INVALID_PARAMETER -------------------------------------------------------------------- ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description CIM_ERR_INVALID_PARAMETER InvokeMethod(DeleteResourcePool): CIM_ERR_INVALID_PARAMETER -------------------------------------------------------------------- ServiceAccessBySAP - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAccessBySAP - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 01_forward.py: PASS -------------------------------------------------------------------- SettingsDefine - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 03_sds_fwd_errs.py: PASS -------------------------------------------------------------------- SettingsDefine - 04_sds_rev_errs.py: PASS 
-------------------------------------------------------------------- SettingsDefineCapabilities - 01_forward.py: FAIL ERROR - KVM_SettingsDefineCapabilities returned 8 ResourcePool objects instead of 4 -------------------------------------------------------------------- SettingsDefineCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS -------------------------------------------------------------------- SystemDevice - 01_forward.py: FAIL 01_forward.py:29: DeprecationWarning: the sets module is deprecated from sets import Set ERROR - DeviceID mismatch ERROR - Exception Expected DeviceID: ['test_domain/0', 'test_domain/1', 'test_domain/2'] Got: [u'test_domain/0'] -------------------------------------------------------------------- SystemDevice - 02_reverse.py: PASS -------------------------------------------------------------------- SystemDevice - 03_fwderrs.py: PASS -------------------------------------------------------------------- VSSD - 01_enum.py: PASS -------------------------------------------------------------------- VSSD - 02_bootldr.py: SKIP -------------------------------------------------------------------- VSSD - 03_vssd_gi_errs.py: PASS -------------------------------------------------------------------- VSSD - 04_vssd_to_rasd.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 01_definesystem_name.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 02_destroysystem.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 03_definesystem_ess.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 04_definesystem_ers.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 05_destroysystem_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 06_addresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 07_addresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 08_modifyresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL -------------------------------------------------------------------- VirtualSystemManagementService - 10_hv_version.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 11_define_memrasdunits.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 12_referenced_config.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 13_refconfig_additional_devs.py: PASS -------------------------------------------------------------------- 
VirtualSystemManagementService - 14_define_sys_disk.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 15_mod_system_settings.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 16_removeresource.py: XFAIL ERROR - 0 RASD insts for domain/mouse:ps2 CIM_ERR_NOT_FOUND: No such instance (no device domain/mouse:ps2) Bug:<00014> -------------------------------------------------------------------- VirtualSystemManagementService - 17_removeresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationService - 01_migratable_host.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 02_vsmsd_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 01_forward.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 02_reverse.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py: PASS -------------------------------------------------------------------- -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From yunguol at cn.ibm.com Mon May 11 09:10:42 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Mon, 11 May 2009 17:10:42 +0800 Subject: [Libvirt-cim] [PATCH] [TEST] Add new functions to pool.py for pool verification through providers In-Reply-To: <4A031A29.704@linux.vnet.ibm.com> Message-ID: libvirt-cim-bounces at redhat.com wrote on 2009-05-08 01:28:09: > > > +def dump_netxml(server, netname): > > + cmd = "virsh net-dumpxml %s | awk '/ip address/ {print}' | \ > > + cut -d ' ' -f 4 | sed 's/address=//'" % netname > > + s, addr = run_remote(server, cmd) > > + addr = addr.strip("'") > > + > > + return addr > > This only checks the IP, it doesn't check the netmask, ip start, > ip end, etc. > > I would try to leverage the NetXML class here. You can have a function > similar to dumpxml. You can also have functions similar to > xml_get_net_mac(), xml_get_vcpus() etc. This will allow you to verify > all the values of the XML, not just the IP. I discussed this in detail with Deepti. We'd like to define several functions for parsing net pool values in the NetXML class. But in the tc, we can only get a NetXML string instead of a NetXML class, so we cannot call the parsing functions. Do you know how to fix this? For now, the only option is to use a dump function outside of NetXML, and then parse the values in the str... > > > +def verify_pool(server, pooltype, poolid, address): > > + status = FAIL > > + pool_list = EnumInstances(server, pooltype) > > + if len(pool_list) < 1: > > + logger.error("Return %i instances, expected at least one instance", > > + len(pool_list)) > > + return FAIL > > + > > + for i in range(0, len(pool_list)): > > + ret_pool = pool_list[i].InstanceID > > + ret_pool_name = ret_pool.split("/")[1] > > You can use the parse_instance_id() function for this. > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -------------- next part -------------- An HTML attachment was scrubbed... URL: From kaitlin at linux.vnet.ibm.com Mon May 11 17:35:40 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 10:35:40 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] Add parent_device_pool() which returns the parent pool for a given device type In-Reply-To: <4A0497FC.9090005@linux.vnet.ibm.com> References: <4A0497FC.9090005@linux.vnet.ibm.com> Message-ID: <4A0861EC.6080306@linux.vnet.ibm.com> >> { >> CMPIInstance *inst = NULL; >> struct inst_list list; >> + bool val; >> >> - inst_list_init(&list); >> + if ((type == CIM_RES_TYPE_DISK) || (type == CIM_RES_TYPE_NET)) { >> + int i = 0; >> + CMPIrc rc; >> >> - if (type == CIM_RES_TYPE_MEM) { >> - *s = get_pool_by_name(broker, reference, >> "MemoryPool/0", &inst); >> - } else if (type == CIM_RES_TYPE_PROC) { >> - *s = get_pool_by_name(broker, reference, >> "ProcessorPool/0", &inst); >> - } else if (type == CIM_RES_TYPE_DISK) { >> + inst_list_init(&list); >> + >> *s = enum_pools(broker, reference, type, &list); >> - if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) > > You probably should insert a CU_DEBUG here just to help debug The code bit above is being removed, not added. > >> - inst = list.list[0]; >> - } else if (type == CIM_RES_TYPE_NET) { >> - *s = enum_pools(broker, reference, type, &list); >> - if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) >> - inst = list.list[0]; > Same here Same here - this code bit is being removed. 
>> - } else if (type == CIM_RES_TYPE_GRAPHICS) { >> - *s = get_pool_by_name(broker, >> - reference, >> - "GraphicsPool/0", >> - &inst); > Same here It's been removed here as well. >> - } else if (type == CIM_RES_TYPE_INPUT) { >> - *s = get_pool_by_name(broker, >> - reference, >> - "InputPool/0", >> - &inst); >> + if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) { >> + + for (i = 0; i < >> list.cur; i++) { >> + rc = cu_get_bool_prop(list.list[i], >> + "Primordial", >> + &val); >> + if ((rc != CMPI_RC_OK) || (val)) >> + continue; >> + >> + inst = list.list[i]; >> + break; >> + } > Same here I'll fix this and send an updated patch. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Mon May 11 17:59:09 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 10:59:09 -0700 Subject: [Libvirt-cim] [PATCH 0 of 2] #2 Parent pool <--> child pool supportfor EAFP Message-ID: See individual patches for changes From kaitlin at linux.vnet.ibm.com Mon May 11 17:59:10 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 10:59:10 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] (#2) Add parent_device_pool() which returns the parent pool for a given device type In-Reply-To: References: Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1240877148 25200 # Node ID aa703f18b57160b88feef4c02df05a926709966e # Parent 570c3507c7b2c3e55680e9a70f4889accb9a1cf7 (#2) Add parent_device_pool() which returns the parent pool for a given device type -Updates from 1 to 2: -Add debug statement is enum_pools() fails or returns no instances Signed-off-by: Kaitlin Rupert diff -r 570c3507c7b2 -r aa703f18b571 src/Virt_DevicePool.c --- a/src/Virt_DevicePool.c Tue May 05 18:08:58 2009 -0700 +++ b/src/Virt_DevicePool.c Mon Apr 27 17:05:48 2009 -0700 @@ -77,7 +77,7 @@ goto out; } - pools[count].tag = strdup("Parent"); + pools[count].tag = strdup("0"); pools[count].path = NULL; pools[count].primordial = true; count++; @@ -1234,6 +1234,45 @@ return _get_pools(broker, reference, type, NULL, list); } +CMPIInstance *parent_device_pool(const CMPIBroker *broker, + const CMPIObjectPath *reference, + uint16_t type, + CMPIStatus *s) +{ + CMPIInstance *inst = NULL; + const char *id = NULL; + + if (type == CIM_RES_TYPE_MEM) { + id = "MemoryPool/0"; + } else if (type == CIM_RES_TYPE_PROC) { + id = "ProcessorPool/0"; + } else if (type == CIM_RES_TYPE_DISK) { + id = "DiskPool/0"; + } else if (type == CIM_RES_TYPE_NET) { + id = "NetworkPool/0"; + } else if (type == CIM_RES_TYPE_GRAPHICS) { + id = "GraphicsPool/0"; + } else if (type == CIM_RES_TYPE_INPUT) { + id = "InputPool/0"; + } else { + cu_statusf(broker, s, + CMPI_RC_ERR_INVALID_PARAMETER, + "No such device type `%s'", type); + goto out; + } + + *s = get_pool_by_name(broker, reference, id, &inst); + if (inst == NULL) { + cu_statusf(broker, s, + CMPI_RC_ERR_FAILED, + "No default pool found for type %hi", type); + } + + out: + + return inst; +} + CMPIInstance *default_device_pool(const CMPIBroker *broker, const CMPIObjectPath *reference, uint16_t type, @@ -1241,44 +1280,43 @@ { CMPIInstance *inst = NULL; struct inst_list list; + bool val; - inst_list_init(&list); + if ((type == CIM_RES_TYPE_DISK) || (type == CIM_RES_TYPE_NET)) { + int i = 0; + CMPIrc rc; - if (type == CIM_RES_TYPE_MEM) { - *s = get_pool_by_name(broker, reference, "MemoryPool/0", &inst); - } else if (type == CIM_RES_TYPE_PROC) { - *s = get_pool_by_name(broker, reference, "ProcessorPool/0", &inst); - } else if (type == 
CIM_RES_TYPE_DISK) { + inst_list_init(&list); + *s = enum_pools(broker, reference, type, &list); - if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) - inst = list.list[0]; - } else if (type == CIM_RES_TYPE_NET) { - *s = enum_pools(broker, reference, type, &list); - if ((s->rc == CMPI_RC_OK) && (list.cur > 0)) - inst = list.list[0]; - } else if (type == CIM_RES_TYPE_GRAPHICS) { - *s = get_pool_by_name(broker, - reference, - "GraphicsPool/0", - &inst); - } else if (type == CIM_RES_TYPE_INPUT) { - *s = get_pool_by_name(broker, - reference, - "InputPool/0", - &inst); + if ((s->rc != CMPI_RC_OK) || (list.cur <= 0)) { + CU_DEBUG("Unable to enum pools to get parent pool"); + goto out; + } + + for (i = 0; i < list.cur; i++) { + rc = cu_get_bool_prop(list.list[i], + "Primordial", + &val); + if ((rc != CMPI_RC_OK) || (val)) + continue; + + inst = list.list[i]; + break; + } + + inst_list_free(&list); + + if (inst == NULL) { + cu_statusf(broker, s, + CMPI_RC_ERR_FAILED, + "No default pool found for type %hi", type); + } } else { - cu_statusf(broker, s, - CMPI_RC_ERR_INVALID_PARAMETER, - "No such device type `%s'", type); + inst = parent_device_pool(broker, reference, type, s); } - inst_list_free(&list); - - if (inst == NULL) { - cu_statusf(broker, s, - CMPI_RC_ERR_FAILED, - "No default pool found for type %hi", type); - } + out: return inst; } diff -r 570c3507c7b2 -r aa703f18b571 src/Virt_DevicePool.h --- a/src/Virt_DevicePool.h Tue May 05 18:08:58 2009 -0700 +++ b/src/Virt_DevicePool.h Mon Apr 27 17:05:48 2009 -0700 @@ -106,6 +106,20 @@ CMPIInstance **_inst); /** + * Get the parent pool for a given device type + * + * @param broker A pointer to the current broker + * @param ref The object path containing namespace and prefix info + * @param type The device type in question + * @param status The returned status + * @returns Parent pool instance + */ +CMPIInstance *parent_device_pool(const CMPIBroker *broker, + const CMPIObjectPath *reference, + uint16_t type, + CMPIStatus *s); + +/** * Get the default pool for a given device type * * @param broker A pointer to the current broker From kaitlin at linux.vnet.ibm.com Mon May 11 17:59:11 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 10:59:11 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] (#2) EAFP between parent and child pools In-Reply-To: References: Message-ID: <3d42e1423d027c04c104.1242064751@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1240877148 25200 # Node ID 3d42e1423d027c04c104ff0be6d99b86c46d1257 # Parent aa703f18b57160b88feef4c02df05a926709966e (#2) EAFP between parent and child pools. 
-Updates from 1 to 2: -Add debug statements to failure paths for easier debugging Signed-off-by: Kaitlin Rupert diff -r aa703f18b571 -r 3d42e1423d02 src/Virt_ElementAllocatedFromPool.c --- a/src/Virt_ElementAllocatedFromPool.c Mon Apr 27 17:05:48 2009 -0700 +++ b/src/Virt_ElementAllocatedFromPool.c Mon Apr 27 17:05:48 2009 -0700 @@ -89,18 +89,29 @@ free(poolid); return s; + } -static int filter_by_pool(struct inst_list *dest, - struct inst_list *src, - const uint16_t type, - const char *_poolid) +static CMPIStatus get_dev_from_pool(const CMPIObjectPath *ref, + const uint16_t type, + const char *_poolid, + struct inst_list *list) { + CMPIStatus s = {CMPI_RC_OK, NULL}; + char *poolid = NULL; + struct inst_list tmp; int i; - char *poolid = NULL; - for (i = 0; i < src->cur; i++) { - CMPIInstance *inst = src->list[i]; + inst_list_init(&tmp); + + s = enum_devices(_BROKER, ref, NULL, type, &tmp); + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Unable to enum devices in get_dev_from_pool()"); + goto out; + } + + for (i = 0; i < tmp.cur; i++) { + CMPIInstance *inst = tmp.list[i]; const char *cn = NULL; const char *dev_id = NULL; @@ -112,21 +123,80 @@ poolid = pool_member_of(_BROKER, cn, type, dev_id); if (poolid && STREQ(poolid, _poolid)) - inst_list_add(dest, inst); + inst_list_add(list, inst); } - return dest->cur; + inst_list_free(&tmp); + + out: + + return s; } -static CMPIStatus pool_to_vdev(const CMPIObjectPath *ref, - struct std_assoc_info *info, - struct inst_list *list) +static CMPIStatus get_pools(const CMPIObjectPath *ref, + const uint16_t type, + const char *poolid, + CMPIInstance *pool_inst, + struct inst_list *list) +{ + CMPIStatus s = {CMPI_RC_OK, NULL}; + CMPIInstance *pool = NULL; + bool val; + + if (cu_get_bool_prop(pool_inst, "Primordial", &val) != CMPI_RC_OK) { + cu_statusf(_BROKER, &s, + CMPI_RC_ERR_FAILED, + "Unable to determine pool type"); + goto out; + } + + /* If Primordial is true, the pool is a parent pool. Need to return + all other pools. Otherwise, just return the parent pool. 
*/ + if (val) { + struct inst_list tmp; + int i; + + inst_list_init(&tmp); + + s = enum_pools(_BROKER, ref, type, &tmp); + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Unable to enum pools in get_pools()"); + goto out; + } + + for (i = 0; i < tmp.cur; i++) { + CMPIInstance *inst = tmp.list[i]; + const char *id = NULL; + + cu_get_str_prop(inst, "InstanceID", &id); + + if (!STREQC(id, poolid)) + inst_list_add(list, inst); + } + + inst_list_free(&tmp); + } else { + pool = parent_device_pool(_BROKER, ref, type, &s); + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Unable to get parent pool in get_pools()"); + goto out; + } + + inst_list_add(list, pool); + } + + out: + return s; +} + +static CMPIStatus pool_to_vdev_or_pool(const CMPIObjectPath *ref, + struct std_assoc_info *info, + struct inst_list *list) { const char *poolid; CMPIStatus s = {CMPI_RC_OK, NULL}; uint16_t type; CMPIInstance *inst = NULL; - struct inst_list tmp; if (!match_hypervisor_prefix(ref, info)) return s; @@ -150,15 +220,13 @@ goto out; } - inst_list_init(&tmp); + s = get_dev_from_pool(ref, type, poolid, list); + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Unable to get device from pool"); + goto out; + } - s = enum_devices(_BROKER, ref, NULL, type, &tmp); - if (s.rc != CMPI_RC_OK) - goto out; - - filter_by_pool(list, &tmp, type, poolid); - - inst_list_free(&tmp); + s = get_pools(ref, type, poolid, inst, list); out: return s; @@ -166,7 +234,7 @@ LIBVIRT_CIM_DEFAULT_MAKEREF() -static char* antecedent[] = { +static char* pool[] = { "Xen_ProcessorPool", "Xen_MemoryPool", "Xen_NetworkPool", @@ -188,7 +256,7 @@ NULL }; -static char* dependent[] = { +static char* device[] = { "Xen_Processor", "Xen_Memory", "Xen_NetworkPort", @@ -210,6 +278,46 @@ NULL }; +static char* device_or_pool[] = { + "Xen_Processor", + "Xen_Memory", + "Xen_NetworkPort", + "Xen_LogicalDisk", + "Xen_DisplayController", + "Xen_PointingDevice", + "KVM_Processor", + "KVM_Memory", + "KVM_NetworkPort", + "KVM_LogicalDisk", + "KVM_DisplayController", + "KVM_PointingDevice", + "LXC_Processor", + "LXC_Memory", + "LXC_NetworkPort", + "LXC_LogicalDisk", + "LXC_DisplayController", + "LXC_PointingDevice", + "Xen_ProcessorPool", + "Xen_MemoryPool", + "Xen_NetworkPool", + "Xen_DiskPool", + "Xen_GraphicsPool", + "Xen_InputPool", + "KVM_ProcessorPool", + "KVM_MemoryPool", + "KVM_NetworkPool", + "KVM_DiskPool", + "KVM_GraphicsPool", + "KVM_InputPool", + "LXC_ProcessorPool", + "LXC_MemoryPool", + "LXC_NetworkPool", + "LXC_DiskPool", + "LXC_GraphicsPool", + "LXC_InputPool", + NULL +}; + static char* assoc_classname[] = { "Xen_ElementAllocatedFromPool", "KVM_ElementAllocatedFromPool", @@ -218,10 +326,10 @@ }; static struct std_assoc _vdev_to_pool = { - .source_class = (char**)&dependent, + .source_class = (char**)&device, .source_prop = "Dependent", - .target_class = (char**)&antecedent, + .target_class = (char**)&pool, .target_prop = "Antecedent", .assoc_class = (char**)&assoc_classname, @@ -230,22 +338,22 @@ .make_ref = make_ref }; -static struct std_assoc _pool_to_vdev = { - .source_class = (char**)&antecedent, +static struct std_assoc _pool_to_vdev_or_pool = { + .source_class = (char**)&pool, .source_prop = "Antecedent", - .target_class = (char**)&dependent, + .target_class = (char**)&device_or_pool, .target_prop = "Dependent", .assoc_class = (char**)&assoc_classname, - .handler = pool_to_vdev, + .handler = pool_to_vdev_or_pool, .make_ref = make_ref }; static struct std_assoc *handlers[] = { &_vdev_to_pool, - &_pool_to_vdev, + &_pool_to_vdev_or_pool, NULL }; From kaitlin at linux.vnet.ibm.com Mon 
May 11 18:01:00 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 11:01:00 -0700 Subject: [Libvirt-cim] [PATCH] Add "disk" type storage pools Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1242062614 25200 # Node ID e08c78615c3ec653c2979b4619b15d48a1c56d48 # Parent 3d42e1423d027c04c104ff0be6d99b86c46d1257 Add "disk" type storage pools. This storage pool type is a disk backed pool, instead of being a file or directory backed pool. This is tricky to test - you'll need a free disk / partition that you can mount. I've tested this using a spare LVM partition. Signed-off-by: Kaitlin Rupert diff -r 3d42e1423d02 -r e08c78615c3e src/Virt_ResourcePoolConfigurationService.c --- a/src/Virt_ResourcePoolConfigurationService.c Mon Apr 27 17:05:48 2009 -0700 +++ b/src/Virt_ResourcePoolConfigurationService.c Mon May 11 10:23:34 2009 -0700 @@ -148,8 +148,8 @@ pool->pool_info.disk.src_dir = NULL; } -static const char *disk_fs_pool(CMPIInstance *inst, - struct virt_pool *pool) +static const char *disk_fs_or_disk_pool(CMPIInstance *inst, + struct virt_pool *pool) { const char *val = NULL; @@ -195,19 +195,19 @@ case DISK_POOL_DIR: break; case DISK_POOL_FS: - msg = disk_fs_pool(inst, pool); - if (msg != NULL) - goto out; + case DISK_POOL_DISK: + msg = disk_fs_or_disk_pool(inst, pool); break; case DISK_POOL_NETFS: msg = disk_netfs_pool(inst, pool); - if (msg != NULL) - goto out; break; default: return "Storage pool type not supported"; } + if (msg != NULL) + goto out; + pool->pool_info.disk.pool_type = type; if (cu_get_str_prop(inst, "Path", &val) != CMPI_RC_OK) From kaitlin at linux.vnet.ibm.com Mon May 11 18:01:29 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 11:01:29 -0700 Subject: [Libvirt-cim] [PATCH] Allow user to specify UUID to use when guest is created Message-ID: <9a16c7a7963cebddd145.1242064889@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1241805181 25200 # Node ID 9a16c7a7963cebddd145212e57ccc9defa5e0a0c # Parent e08c78615c3ec653c2979b4619b15d48a1c56d48 Allow user to specify UUID to use when guest is created Also expose UUID in VSSD instances. 
Signed-off-by: Kaitlin Rupert diff -r e08c78615c3e -r 9a16c7a7963c schema/ComputerSystem.mof --- a/schema/ComputerSystem.mof Mon May 11 10:23:34 2009 -0700 +++ b/schema/ComputerSystem.mof Fri May 08 10:53:01 2009 -0700 @@ -7,7 +7,6 @@ ] class Xen_ComputerSystem : CIM_ComputerSystem { - [Description("UUID assigned to this DomU.")] string UUID; diff -r e08c78615c3e -r 9a16c7a7963c schema/Virt_VSSD.mof --- a/schema/Virt_VSSD.mof Mon May 11 10:23:34 2009 -0700 +++ b/schema/Virt_VSSD.mof Fri May 08 10:53:01 2009 -0700 @@ -12,4 +12,7 @@ Values { "Turn Off", "Save state", "DMTF Reserved" }] uint16 AutomaticShutdownAction; + [Description("UUID assigned to this DomU.")] + string UUID; + }; diff -r e08c78615c3e -r 9a16c7a7963c src/Virt_VSSD.c --- a/src/Virt_VSSD.c Mon May 11 10:23:34 2009 -0700 +++ b/src/Virt_VSSD.c Fri May 08 10:53:01 2009 -0700 @@ -113,6 +113,9 @@ CMSetProperty(inst, "VirtualSystemType", (CMPIValue *)pfx, CMPI_chars); + CMSetProperty(inst, "UUID", + (CMPIValue *)dominfo->uuid, CMPI_chars); + CMSetProperty(inst, "Caption", (CMPIValue *)"Virtual System", CMPI_chars); diff -r e08c78615c3e -r 9a16c7a7963c src/Virt_VirtualSystemManagementService.c --- a/src/Virt_VirtualSystemManagementService.c Mon May 11 10:23:34 2009 -0700 +++ b/src/Virt_VirtualSystemManagementService.c Fri May 08 10:53:01 2009 -0700 @@ -337,6 +337,12 @@ free(domain->name); domain->name = strdup(val); + ret = cu_get_str_prop(inst, "UUID", &val); + if (ret == CMPI_RC_OK) { + free(domain->uuid); + domain->uuid = strdup(val); + } + ret = cu_get_u16_prop(inst, "AutomaticShutdownAction", &tmp); if (ret != CMPI_RC_OK) tmp = 0; From kaitlin at linux.vnet.ibm.com Mon May 11 18:25:52 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 11:25:52 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Add new functions to pool.py forpoolverificaiton through providers In-Reply-To: References: Message-ID: <4A086DB0.6020101@linux.vnet.ibm.com> > > I would try to leverage the NetXML class here. You can have a function > > similar to dumpxml. You can also have functions similar to > > xml_get_net_mac(), xml_get_vcpus() etc. This will allow you to verify > > all the values of the XML, not just the IP. > > I discussed this in detail with Deepti. We'd like to define several > functions > for parsing net pool values in NetXML class. But on the tc, we can > only get > NetXML string instead of NetXML class, so we can not call the parsing > functions. > Do you know how to fix this? > Now, the only options is to use dump function outside of NetXML, and then > parse the values in the str... > I would make the init() function of NetXML take a flag that allows you to specify whether the network is a new one or not. If the network isn't a new one, then you can dump the XML directly (instead of building the XML, like what's currently being done in the init() function). Having a flag like this isn't really ideal, but there's not an easy way to tell NetXML whether you want to init a completely new NetXML object or if you want to just get the XML of an existing object. 
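As a rough sketch of the flag idea described above (illustration only: the NetXMLSketch class name, the is_new parameter, and the xml_get_netmask() helper are invented for this example and are not the actual cimtest NetXML API; only run_remote() and the virsh net-dumpxml command come from this thread):

from xml.dom import minidom
from VirtLib.utils import run_remote

class NetXMLSketch:
    def __init__(self, server, net_name, is_new=True):
        # Hypothetical flag: is_new=True would build fresh network XML the
        # way the class does today; is_new=False dumps an existing network.
        self.server = server
        self.net_name = net_name
        if is_new:
            # placeholder for the existing "build new network XML" path
            self.xml_string = "<network><name>%s</name></network>" % net_name
        else:
            # existing network: dump its XML instead of building it
            cmd = "virsh net-dumpxml %s" % net_name
            status, out = run_remote(server, cmd)
            if status != 0:
                out = ""
            self.xml_string = out

    def xml_get_netmask(self):
        # example parser in the style of xml_get_net_mac(): read the netmask
        # attribute from the <ip> element of the network XML
        doc = minidom.parseString(self.xml_string)
        ip_nodes = doc.getElementsByTagName("ip")
        if len(ip_nodes) == 0:
            return None
        return ip_nodes[0].getAttribute("netmask")

A test case could then construct the object with is_new=False for an already-defined network and reuse the same xml_get_*() style helpers to verify the netmask, ip start, ip end, and so on.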
-- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Mon May 11 19:26:26 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Mon, 11 May 2009 16:26:26 -0300 Subject: [Libvirt-cim] [PATCH 0 of 2] #2 Parent pool <--> child poolsupportfor EAFP In-Reply-To: References: Message-ID: <4A087BE2.20806@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > See individual patches for changes > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Mon May 11 23:01:40 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 16:01:40 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] Add VSSS 03_create_snapshot.py In-Reply-To: References: Message-ID: <3c03fd52e4400d111720.1242082900@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242082293 25200 # Node ID 3c03fd52e4400d1117203c47a50ca1b8e75bf5f3 # Parent 61d1a7aa49471be6604552efc2dba8491ccd0ad7 Add VSSS 03_create_snapshot.py This test case attempts a guest snapshot and verifies the results. Signed-off-by: Kaitlin Rupert diff -r 61d1a7aa4947 -r 3c03fd52e440 suites/libvirt-cim/cimtest/VirtualSystemSnapshotService/03_create_snapshot.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/VirtualSystemSnapshotService/03_create_snapshot.py Mon May 11 15:51:33 2009 -0700 @@ -0,0 +1,140 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Kaitlin Rupert +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import sys +from pywbem import CIM_ERR_FAILED, cim_types +from CimTest.Globals import logger +from CimTest.ReturnCodes import PASS, FAIL +from XenKvmLib.const import do_main +from XenKvmLib.vxml import get_class +from XenKvmLib.classes import get_typed_class, inst_to_mof +from XenKvmLib.enumclass import EnumNames, EnumInstances, GetInstance +from XenKvmLib.vsss import remove_snapshot + +sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] + +SNAPSHOT = cim_types.Uint16(32769) +test_dom = "snapshot_vm" + +def get_cs_ref(virt, ip): + cs_cn = get_typed_class(virt, "ComputerSystem") + + cs_refs = EnumNames(ip, cs_cn) + if cs_refs is None or len(cs_refs) < 1: + logger.error("Exp at least one domain defined on the system") + return FAIL, None + + cs_ref = None + for ref in cs_refs: + if ref['Name'] == test_dom: + cs_ref = ref + break + + if cs_ref is None: + logger.error("Enum of %s didn't return %s", cs_cn, test_dom) + return FAIL, None + + return PASS, cs_ref + +def get_vsssc_inst(virt, ip): + vsssc_cn = get_typed_class(virt, "VirtualSystemSnapshotServiceCapabilities") + + vsssc_insts = EnumInstances(ip, vsssc_cn, ret_cim_inst=True) + if vsssc_insts is None or len(vsssc_insts) < 1: + logger.error("Exp at least one %s", vsssc_cn) + return FAIL, None + + vsssc = vsssc_insts[0] + + #Override the additional instance values. We only care about the key + #values (eventhough CreateSnapshot takes a instance) + vsssc['SynchronousMethodsSupported'] = "" + vsssc['SnapshotTypesSupported'] = "" + + vsssc = inst_to_mof(vsssc) + + return PASS, vsssc + + at do_main(sup_types) +def main(): + options = main.options + + cxml = get_class(options.virt)(test_dom) + + try: + ret = cxml.cim_define(options.ip) + if not ret: + raise Exception("Unable to define %s", test_dom) + + status = cxml.cim_start(options.ip) + if status != PASS: + raise Exception("Failed to start the defined domain: %s" % test_dom) + + status, cs_ref = get_cs_ref(options.virt, options.ip) + if status != PASS: + raise Exception("Unable to get reference for %s" % test_dom) + + status, vsssc = get_vsssc_inst(options.virt, options.ip) + if status != PASS: + raise Exception("Unable to get VSSSC instance") + + vsss_cn = get_typed_class(options.virt, "VirtualSystemSnapshotService") + vsss_refs = EnumNames(options.ip, vsss_cn) + if vsss_refs is None or len(vsss_refs) < 1: + raise Exception("Exp at least one %s" % vsss_cn) + + service = vsss_refs[0] + keys = { 'Name' : service['Name'], + 'CreationClassName' : service['CreationClassName'], + 'SystemCreationClassName' : service['SystemCreationClassName'], + 'SystemName' : service['SystemName'] + } + service = GetInstance(options.ip, vsss_cn, keys) + + output = service.CreateSnapshot(AffectedSystem=cs_ref, + SnapshotSettings=vsssc, + SnapshotType=SNAPSHOT) + + ret = output[0] + if ret != 0: + raise Exception("Snapshot of %s failed!" 
% test_dom) + + if output[1]['Job'] is None: + raise Exception("CreateSnapshot failed to return a CIM job inst") + + if output[1]['ResultingSnapshot'] is None: + raise Exception("CreateSnapshot failed to return ResultingSnapshot") + + except Exception, detail: + logger.error("Exception: %s", detail) + status = FAIL + + cxml.cim_destroy(options.ip) + cxml.undefine(options.ip) + + remove_snapshot(options.ip, test_dom) + + return status + +if __name__ == "__main__": + sys.exit(main()) + From kaitlin at linux.vnet.ibm.com Mon May 11 23:01:38 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 16:01:38 -0700 Subject: [Libvirt-cim] [PATCH 0 of 2] [TEST] Add test to verify CreateSnapshot() Message-ID: From kaitlin at linux.vnet.ibm.com Mon May 11 23:01:39 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 16:01:39 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] Add vsss.py module In-Reply-To: References: Message-ID: <61d1a7aa49471be66045.1242082899@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242082293 25200 # Node ID 61d1a7aa49471be6604552efc2dba8491ccd0ad7 # Parent 92caf252c2fa8c8a7a9b70548d12b03c52f3935c Add vsss.py module For keeping functions related to the VirtualSystemSnapshotService Signed-off-by: Kaitlin Rupert diff -r 92caf252c2fa -r 61d1a7aa4947 suites/libvirt-cim/lib/XenKvmLib/vsss.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/lib/XenKvmLib/vsss.py Mon May 11 15:51:33 2009 -0700 @@ -0,0 +1,40 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Kaitlin Rupert +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import pywbem +from VirtLib.utils import run_remote +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS + +#Path to snapshot save location +snapshot_save_loc = '/var/lib/libvirt/' + +def remove_snapshot(ip, vm_name): + snapshot = "%s%s" % (snapshot_save_loc, vm_name) + + cmd = "rm %s.save" % snapshot + ret, out = run_remote(ip, cmd) + if ret != 0: + logger.error("Failed to remove snapshot file for %s", vm_name) + return FAIL + + return PASS From kaitlin at linux.vnet.ibm.com Mon May 11 23:05:53 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 11 May 2009 16:05:53 -0700 Subject: [Libvirt-cim] [PATCH] Return VSSD reference from CreateSnapshot Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1241805181 25200 # Node ID a8438f4413a007f0ae427072d1c9a603cc911341 # Parent 9a16c7a7963cebddd145212e57ccc9defa5e0a0c Return VSSD reference from CreateSnapshot This method has two outputs - Job and ResultingSnapshot. The provider was not returning a value for ResultingSnapshot. 
Signed-off-by: Kaitlin Rupert diff -r 9a16c7a7963c -r a8438f4413a0 src/Makefile.am --- a/src/Makefile.am Fri May 08 10:53:01 2009 -0700 +++ b/src/Makefile.am Fri May 08 10:53:01 2009 -0700 @@ -197,9 +197,9 @@ libVirt_VSMigrationSettingData_la_SOURCES = Virt_VSMigrationSettingData.c -libVirt_VirtualSystemSnapshotService_la_DEPENDENCIES = libVirt_HostSystem.la +libVirt_VirtualSystemSnapshotService_la_DEPENDENCIES = libVirt_HostSystem.la libVirt_VSSD.la libVirt_VirtualSystemSnapshotService_la_SOURCES = Virt_VirtualSystemSnapshotService.c -libVirt_VirtualSystemSnapshotService_la_LIBADD = -lVirt_HostSystem +libVirt_VirtualSystemSnapshotService_la_LIBADD = -lVirt_HostSystem -lVirt_VSSD libVirt_VirtualSystemSnapshotServiceCapabilities_la_DEPENDENCIES = libVirt_VirtualSystemSnapshotServiceCapabilities_la_SOURCES = Virt_VirtualSystemSnapshotServiceCapabilities.c diff -r 9a16c7a7963c -r a8438f4413a0 src/Virt_VirtualSystemSnapshotService.c --- a/src/Virt_VirtualSystemSnapshotService.c Fri May 08 10:53:01 2009 -0700 +++ b/src/Virt_VirtualSystemSnapshotService.c Fri May 08 10:53:01 2009 -0700 @@ -38,6 +38,7 @@ #include "Virt_VirtualSystemSnapshotService.h" #include "Virt_HostSystem.h" +#include "Virt_VSSD.h" #define CIM_JOBSTATE_STARTING 3 #define CIM_JOBSTATE_RUNNING 4 @@ -388,6 +389,8 @@ struct snap_context *ctx; CMPIStatus s; CMPIObjectPath *job; + CMPIObjectPath *vssd; + CMPIInstance *inst; ctx = new_context(name, &s); if (ctx == NULL) { @@ -401,7 +404,21 @@ ctx->restore = (type != VIR_VSSS_SNAPSHOT_MEMT); s = create_job(context, ref, ctx, &job); + + s = get_vssd_by_name(_BROKER, ref, name, &inst); + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Unable to get guest VSSD in start_snapshot_job()"); + goto out; + } + + vssd = CMGetObjectPath(inst, &s); + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Unable to get VSSD ref from instance"); + goto out; + } + CMAddArg(argsout, "Job", (CMPIValue *)&job, CMPI_ref); + CMAddArg(argsout, "ResultingSnapshot", (CMPIValue *)&vssd, CMPI_ref); out: return s; From deeptik at linux.vnet.ibm.com Tue May 12 11:39:33 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 12 May 2009 04:39:33 -0700 Subject: [Libvirt-cim] [PATCH 1 of 3] [TEST] Adding InputRASD to vsms.py In-Reply-To: References: Message-ID: # HG changeset patch # User Deepti B. Kalakeri # Date 1242128012 25200 # Node ID bbea7924536083ce35de6ec8b680a7e873591d82 # Parent 92caf252c2fa8c8a7a9b70548d12b03c52f3935c [TEST] Adding InputRASD to vsms.py. Tested on F10 with KVM and current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r 92caf252c2fa -r bbea79245360 suites/libvirt-cim/lib/XenKvmLib/vsms.py --- a/suites/libvirt-cim/lib/XenKvmLib/vsms.py Mon May 04 03:49:32 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vsms.py Tue May 12 04:33:32 2009 -0700 @@ -34,6 +34,7 @@ RASD_TYPE_NET_OTHER = 11 RASD_TYPE_DISK = 17 RASD_TYPE_GRAPHICS = 24 +RASD_TYPE_INPUT = 13 VSSD_RECOVERY_NONE = 2 VSSD_RECOVERY_RESTART = 3 @@ -266,6 +267,32 @@ def get_gasd_class(virt): pass +class CIM_InputResourceAllocationSettingData(CIMClassMOF): + def __init__(self, name, res_sub_type=None, bus_type=None): + self.InstanceID = '%s' % name + self.ResourceType = RASD_TYPE_INPUT + + if res_sub_type != None: + self.ResourceSubType = res_sub_type + self.InstanceID += '/%s' % res_sub_type + + if bus_type != None: + self.BusType = bus_type + self.InstanceID += ':%s' % bus_type + +class Xen_InputResourceAllocationSettingData(CIM_InputResourceAllocationSettingData): + pass + +class KVM_InputResourceAllocationSettingData(CIM_InputResourceAllocationSettingData): + pass + +class LXC_InputResourceAllocationSettingData(CIM_InputResourceAllocationSettingData): + pass + + at eval_cls('InputResourceAllocationSettingData') +def get_iasd_class(virt): + pass + def default_vssd_rasd_str(dom_name='test_domain', disk_dev='xvda', disk_source=const.Xen_disk_path, From deeptik at linux.vnet.ibm.com Tue May 12 11:39:32 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 12 May 2009 04:39:32 -0700 Subject: [Libvirt-cim] [PATCH 0 of 3] [TEST] Add new tc to verify SAE association with CRS Message-ID: From deeptik at linux.vnet.ibm.com Tue May 12 11:39:35 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 12 May 2009 04:39:35 -0700 Subject: [Libvirt-cim] [PATCH 3 of 3] [TEST] Adding new tc to verify the SAE Association with CRS In-Reply-To: References: Message-ID: <2d9e512a8116eb52eba4.1242128375@localhost.localdomain> # HG changeset patch # User Deepti B. Kalakeri # Date 1242128319 25200 # Node ID 2d9e512a8116eb52eba4267617bb5b6eac1fc224 # Parent d9658db27f664f2058966a0ace1e158cae4a58b6 [TEST] Adding new tc to verify the SAE Association with CRS. Tested on F10 with KVM and current sources. Signed-off-by: Deepti B. Kalakeri diff -r d9658db27f66 -r 2d9e512a8116 suites/libvirt-cim/cimtest/ServiceAffectsElement/01_forward.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ServiceAffectsElement/01_forward.py Tue May 12 04:38:39 2009 -0700 @@ -0,0 +1,190 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case is used to verify the SAE association with the CRS providers. 
+# The SAE association when queried with the CRS should give the details of the +# Source from which the +# 1) Console Flow can be started represented by the ComputerSystem class +# 2) Original Poiniting Device associated with the guest +# 3) Original Graphics Device associated with the guest +# +# Ex: Command and some of the fields that are verified are given below. +# Command +# ------- +# wbemcli ain -ac KVM_ServiceAffectsElement 'http://root:passwd at localhost +# /root/virt:KVM_ConsoleRedirectionService.CreationClassName=\ +# "KVM_ConsoleRedirectionService", Name="ConsoleRedirectionService", +# SystemCreationClassName="KVM_HostSystem",SystemName="host"' +# +# Output +# ------ +# host/root/virt:KVM_ComputerSystem.CreationClassName="KVM_ComputerSystem", +# Name="demo2" +# host/root/virt:KVM_PointingDevice.CreationClassName="KVM_PointingDevice", +# DeviceID="demo2/mouse:ps2", SystemCreationClassName="KVM_ComputerSystem", +# SystemName="demo2" +# host/root/virt:KVM_DisplayController.CreationClassName=\ +# "KVM_DisplayController",DeviceID="demo2/graphics", +# SystemCreationClassName="KVM_ComputerSystem",SystemName="demo2" +# Date : 12-05-2009 + + +import sys +from sets import Set +from XenKvmLib import assoc +from XenKvmLib import vxml +from CimTest.Globals import logger +from XenKvmLib.classes import get_typed_class +from XenKvmLib.enumclass import EnumInstances +from XenKvmLib.common_util import parse_instance_id +from XenKvmLib.const import do_main, get_provider_version +from CimTest.ReturnCodes import FAIL, PASS + +sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] +sae_assoc_with_input_graphics_rev = 795 + +test_dom = "SAE_dom" + +def get_dom_records(an_cn, assoc_ei_info, assoc_ei_insts): + + for assoc_ei_item in assoc_ei_info: + rec = None + CCN = assoc_ei_item['CreationClassName'] + if 'DisplayController' in CCN or 'PointingDevice' in CCN : + guest, dev, status = parse_instance_id(assoc_ei_item['DeviceID']) + if status != PASS: + logger.error("Unable to parse DeviceID") + return assoc_ei_insts, status + + if guest == test_dom: + rec = assoc_ei_item + elif 'ComputerSystem' in CCN: + if assoc_ei_item['Name'] == test_dom: + rec = assoc_ei_item + else: + logger.error("Unexpected CreationClassName %s returned by " \ + "%s association", CCN, an_cn) + return assoc_ei_insts, FAIL + + if not CCN in assoc_ei_insts.keys() and rec != None: + assoc_ei_insts[CCN]=rec + elif rec != None and (CCN in assoc_ei_insts.keys()): + logger.error("Got more than one record for '%s'", CCN) + return assoc_ei_insts, FAIL + + return assoc_ei_insts, PASS + + +def init_list_for_compare(server, virt): + c_list = [ 'ComputerSystem'] + curr_cim_rev, changeset = get_provider_version(virt, server) + if curr_cim_rev >= sae_assoc_with_input_graphics_rev: + c_list.append('PointingDevice' ) + c_list.append('DisplayController') + + init_list = {} + for name in c_list: + c_name = get_typed_class(virt, name) + ei_details = EnumInstances(server, c_name, ret_cim_inst=True) + init_list, status = get_dom_records(c_name, ei_details, init_list) + if status != PASS: + return init_list, FAIL + + return init_list, PASS + + +def verify_assoc(server, virt, an, assoc_info): + assoc_insts = {} + try: + assoc_insts, status = get_dom_records(an, assoc_info, assoc_insts) + if status != PASS or len(assoc_insts) != 3: + raise Exception("Failed to get insts for domain %s" % test_dom) + + in_list, status = init_list_for_compare(server, virt) + if status != PASS or len(in_list) != 3: + raise Exception("Failed to get init_list") + + in_list_keys = 
Set(in_list.keys()) + assoc_list_keys = Set(assoc_insts.keys()) + if len(in_list_keys & assoc_list_keys) != 3: + raise Exception("Mistmatching Class Names, expected %s, got %s" \ + % (in_list_keys, assoc_list_keys)) + + for cname, prop in in_list.iteritems(): + logger.info("Verifying Values for '%s'", cname) + exp_vals = in_list[cname].items() + res_vals = assoc_insts[cname].items() + for i in range(0, len(prop)): + if exp_vals[i][1] != res_vals[i][1]: + logger.error("'%s' val mismatch for '%s': " \ + "got '%s', expected '%s'", exp_vals[i][0], + cname, res_vals[i][1], exp_vals[i][1]) + return FAIL + + except Exception, details: + logger.error("Exception in fn verify_assoc()") + logger.error("Exception details: %s", details) + return FAIL + return PASS + + at do_main(sup_types) +def main(): + options = main.options + server = options.ip + virt = options.virt + status = FAIL + + virt_xml = vxml.get_class(virt) + cxml = virt_xml(test_dom) + ret = cxml.cim_define(server) + if not ret: + logger.error("Failed to define the dom: %s", test_dom) + return FAIL + + an = get_typed_class(virt, "ServiceAffectsElement") + + try: + cname = 'ConsoleRedirectionService' + classname = get_typed_class(virt, cname) + crs = EnumInstances(server, classname) + + if len(crs) != 1: + raise Exception("'%s' returned %i records, expected 1" \ + % (classname, len(crs))) + + crs_val = crs[0] + crs_cname = crs_val.CreationClassName + crs_sccn = crs_val.SystemCreationClassName + assoc_info = assoc.Associators(server, an, crs_cname, + CreationClassName=crs_cname, + Name=crs_val.Name, + SystemCreationClassName=crs_sccn, + SystemName=crs_val.SystemName) + status = verify_assoc(server, virt, an, assoc_info) + except Exception, detail : + logger.error("Exception : %s", detail) + status = FAIL + + cxml.undefine(server) + return status + +if __name__ == "__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Tue May 12 11:39:34 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 12 May 2009 04:39:34 -0700 Subject: [Libvirt-cim] [PATCH 2 of 3] [TEST] Adding InputRASD to cim_define of vxml.py In-Reply-To: References: Message-ID: # HG changeset patch # User Deepti B. Kalakeri # Date 1242128081 25200 # Node ID d9658db27f664f2058966a0ace1e158cae4a58b6 # Parent bbea7924536083ce35de6ec8b680a7e873591d82 [TEST] Adding InputRASD to cim_define of vxml.py. Tested on F10 with KVM and current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r bbea79245360 -r d9658db27f66 suites/libvirt-cim/lib/XenKvmLib/vxml.py --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Tue May 12 04:33:32 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Tue May 12 04:34:41 2009 -0700 @@ -52,6 +52,7 @@ from XenKvmLib.const import get_provider_version vsms_graphics_sup = 763 +vsms_inputdev_sup = 771 class XMLClass: xml_string = "" @@ -502,7 +503,7 @@ def __init__(self, virt, dom_name, disk_dev, disk_source, net_type, net_name, net_mac, vcpus, mem, mem_allocunits, emu_type, grstype, ip, - port_num, kmap): + port_num, kmap, irstype, btype): self.virt = virt self.domain_name = dom_name self.err_rc = None @@ -526,7 +527,9 @@ self.gasd = vsms.get_gasd_class(virt)(name=dom_name, res_sub_type=grstype, ip=ip, lport=port_num, keymap=kmap) - + self.iasd = vsms.get_iasd_class(virt)(name=dom_name, + res_sub_type=irstype, + bus_type=btype) def cim_define(self, ip, ref_conf=None): service = vsms.get_vsms_class(self.virt)(ip) sys_settings = str(self.vssd) @@ -549,6 +552,10 @@ if self.gasd is not None: res_settings.append(str(self.gasd)) + if curr_cim_rev >= vsms_inputdev_sup: + if self.iasd is not None: + res_settings.append(str(self.iasd)) + if ref_conf is None: ref_conf = ' ' @@ -733,7 +740,8 @@ ntype=const.default_net_type, net_name=const.default_network_name, emu_type=None, grstype="vnc", address="127.0.0.1", - port_num='-1', keymap="en-us"): + port_num='-1', keymap="en-us", irstype="mouse", + btype="xen"): if not (os.path.exists(const.Xen_kernel_path) \ and os.path.exists(const.Xen_init_path)): logger.error('ERROR: Either the kernel image ' @@ -745,7 +753,8 @@ VirtCIM.__init__(self, 'Xen', test_dom, disk, disk_file_path, ntype, net_name, mac, vcpus, mem, mem_allocunits, - emu_type, grstype, address, port_num, keymap) + emu_type, grstype, address, port_num, keymap, irstype, + btype) def _os(self, os_kernel, os_initrd): os = self.get_node('/domain/os') @@ -796,7 +805,8 @@ ntype=const.default_net_type, net_name=const.default_network_name, emu_type=None, grstype="vnc", address="127.0.0.1", - port_num='-1', keymap="en-us"): + port_num='-1', keymap="en-us", irstype="mouse", + btype="ps2"): if not os.path.exists(disk_file_path): logger.error('Error: Disk image does not exist') sys.exit(1) @@ -804,7 +814,7 @@ VirtCIM.__init__(self, 'KVM', test_dom, disk, disk_file_path, ntype, net_name, mac, vcpus, mem, mem_allocunits, emu_type, grstype, address, - port_num, keymap) + port_num, keymap, irstype, btype) self._os() self._devices(const.KVM_default_emulator, ntype, disk_file_path, disk, mac, net_name) @@ -850,14 +860,16 @@ ntype=const.default_net_type, net_name=const.default_network_name, emu_type=None, grstype="vnc", - address="127.0.0.1", port_num='-1', keymap="en-us"): + address="127.0.0.1", port_num='-1', keymap="en-us", + irstype="mouse", btype="usb"): if not os.path.exists(disk_file_path): logger.error('Error: Disk image does not exist') sys.exit(1) VirtXML.__init__(self, 'xenfv', test_dom, set_uuid(), mem, vcpus) VirtCIM.__init__(self, 'XenFV', test_dom, disk, disk_file_path, ntype, net_name, mac, vcpus, mem, mem_allocunits, - emu_type, grstype, address, port_num, keymap) + emu_type, grstype, address, port_num, keymap, + irstype, btype) self._features() self._os(const.XenFV_default_loader) self._devices(const.XenFV_default_emulator, @@ -905,12 +917,13 @@ ntype=const.default_net_type, net_name=const.default_network_name, tty=const.LXC_default_tty, grstype="vnc", - address="127.0.0.1", port_num='-1', keymap="en-us"): + address="127.0.0.1", 
port_num='-1', keymap="en-us", + irstype="mouse", btype="usb"): VirtXML.__init__(self, 'lxc', test_dom, set_uuid(), mem, vcpus) VirtCIM.__init__(self, 'LXC', test_dom, const.LXC_default_mp, const.LXC_default_source, ntype, net_name, mac, vcpus, mem, const.default_mallocunits, None, grstype, - address, port_num, keymap) + address, port_num, keymap, irstype, btype) self._os(const.LXC_init_path) self._devices(mac, ntype, net_name, const.LXC_default_tty) self.create_lxc_file(CIM_IP, const.LXC_init_path) From yunguol at cn.ibm.com Wed May 13 09:42:24 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Wed, 13 May 2009 17:42:24 +0800 Subject: [Libvirt-cim] Test Run Summary (May 13 2009): KVM on Fedora release 10 (Cambridge) with sfcb Message-ID: ================================================= Test Run Summary (May 13 2009): KVM on Fedora release 10 (Cambridge) with sfcb ================================================= Distro: Fedora release 10 (Cambridge) Kernel: 2.6.27.15-170.2.24.fc10.x86_64 libvirt: 0.4.5 Hypervisor: QEMU 0.9.1 CIMOM: sfcb sfcbd 1.3.4preview Libvirt-cim revision: 871 Libvirt-cim changeset: 3d42e1423d02 Cimtest revision: 678 Cimtest changeset: 2d9e512a8116 ================================================= FAIL : 5 XFAIL : 4 SKIP : 9 PASS : 134 ----------------- Total : 152 ================================================= FAIL Test Summary: ComputerSystemIndication - 01_created_indication.py: FAIL HostSystem - 03_hs_to_settdefcap.py: FAIL ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL SettingsDefineCapabilities - 01_forward.py: FAIL ================================================= XFAIL Test Summary: ComputerSystem - 32_start_reboot.py: XFAIL ComputerSystem - 33_suspend_reboot.py: XFAIL VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL VirtualSystemManagementService - 16_removeresource.py: XFAIL ================================================= SKIP Test Summary: ComputerSystem - 02_nosystems.py: SKIP LogicalDisk - 02_nodevs.py: SKIP VSSD - 02_bootldr.py: SKIP VirtualSystemMigrationService - 01_migratable_host.py: SKIP VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP ================================================= Full report: -------------------------------------------------------------------- AllocationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- AllocationCapabilities - 02_alloccap_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 01_enum.py: PASS -------------------------------------------------------------------- ComputerSystem - 02_nosystems.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- ComputerSystem - 03_defineVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 04_defineStartVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 05_activate_defined_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 06_paused_active_suspend.py: PASS 
-------------------------------------------------------------------- ComputerSystem - 22_define_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 23_pause_pause.py: PASS -------------------------------------------------------------------- ComputerSystem - 27_define_pause_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 32_start_reboot.py: XFAIL ERROR - Got CIM error Unable to reboot domain: this function is not supported by the hypervisor: virDomainReboot with return code 1 ERROR - Exception: Unable reboot dom 'cs_test_domain' InvokeMethod(RequestStateChange): Unable to reboot domain: this function is not supported by the hypervisor: virDomainReboot Bug:<00005> -------------------------------------------------------------------- ComputerSystem - 33_suspend_reboot.py: XFAIL ERROR - Got CIM error State not supported with return code 7 ERROR - Exception: Unable Suspend dom 'test_domain' InvokeMethod(RequestStateChange): State not supported Bug:<00012> -------------------------------------------------------------------- ComputerSystem - 35_start_reset.py: PASS -------------------------------------------------------------------- ComputerSystem - 40_RSC_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 41_cs_to_settingdefinestate.py: PASS -------------------------------------------------------------------- ComputerSystem - 42_cs_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystemIndication - 01_created_indication.py: FAIL ERROR - Exception : Request Failed: 200 Traceback (most recent call last): File "./lib/XenKvmLib/const.py", line 139, in do_try File "01_created_indication.py", line 146, in main sub_list, ind_names, dict = sub_ind(ip, virt) File "01_created_indication.py", line 60, in sub_ind sub.subscribe(dict['default_url'], dict['default_auth']) File "/data/users/daisy/cimtest/suites/libvirt-cim/lib/XenKvmLib/indication_tester.py", line 345, in subscribe "CreateInstance", auth_hdr) File "/data/users/daisy/cimtest/suites/libvirt-cim/lib/XenKvmLib/indication_tester.py", line 330, in __do_cimpost (resp.status, resp.reason)) Exception: Request Failed: 200 ERROR - None -------------------------------------------------------------------- ElementAllocatedFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 03_reverse_errs.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 04_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 01_forward.py: PASS -------------------------------------------------------------------- ElementCapabilities - 02_reverse.py: PASS -------------------------------------------------------------------- ElementCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 05_hostsystem_cap.py: PASS -------------------------------------------------------------------- ElementConforms - 01_forward.py: PASS -------------------------------------------------------------------- ElementConforms - 
02_reverse.py: PASS -------------------------------------------------------------------- ElementConforms - 03_ectp_fwd_errs.py: PASS -------------------------------------------------------------------- ElementConforms - 04_ectp_rev_errs.py: PASS -------------------------------------------------------------------- ElementSettingData - 01_forward.py: PASS -------------------------------------------------------------------- ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 01_enum.py: PASS -------------------------------------------------------------------- HostSystem - 02_hostsystem_to_rasd.py: PASS -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - 'KVM_SettingsDefineCapabilities' returned 8 RASD objects instead of 4 Class not found -------------------------------------------------------------------- HostSystem - 04_hs_to_EAPF.py: PASS -------------------------------------------------------------------- HostSystem - 05_hs_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 06_hs_to_vsms.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 01_forward.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 01_forward.py: PASS -------------------------------------------------------------------- HostedDependency - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 03_enabledstate.py: PASS -------------------------------------------------------------------- HostedDependency - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 01_forward.py: PASS -------------------------------------------------------------------- HostedResourcePool - 02_reverse.py: PASS -------------------------------------------------------------------- HostedResourcePool - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedService - 01_forward.py: PASS -------------------------------------------------------------------- HostedService - 02_reverse.py: PASS -------------------------------------------------------------------- HostedService - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedService - 04_reverse_errs.py: PASS -------------------------------------------------------------------- KVMRedirectionSAP - 01_enum_KVMredSAP.py: PASS -------------------------------------------------------------------- LogicalDisk - 01_disk.py: PASS -------------------------------------------------------------------- LogicalDisk - 02_nodevs.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- LogicalDisk - 03_ld_gi_errs.py: PASS -------------------------------------------------------------------- 
Memory - 01_memory.py: PASS -------------------------------------------------------------------- Memory - 02_defgetmem.py: PASS -------------------------------------------------------------------- Memory - 03_mem_gi_errs.py: PASS -------------------------------------------------------------------- NetworkPort - 01_netport.py: PASS -------------------------------------------------------------------- NetworkPort - 02_np_gi_errors.py: PASS -------------------------------------------------------------------- NetworkPort - 03_user_netport.py: PASS -------------------------------------------------------------------- Processor - 01_processor.py: PASS -------------------------------------------------------------------- Processor - 02_definesys_get_procs.py: PASS -------------------------------------------------------------------- Processor - 03_proc_gi_errs.py: PASS -------------------------------------------------------------------- Profile - 01_enum.py: PASS -------------------------------------------------------------------- Profile - 02_profile_to_elec.py: PASS -------------------------------------------------------------------- Profile - 03_rprofile_gi_errs.py: PASS -------------------------------------------------------------------- RASD - 01_verify_rasd_fields.py: PASS -------------------------------------------------------------------- RASD - 02_enum.py: PASS -------------------------------------------------------------------- RASD - 03_rasd_errs.py: PASS -------------------------------------------------------------------- RASD - 04_disk_rasd_size.py: PASS -------------------------------------------------------------------- RASD - 05_disk_rasd_emu_type.py: PASS -------------------------------------------------------------------- RedirectionService - 01_enum_crs.py: PASS -------------------------------------------------------------------- RedirectionService - 02_enum_crscap.py: PASS -------------------------------------------------------------------- RedirectionService - 03_RedirectionSAP_errs.py: PASS -------------------------------------------------------------------- ReferencedProfile - 01_verify_refprof.py: PASS -------------------------------------------------------------------- ReferencedProfile - 02_refprofile_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 03_forward_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 05_RAPF_err.py: PASS -------------------------------------------------------------------- ResourcePool - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePool - 02_rp_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 01_enum.py: PASS -------------------------------------------------------------------- 
ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description One or more parameter values passed to the method were invalid InvokeMethod(CreateChildResourcePool): One or more parameter values passed to the method were invalid -------------------------------------------------------------------- ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description One or more parameter values passed to the method were invalid InvokeMethod(DeleteResourcePool): One or more parameter values passed to the method were invalid -------------------------------------------------------------------- ServiceAccessBySAP - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAccessBySAP - 02_reverse.py: PASS -------------------------------------------------------------------- ServiceAffectsElement - 01_forward.py: PASS -------------------------------------------------------------------- SettingsDefine - 01_forward.py: PASS -------------------------------------------------------------------- SettingsDefine - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 03_sds_fwd_errs.py: PASS -------------------------------------------------------------------- SettingsDefine - 04_sds_rev_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 01_forward.py: FAIL ERROR - KVM_SettingsDefineCapabilities returned 8 ResourcePool objects instead of 4 -------------------------------------------------------------------- SettingsDefineCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS -------------------------------------------------------------------- SystemDevice - 01_forward.py: PASS -------------------------------------------------------------------- SystemDevice - 02_reverse.py: PASS -------------------------------------------------------------------- SystemDevice - 03_fwderrs.py: PASS -------------------------------------------------------------------- VSSD - 01_enum.py: PASS -------------------------------------------------------------------- VSSD - 02_bootldr.py: SKIP -------------------------------------------------------------------- VSSD - 03_vssd_gi_errs.py: PASS -------------------------------------------------------------------- VSSD - 04_vssd_to_rasd.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS -------------------------------------------------------------------- 
VirtualSystemManagementService - 01_definesystem_name.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 02_destroysystem.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 03_definesystem_ess.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 04_definesystem_ers.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 05_destroysystem_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 06_addresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 07_addresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 08_modifyresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL -------------------------------------------------------------------- VirtualSystemManagementService - 10_hv_version.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 11_define_memrasdunits.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 12_referenced_config.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 13_refconfig_additional_devs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 14_define_sys_disk.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 15_mod_system_settings.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 16_removeresource.py: XFAIL ERROR - 0 RASD insts for domain/mouse:ps2 No such instance (no device domain/mouse:ps2) Bug:<00014> -------------------------------------------------------------------- VirtualSystemManagementService - 17_removeresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationService - 01_migratable_host.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 
02_vsmsd_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 01_forward.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 02_reverse.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py: PASS -------------------------------------------------------------------- -------------- next part -------------- An HTML attachment was scrubbed... URL: From deeptik at linux.vnet.ibm.com Wed May 13 14:30:11 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Wed, 13 May 2009 07:30:11 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Add new SAE/02_reverse.py tc Message-ID: <9391439d65e7ec6b88f3.1242225011@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1242224913 25200 # Node ID 9391439d65e7ec6b88f34923d97f969c6114a237 # Parent 2d9e512a8116eb52eba4267617bb5b6eac1fc224 [TEST] Add new SAE/02_reverse.py tc Tested on F10 with KVM and latest sources. Signed-off-by: Deepti B. Kalakeri diff -r 2d9e512a8116 -r 9391439d65e7 suites/libvirt-cim/cimtest/ServiceAffectsElement/02_reverse.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ServiceAffectsElement/02_reverse.py Wed May 13 07:28:33 2009 -0700 @@ -0,0 +1,168 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case is used to verify the SAE association with the ComputerSystem, +# Poiniting Device, DisplayController providers. +# The SAE association when queried with the ComputerSystem/PoinitingDevice/ +# DisplayController should give the details of the CRS information +# to which they are part of. +# +# Ex: Command and some of the fields that are verified are given below. 
+# Command: + +# wbemcli ain -ac KVM_ServiceAffectsElement 'http://root:passwd +# @localhost/root/virt:KVM_ComputerSystem.CreationClassName=\ +# "KVM_ComputerSystem",Name="demo3"' +# +# Output: +# ------- +# host/root/virt:KVM_ConsoleRedirectionService.CreationClassName=\ +# "KVM_ConsoleRedirectionService",Name="ConsoleRedirectionService",\ +# SystemCreationClassName="KVM_HostSystem",SystemName="host" +# +# Similarly the above o/p is expected when SAE is queired with +# PoinitingDevice and DisplayController +# Date : 12-05-2009 + + +import sys +from sets import Set +from XenKvmLib.assoc import Associators, compare_all_prop +from XenKvmLib import vxml +from CimTest.Globals import logger +from XenKvmLib.classes import get_typed_class +from XenKvmLib.enumclass import EnumInstances, EnumNames +from XenKvmLib.common_util import parse_instance_id +from XenKvmLib.const import do_main, get_provider_version +from CimTest.ReturnCodes import FAIL, PASS +from pywbem.cim_obj import CIMInstance + +sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] +pd_dev_rev = 746 +dc_dev_rev = 725 + +test_dom = "SAE_dom" + +def get_dom_records(cn, ei_info): + ei_insts = {} + for ei_item in ei_info: + rec = None + CCN = ei_item['CreationClassName'] + if 'DisplayController' in CCN or 'PointingDevice' in CCN : + guest, dev, status = parse_instance_id(ei_item['DeviceID']) + if status != PASS: + logger.error("Unable to parse DeviceID") + return ei_insts, status + + if guest == test_dom: + rec = ei_item + elif 'ComputerSystem' in CCN: + if ei_item['Name'] == test_dom: + rec = ei_item + else: + logger.error("Unexpected CreationClassName %s returned by " \ + "%s association", CCN, cn) + return ei_insts, FAIL + + if not CCN in ei_insts.keys() and rec != None: + ei_insts[CCN]=rec + elif rec != None and (CCN in ei_insts.keys()): + logger.error("Got more than one record for '%s'", CCN) + return ei_insts, FAIL + + return ei_insts, PASS + + +def init_list_for_assoc(server, virt): + c_list = [ 'ComputerSystem'] + curr_cim_rev, changeset = get_provider_version(virt, server) + if curr_cim_rev >= pd_dev_rev: + c_list.append('PointingDevice' ) + if curr_cim_rev >= dc_dev_rev: + c_list.append('DisplayController') + + key_dict = {} + for name in c_list: + init_list = {} + c_name = get_typed_class(virt, name) + ei_details = EnumNames(server, c_name) + init_list, status = get_dom_records(c_name, ei_details) + if status != PASS: + return init_list, FAIL + key_dict[c_name] = dict(init_list[c_name].keybindings) + + return key_dict, PASS + + + at do_main(sup_types) +def main(): + options = main.options + server = options.ip + virt = options.virt + status = FAIL + + virt_xml = vxml.get_class(virt) + cxml = virt_xml(test_dom) + ret = cxml.cim_define(server) + if not ret: + logger.error("Failed to define the dom: %s", test_dom) + return FAIL + + an = get_typed_class(virt, "ServiceAffectsElement") + + try: + in_list, status = init_list_for_assoc(server, virt) + if status != PASS: + raise Exception("Failed to get init_list") + + c_name = get_typed_class(virt, 'ConsoleredirectionService') + crs = EnumInstances(server, c_name) + if len(crs) != 1: + raise Exception("'%s' returned %i records, expected 1" \ + % (c_name, len(crs))) + + for cn, value in in_list.iteritems(): + logger.info("Verifying '%s' association with '%s'", an, cn) + if 'ComputerSystem' in cn: + assoc_info = Associators(server, an, cn, + CreationClassName=cn, + Name=value['Name']) + else: + assoc_info = Associators(server, an, cn, + CreationClassName=cn, + SystemName=value['SystemName'], + 
DeviceID=value['DeviceID'], + SystemCreationClassName=\ + value['SystemCreationClassName']) + if len(assoc_info) != 1: + raise Exception("Got '%s' records for '%s' association with " \ + "'%s',expected 1" %(len(assoc_info), an, cn)) + status = compare_all_prop(assoc_info[0], crs[0]) + + except Exception, detail : + logger.error("Exception : %s", detail) + status = FAIL + + cxml.undefine(server) + return status + +if __name__ == "__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Wed May 13 14:28:44 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Wed, 13 May 2009 07:28:44 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Add new SAE/02_reverse.py tc Message-ID: <9391439d65e7ec6b88f3.1242224924@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1242224913 25200 # Node ID 9391439d65e7ec6b88f34923d97f969c6114a237 # Parent 2d9e512a8116eb52eba4267617bb5b6eac1fc224 [TEST] Add new SAE/02_reverse.py tc Tested on F10 with KVM and latest sources. Signed-off-by: Deepti B. Kalakeri diff -r 2d9e512a8116 -r 9391439d65e7 suites/libvirt-cim/cimtest/ServiceAffectsElement/02_reverse.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ServiceAffectsElement/02_reverse.py Wed May 13 07:28:33 2009 -0700 @@ -0,0 +1,168 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case is used to verify the SAE association with the ComputerSystem, +# Poiniting Device, DisplayController providers. +# The SAE association when queried with the ComputerSystem/PoinitingDevice/ +# DisplayController should give the details of the CRS information +# to which they are part of. +# +# Ex: Command and some of the fields that are verified are given below. 
+# Command: + +# wbemcli ain -ac KVM_ServiceAffectsElement 'http://root:passwd +# @localhost/root/virt:KVM_ComputerSystem.CreationClassName=\ +# "KVM_ComputerSystem",Name="demo3"' +# +# Output: +# ------- +# host/root/virt:KVM_ConsoleRedirectionService.CreationClassName=\ +# "KVM_ConsoleRedirectionService",Name="ConsoleRedirectionService",\ +# SystemCreationClassName="KVM_HostSystem",SystemName="host" +# +# Similarly the above o/p is expected when SAE is queired with +# PoinitingDevice and DisplayController +# Date : 12-05-2009 + + +import sys +from sets import Set +from XenKvmLib.assoc import Associators, compare_all_prop +from XenKvmLib import vxml +from CimTest.Globals import logger +from XenKvmLib.classes import get_typed_class +from XenKvmLib.enumclass import EnumInstances, EnumNames +from XenKvmLib.common_util import parse_instance_id +from XenKvmLib.const import do_main, get_provider_version +from CimTest.ReturnCodes import FAIL, PASS +from pywbem.cim_obj import CIMInstance + +sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] +pd_dev_rev = 746 +dc_dev_rev = 725 + +test_dom = "SAE_dom" + +def get_dom_records(cn, ei_info): + ei_insts = {} + for ei_item in ei_info: + rec = None + CCN = ei_item['CreationClassName'] + if 'DisplayController' in CCN or 'PointingDevice' in CCN : + guest, dev, status = parse_instance_id(ei_item['DeviceID']) + if status != PASS: + logger.error("Unable to parse DeviceID") + return ei_insts, status + + if guest == test_dom: + rec = ei_item + elif 'ComputerSystem' in CCN: + if ei_item['Name'] == test_dom: + rec = ei_item + else: + logger.error("Unexpected CreationClassName %s returned by " \ + "%s association", CCN, cn) + return ei_insts, FAIL + + if not CCN in ei_insts.keys() and rec != None: + ei_insts[CCN]=rec + elif rec != None and (CCN in ei_insts.keys()): + logger.error("Got more than one record for '%s'", CCN) + return ei_insts, FAIL + + return ei_insts, PASS + + +def init_list_for_assoc(server, virt): + c_list = [ 'ComputerSystem'] + curr_cim_rev, changeset = get_provider_version(virt, server) + if curr_cim_rev >= pd_dev_rev: + c_list.append('PointingDevice' ) + if curr_cim_rev >= dc_dev_rev: + c_list.append('DisplayController') + + key_dict = {} + for name in c_list: + init_list = {} + c_name = get_typed_class(virt, name) + ei_details = EnumNames(server, c_name) + init_list, status = get_dom_records(c_name, ei_details) + if status != PASS: + return init_list, FAIL + key_dict[c_name] = dict(init_list[c_name].keybindings) + + return key_dict, PASS + + + at do_main(sup_types) +def main(): + options = main.options + server = options.ip + virt = options.virt + status = FAIL + + virt_xml = vxml.get_class(virt) + cxml = virt_xml(test_dom) + ret = cxml.cim_define(server) + if not ret: + logger.error("Failed to define the dom: %s", test_dom) + return FAIL + + an = get_typed_class(virt, "ServiceAffectsElement") + + try: + in_list, status = init_list_for_assoc(server, virt) + if status != PASS: + raise Exception("Failed to get init_list") + + c_name = get_typed_class(virt, 'ConsoleredirectionService') + crs = EnumInstances(server, c_name) + if len(crs) != 1: + raise Exception("'%s' returned %i records, expected 1" \ + % (c_name, len(crs))) + + for cn, value in in_list.iteritems(): + logger.info("Verifying '%s' association with '%s'", an, cn) + if 'ComputerSystem' in cn: + assoc_info = Associators(server, an, cn, + CreationClassName=cn, + Name=value['Name']) + else: + assoc_info = Associators(server, an, cn, + CreationClassName=cn, + SystemName=value['SystemName'], + 
DeviceID=value['DeviceID'], + SystemCreationClassName=\ + value['SystemCreationClassName']) + if len(assoc_info) != 1: + raise Exception("Got '%s' records for '%s' association with " \ + "'%s',expected 1" %(len(assoc_info), an, cn)) + status = compare_all_prop(assoc_info[0], crs[0]) + + except Exception, detail : + logger.error("Exception : %s", detail) + status = FAIL + + cxml.undefine(server) + return status + +if __name__ == "__main__": + sys.exit(main()) From rmaciel at linux.vnet.ibm.com Wed May 13 12:37:10 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Wed, 13 May 2009 09:37:10 -0300 Subject: [Libvirt-cim] [PATCH 1 of 2] This fix the generation of the tag on fully-virtualizable Xen guests In-Reply-To: References: Message-ID: # HG changeset patch # User Richard Maciel # Date 1242071235 10800 # Node ID c0bd6c9a2c0084398784bb1ae36649bd3400e36c # Parent 5608b9455cd32fccbc324cd540c509d7230a113f This fix the generation of the tag on fully-virtualizable Xen guests. Right now it is generated with the boot device as a the value of the node (e.g. hd) However, the boot device must be a property of the node (e.g. ) Signed-off-by: Richard Maciel diff -r 5608b9455cd3 -r c0bd6c9a2c00 libxkutil/xmlgen.c --- a/libxkutil/xmlgen.c Mon Apr 27 17:05:48 2009 -0700 +++ b/libxkutil/xmlgen.c Mon May 11 16:47:15 2009 -0300 @@ -457,10 +457,12 @@ if (tmp == NULL) return XML_ERROR; - tmp = xmlNewChild(root, NULL, BAD_CAST "boot", BAD_CAST os->boot); + tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); if (tmp == NULL) return XML_ERROR; + xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->boot); + tmp = xmlNewChild(root, NULL, BAD_CAST "features", NULL); xmlNewChild(tmp, NULL, BAD_CAST "pae", NULL); xmlNewChild(tmp, NULL, BAD_CAST "acpi", NULL); From rmaciel at linux.vnet.ibm.com Wed May 13 12:37:09 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Wed, 13 May 2009 09:37:09 -0300 Subject: [Libvirt-cim] [PATCH 0 of 2] [RFC] Add boot order support for full virtualization environments Message-ID: From rmaciel at linux.vnet.ibm.com Wed May 13 12:37:11 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Wed, 13 May 2009 09:37:11 -0300 Subject: [Libvirt-cim] [PATCH 2 of 2] Add boot order support In-Reply-To: References: Message-ID: # HG changeset patch # User Richard Maciel # Date 1242218025 10800 # Node ID b188a6d5bfea59ab1aae26be4b62817a5a414f4e # Parent c0bd6c9a2c0084398784bb1ae36649bd3400e36c Add boot order support Signed-off-by: Richard Maciel diff -r c0bd6c9a2c00 -r b188a6d5bfea libxkutil/device_parsing.c --- a/libxkutil/device_parsing.c Mon May 11 16:47:15 2009 -0300 +++ b/libxkutil/device_parsing.c Wed May 13 09:33:45 2009 -0300 @@ -810,6 +810,9 @@ static int parse_os(struct domain *dominfo, xmlNode *os) { xmlNode *child; + char **blist = NULL; + char **tmp_list = NULL; + unsigned bl_size = 0; for (child = os->children; child != NULL; child = child->next) { if (XSTREQ(child->name, "type")) @@ -822,10 +825,23 @@ STRPROP(dominfo, os_info.pv.cmdline, child); else if (XSTREQ(child->name, "loader")) STRPROP(dominfo, os_info.fv.loader, child); - else if (XSTREQ(child->name, "boot")) - dominfo->os_info.fv.boot = get_attr_value(child, - "dev"); - else if (XSTREQ(child->name, "init")) + else if (XSTREQ(child->name, "boot")) { + //dominfo->os_info.fv.boot = get_attr_value(child, + //"dev"); + bl_size++; + + tmp_list = (char **)realloc(blist, + bl_size * sizeof(char *)); + if (tmp_list == NULL) { + // Nothing you can do. Just go on. 
+ CU_DEBUG("Could not alloc space for " + "boot device"); + bl_size--; + continue; + } + + blist[bl_size - 1] = get_attr_value(child, "dev"); + } else if (XSTREQ(child->name, "init")) STRPROP(dominfo, os_info.lxc.init, child); } @@ -843,6 +859,9 @@ else dominfo->type = -1; + dominfo->os_info.fv.bootlist = blist; + dominfo->os_info.fv.bootlist_size = bl_size; + return 1; } @@ -1001,9 +1020,15 @@ free(dom->os_info.pv.cmdline); } else if ((dom->type == DOMAIN_XENFV) || (dom->type == DOMAIN_KVM) || (dom->type == DOMAIN_QEMU)) { + int i; + free(dom->os_info.fv.type); free(dom->os_info.fv.loader); - free(dom->os_info.fv.boot); + + for (i = 0; i < dom->os_info.fv.bootlist_size; i++) { + free(dom->os_info.fv.bootlist[i]); + } + free(dom->os_info.fv.bootlist); } else if (dom->type == DOMAIN_LXC) { free(dom->os_info.lxc.type); free(dom->os_info.lxc.init); diff -r c0bd6c9a2c00 -r b188a6d5bfea libxkutil/device_parsing.h --- a/libxkutil/device_parsing.h Mon May 11 16:47:15 2009 -0300 +++ b/libxkutil/device_parsing.h Wed May 13 09:33:45 2009 -0300 @@ -99,7 +99,9 @@ struct fv_os_info { char *type; /* Should always be 'hvm' */ char *loader; - char *boot; + unsigned bootlist_size; + char **bootlist; + //char *boot; }; struct lxc_os_info { diff -r c0bd6c9a2c00 -r b188a6d5bfea libxkutil/xml_parse_test.c --- a/libxkutil/xml_parse_test.c Mon May 11 16:47:15 2009 -0300 +++ b/libxkutil/xml_parse_test.c Wed May 13 09:33:45 2009 -0300 @@ -28,6 +28,7 @@ static void print_os(struct domain *dom, FILE *d) { + int i; if (dom->type == DOMAIN_XENPV) { print_value(d, "Domain Type", "Xen PV"); @@ -39,13 +40,18 @@ print_value(d, "Domain Type", "Xen FV"); print_value(d, "Type", dom->os_info.fv.type); print_value(d, "Loader", dom->os_info.fv.loader); - print_value(d, "Boot", dom->os_info.fv.boot); + for (i = 0; i < dom->os_info.fv.bootlist_size; i++) { + print_value(d, "Boot", dom->os_info.fv.bootlist[i]); + } } else if ((dom->type == DOMAIN_KVM) || (dom->type == DOMAIN_QEMU)) { print_value(d, "Domain Type", "KVM/QEMU"); print_value(d, "Type", dom->os_info.fv.type); print_value(d, "Loader", dom->os_info.fv.loader); - print_value(d, "Boot", dom->os_info.fv.boot); + + for (i = 0; i < dom->os_info.fv.bootlist_size; i++) { + print_value(d, "Boot", dom->os_info.fv.bootlist[i]); + } } else if (dom->type == DOMAIN_LXC) { print_value(d, "Init", dom->os_info.lxc.init); } else { diff -r c0bd6c9a2c00 -r b188a6d5bfea libxkutil/xmlgen.c --- a/libxkutil/xmlgen.c Mon May 11 16:47:15 2009 -0300 +++ b/libxkutil/xmlgen.c Wed May 13 09:33:45 2009 -0300 @@ -439,6 +439,7 @@ { struct fv_os_info *os = &domain->os_info.fv; xmlNodePtr tmp; + unsigned i; if (os->type == NULL) os->type = strdup("hvm"); @@ -446,8 +447,13 @@ if (os->loader == NULL) os->loader = strdup("/usr/lib/xen/boot/hvmloader"); - if (os->boot == NULL) - os->boot = strdup("hd"); + //if (os->boot == NULL) + // os->boot = strdup("hd"); + if (os->bootlist_size == 0) { + os->bootlist_size = 1; + os->bootlist = (char **)calloc(1, sizeof(char *)); + os->bootlist[0] = strdup("hd"); + } tmp = xmlNewChild(root, NULL, BAD_CAST "type", BAD_CAST os->type); if (tmp == NULL) @@ -457,11 +463,13 @@ if (tmp == NULL) return XML_ERROR; - tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); - if (tmp == NULL) - return XML_ERROR; + for (i = 0; i < os->bootlist_size; i++) { + tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); + if (tmp == NULL) + return XML_ERROR; - xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->boot); + xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->bootlist[i]); + } tmp = 
xmlNewChild(root, NULL, BAD_CAST "features", NULL); xmlNewChild(tmp, NULL, BAD_CAST "pae", NULL); @@ -475,21 +483,28 @@ { struct fv_os_info *os = &domain->os_info.fv; xmlNodePtr tmp; + unsigned i; if (os->type == NULL) os->type = strdup("hvm"); - if (os->boot == NULL) - os->boot = strdup("hd"); + if (os->bootlist_size == 0) { + os->bootlist_size = 1; + os->bootlist = (char **)calloc(1, sizeof(char *)); + os->bootlist[0] = strdup("hd"); + } tmp = xmlNewChild(root, NULL, BAD_CAST "type", BAD_CAST os->type); if (tmp == NULL) return XML_ERROR; - tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); - if (tmp == NULL) - return XML_ERROR; - xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->boot); + for (i = 0; i < os->bootlist_size; i++) { + tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); + if (tmp == NULL) + return XML_ERROR; + + xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->bootlist[i]); + } return NULL; } diff -r c0bd6c9a2c00 -r b188a6d5bfea schema/VSSD.mof --- a/schema/VSSD.mof Mon May 11 16:47:15 2009 -0300 +++ b/schema/VSSD.mof Wed May 13 09:33:45 2009 -0300 @@ -27,7 +27,7 @@ [Description ("The device to boot from when in fully-virtualized mode." "One of hd,fd,cdrom.")] - string BootDevice; + string BootDevices[]; [Description ("The emulator the guest should use during runtime.")] string Emulator; @@ -42,8 +42,8 @@ class KVM_VirtualSystemSettingData : Virt_VirtualSystemSettingData { - [Description ("The device to boot from. One of hd,fd,cdrom.")] - string BootDevice; + [Description ("The list of devices to boot from. hd,fd,cdrom.")] + string BootDevices[]; [Description ("The emulator the guest should use during runtime.")] string Emulator; diff -r c0bd6c9a2c00 -r b188a6d5bfea src/Virt_VSSD.c --- a/src/Virt_VSSD.c Mon May 11 16:47:15 2009 -0300 +++ b/src/Virt_VSSD.c Wed May 13 09:33:45 2009 -0300 @@ -42,16 +42,50 @@ CMPIInstance *inst) { bool fv = true; + CMPIArray *array; if (dominfo->type == DOMAIN_XENFV) CMSetProperty(inst, "IsFullVirt", (CMPIValue *)&fv, CMPI_boolean); - if (dominfo->os_info.fv.boot != NULL) - CMSetProperty(inst, - "BootDevice", - (CMPIValue *)dominfo->os_info.fv.boot, - CMPI_chars); + if (dominfo->os_info.fv.bootlist_size > 0) { + CMPICount i; + CMPICount bl_size; + CMPIStatus s; + + bl_size = (CMPICount)dominfo->os_info.fv.bootlist_size; + + array = CMNewArray(_BROKER, + bl_size, + CMPI_string, + &s); + + if (s.rc != CMPI_RC_OK) + CU_DEBUG("Error creating BootDevice list"); + + for (i = 0; i < bl_size; i++) { + CMPIString *cm_str; + + cm_str = CMNewString(_BROKER, + (const char *)dominfo->os_info.fv.bootlist[i], + &s); + if (s.rc != CMPI_RC_OK) + CU_DEBUG("Error adding item to BootDevice " + "list"); + } + + s = CMSetProperty(inst, + "BootDevices", + (CMPIValue *)array, + CMPI_stringA); + + if (s.rc != CMPI_RC_OK) + CU_DEBUG("Error setting BootDevices property"); + + //CMSetProperty(inst, + // "BootDevices", + // (CMPIValue *)dominfo->os_info.fv.boot); + } } static void _set_pv_prop(struct domain *dominfo, diff -r c0bd6c9a2c00 -r b188a6d5bfea src/Virt_VirtualSystemManagementService.c --- a/src/Virt_VirtualSystemManagementService.c Mon May 11 16:47:15 2009 -0300 +++ b/src/Virt_VirtualSystemManagementService.c Wed May 13 09:33:45 2009 -0300 @@ -202,7 +202,13 @@ const char *pfx) { int ret; + CMPICount i; const char *val; + CMPIArray *bootlist; + CMPIStatus s; + CMPIData boot_elem; + char **tmp_str_arr; + if (STREQC(pfx, "KVM")) { if (system_has_kvm(pfx)) @@ -216,12 +222,75 @@ return 0; } - ret = cu_get_str_prop(inst, "BootDevice", &val); - if (ret != CMPI_RC_OK) - val = 
"hd"; + for (i = 0; i < domain->os_info.fv.bootlist_size; i++) + free(domain->os_info.fv.bootlist[i]); - free(domain->os_info.fv.boot); - domain->os_info.fv.boot = strdup(val); + ret = cu_get_array_prop(inst, "BootDevices", &bootlist); + + if (ret == CMPI_RC_OK) { + CMPICount bl_size; + + bl_size = CMGetArrayCount(bootlist, &s); + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Invalid BootDevice array size"); + return 0; + } + + tmp_str_arr = (char **)realloc(domain->os_info.fv.bootlist, + bl_size * sizeof(char *)); + + if (tmp_str_arr == NULL) { + CU_DEBUG("Could not alloc BootDevices array"); + return 0; + } + + for (i = 0; i < bl_size; i++) { + CMPIString *cmpi_str; + const char *str; + + boot_elem = CMGetArrayElementAt(bootlist, + i, + NULL); + + if (CMIsNullValue(boot_elem)) { + CU_DEBUG("Null BootDevice"); + return 0; + } + + cmpi_str = boot_elem.value.string; + + free(domain->os_info.fv.bootlist[i]); + CU_DEBUG("Freed item from bootlist"); + + str = cmpi_str->ft->getCharPtr(cmpi_str, &s); + + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Could not extract char pointer from " + "CMPIArray"); + } + + tmp_str_arr[i] = strdup(str); + } + domain->os_info.fv.bootlist_size = bl_size; + domain->os_info.fv.bootlist = tmp_str_arr; + + } else { + + CU_DEBUG("Failed to get BootDevices property"); + + tmp_str_arr = (char **)realloc(domain->os_info.fv.bootlist, + sizeof(char *)); + if (tmp_str_arr == NULL) + return 0; + + tmp_str_arr[0] = strdup("hd"); + + domain->os_info.fv.bootlist = tmp_str_arr; + domain->os_info.fv.bootlist_size = 1; + } + + //free(domain->os_info.fv.boot); + //domain->os_info.fv.boot = strdup(val); ret = cu_get_str_prop(inst, "Emulator", &val); if (ret != CMPI_RC_OK) From kaitlin at linux.vnet.ibm.com Wed May 13 17:15:57 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 13 May 2009 10:15:57 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] Add boot order support In-Reply-To: References: Message-ID: <4A0B004D.3060305@linux.vnet.ibm.com> Richard Maciel wrote: > # HG changeset patch > # User Richard Maciel > # Date 1242218025 10800 > # Node ID b188a6d5bfea59ab1aae26be4b62817a5a414f4e > # Parent c0bd6c9a2c0084398784bb1ae36649bd3400e36c > Add boot order support This patch is quite long and difficult to read. Can you break this up into smaller patches? > @@ -822,10 +825,23 @@ > STRPROP(dominfo, os_info.pv.cmdline, child); > else if (XSTREQ(child->name, "loader")) > STRPROP(dominfo, os_info.fv.loader, child); > - else if (XSTREQ(child->name, "boot")) > - dominfo->os_info.fv.boot = get_attr_value(child, > - "dev"); > - else if (XSTREQ(child->name, "init")) > + else if (XSTREQ(child->name, "boot")) { > + //dominfo->os_info.fv.boot = get_attr_value(child, > + //"dev"); > + bl_size++; > + > + tmp_list = (char **)realloc(blist, > + bl_size * sizeof(char *)); tmp_list isn't needed here - you aren't using it anywhere else. Just assign the the realloc back to blist. > + if (tmp_list == NULL) { > + // Nothing you can do. Just go on. > + CU_DEBUG("Could not alloc space for " > + "boot device"); > + bl_size--; Instead of incrementing prior to the realloc(), just call realloc() with (bl_size + 1). If the realloc() is successful, increment bl_size after the assignment below. > + continue; > + } > + > + blist[bl_size - 1] = get_attr_value(child, "dev"); So you would increment bl_size here. 
> diff -r c0bd6c9a2c00 -r b188a6d5bfea libxkutil/device_parsing.h > --- a/libxkutil/device_parsing.h Mon May 11 16:47:15 2009 -0300 > +++ b/libxkutil/device_parsing.h Wed May 13 09:33:45 2009 -0300 > @@ -99,7 +99,9 @@ > struct fv_os_info { > char *type; /* Should always be 'hvm' */ > char *loader; > - char *boot; > + unsigned bootlist_size; I would call this bootlist_cnt - it'll parallel the dev_disk_ct (etc) fields of the domain struct > + char **bootlist; > + //char *boot; Remove commented out line. > diff -r c0bd6c9a2c00 -r b188a6d5bfea libxkutil/xmlgen.c > --- a/libxkutil/xmlgen.c Mon May 11 16:47:15 2009 -0300 > +++ b/libxkutil/xmlgen.c Wed May 13 09:33:45 2009 -0300 > @@ -439,6 +439,7 @@ > { > struct fv_os_info *os = &domain->os_info.fv; > xmlNodePtr tmp; > + unsigned i; > > if (os->type == NULL) > os->type = strdup("hvm"); > @@ -446,8 +447,13 @@ > if (os->loader == NULL) > os->loader = strdup("/usr/lib/xen/boot/hvmloader"); > > - if (os->boot == NULL) > - os->boot = strdup("hd"); > + //if (os->boot == NULL) > + // os->boot = strdup("hd"); Remove commented out lines. > + if (os->bootlist_size == 0) { > + os->bootlist_size = 1; > + os->bootlist = (char **)calloc(1, sizeof(char *)); > + os->bootlist[0] = strdup("hd"); > + } libvirt will set a default for us, but it's good to add something just in case. > diff -r c0bd6c9a2c00 -r b188a6d5bfea src/Virt_VSSD.c > --- a/src/Virt_VSSD.c Mon May 11 16:47:15 2009 -0300 > +++ b/src/Virt_VSSD.c Wed May 13 09:33:45 2009 -0300 > @@ -42,16 +42,50 @@ > CMPIInstance *inst) > { > bool fv = true; > + CMPIArray *array; > > if (dominfo->type == DOMAIN_XENFV) > CMSetProperty(inst, "IsFullVirt", > (CMPIValue *)&fv, CMPI_boolean); > > - if (dominfo->os_info.fv.boot != NULL) > - CMSetProperty(inst, > - "BootDevice", > - (CMPIValue *)dominfo->os_info.fv.boot, > - CMPI_chars); > + if (dominfo->os_info.fv.bootlist_size > 0) { > + CMPICount i; > + CMPICount bl_size; > + CMPIStatus s; > + > + bl_size = (CMPICount)dominfo->os_info.fv.bootlist_size; > + > + array = CMNewArray(_BROKER, > + bl_size, > + CMPI_string, > + &s); > + > + if (s.rc != CMPI_RC_OK) > + CU_DEBUG("Error creating BootDevice list"); You should return here - if you fail to create the array, you can't add elements to it. > + > + for (i = 0; i < bl_size; i++) { > + CMPIString *cm_str; > + > + cm_str = CMNewString(_BROKER, > + (const char *)dominfo->os_info.fv.bootlist[i], > + &s); You need to set the elements of the array. See Virt_VirtualSystemManagementCapabilities.c (or other providers that need to set an array). > + if (s.rc != CMPI_RC_OK) > + CU_DEBUG("Error adding item to BootDevice " > + "list"); The debug message here is misleading - the call to CMNewString() doesn't add the string to the array. You'll need to call CMSetArrayElementAt() for that. Also, if you encounter an error here, you need to return an error. > + } > + > + s = CMSetProperty(inst, > + "BootDevices", > + (CMPIValue *)array, This needs to be &array. > + CMPI_stringA); > + > + if (s.rc != CMPI_RC_OK) > + CU_DEBUG("Error setting BootDevices property"); > + > + //CMSetProperty(inst, > + // "BootDevices", > + // (CMPIValue *)dominfo->os_info.fv.boot); Remove commented out lines. 
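Putting those comments together, the corrected block could end up looking roughly like this (untested sketch, variable names as in the patch above; it assumes the surrounding function stays void, so the error paths can simply return):

        array = CMNewArray(_BROKER, bl_size, CMPI_string, &s);
        if (s.rc != CMPI_RC_OK) {
                CU_DEBUG("Error creating BootDevice list");
                return;
        }

        for (i = 0; i < bl_size; i++) {
                CMPIString *cm_str;

                cm_str = CMNewString(_BROKER,
                                     dominfo->os_info.fv.bootlist[i],
                                     &s);
                if (s.rc != CMPI_RC_OK) {
                        CU_DEBUG("Error creating BootDevice string");
                        return;
                }

                /* CMNewString() only creates the string; it still has to
                   be stored in the array explicitly */
                s = CMSetArrayElementAt(array, i,
                                        (CMPIValue *)&cm_str,
                                        CMPI_string);
                if (s.rc != CMPI_RC_OK) {
                        CU_DEBUG("Error adding item to BootDevice list");
                        return;
                }
        }

        s = CMSetProperty(inst,
                          "BootDevices",
                          (CMPIValue *)&array,
                          CMPI_stringA);
        if (s.rc != CMPI_RC_OK)
                CU_DEBUG("Error setting BootDevices property");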
> + } > } > > static void _set_pv_prop(struct domain *dominfo, > diff -r c0bd6c9a2c00 -r b188a6d5bfea src/Virt_VirtualSystemManagementService.c > --- a/src/Virt_VirtualSystemManagementService.c Mon May 11 16:47:15 2009 -0300 > +++ b/src/Virt_VirtualSystemManagementService.c Wed May 13 09:33:45 2009 -0300 > @@ -202,7 +202,13 @@ > const char *pfx) > { > int ret; > + CMPICount i; > const char *val; > + CMPIArray *bootlist; > + CMPIStatus s; > + CMPIData boot_elem; > + char **tmp_str_arr; > + > > if (STREQC(pfx, "KVM")) { > if (system_has_kvm(pfx)) > @@ -216,12 +222,75 @@ > return 0; > } > > - ret = cu_get_str_prop(inst, "BootDevice", &val); > - if (ret != CMPI_RC_OK) > - val = "hd"; > + for (i = 0; i < domain->os_info.fv.bootlist_size; i++) > + free(domain->os_info.fv.bootlist[i]); > > - free(domain->os_info.fv.boot); > - domain->os_info.fv.boot = strdup(val); > + ret = cu_get_array_prop(inst, "BootDevices", &bootlist); > + > + if (ret == CMPI_RC_OK) { > + CMPICount bl_size; > + > + bl_size = CMGetArrayCount(bootlist, &s); > + if (s.rc != CMPI_RC_OK) { > + CU_DEBUG("Invalid BootDevice array size"); > + return 0; > + } > + > + tmp_str_arr = (char **)realloc(domain->os_info.fv.bootlist, > + bl_size * sizeof(char *)); > + > + if (tmp_str_arr == NULL) { > + CU_DEBUG("Could not alloc BootDevices array"); > + return 0; > + } > + > + for (i = 0; i < bl_size; i++) { > + CMPIString *cmpi_str; > + const char *str; > + > + boot_elem = CMGetArrayElementAt(bootlist, > + i, > + NULL); > + > + if (CMIsNullValue(boot_elem)) { > + CU_DEBUG("Null BootDevice"); > + return 0; > + } > + > + cmpi_str = boot_elem.value.string; You'll want to make sure that boot_elem isn't null. You can use CMIsNullObject(). > + > + free(domain->os_info.fv.bootlist[i]); > + CU_DEBUG("Freed item from bootlist"); > + > + str = cmpi_str->ft->getCharPtr(cmpi_str, &s); Instead of doing this, you can use CMGetCharPtr() to pull the string from the CMPIData object. If you use CMGetCharPtr(), you won't need the cmpi_str variable. > + > + if (s.rc != CMPI_RC_OK) { > + CU_DEBUG("Could not extract char pointer from " > + "CMPIArray"); You'll want to return an error here. > + } > + > + tmp_str_arr[i] = strdup(str); > + } > + domain->os_info.fv.bootlist_size = bl_size; > + domain->os_info.fv.bootlist = tmp_str_arr; > + > + } else { > + > + CU_DEBUG("Failed to get BootDevices property"); > + > + tmp_str_arr = (char **)realloc(domain->os_info.fv.bootlist, > + sizeof(char *)); > + if (tmp_str_arr == NULL) > + return 0; > + > + tmp_str_arr[0] = strdup("hd"); > + > + domain->os_info.fv.bootlist = tmp_str_arr; > + domain->os_info.fv.bootlist_size = 1; In xmlgen, you already set a default boot device. So we probably only need to do this once. You can remove this or remove the bit in xmlgen. > + } This whole block is quite large - I would make the boot order bits a separate function. > + > + //free(domain->os_info.fv.boot); > + //domain->os_info.fv.boot = strdup(val); Remove commented lines. 
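And if it helps with the size, the boot-order handling could be pulled into a helper of its own, along these lines (just a sketch: the helper name is invented, it is untested, and freeing of the old list or of a partially built list on the error paths is left out; field names as in the patch):

        static int fv_set_bootlist(const CMPIInstance *inst,
                                   struct domain *domain)
        {
                CMPIStatus s = {CMPI_RC_OK, NULL};
                CMPIArray *bootlist;
                CMPICount i;
                CMPICount bl_size;
                char **list;

                if (cu_get_array_prop(inst, "BootDevices", &bootlist) !=
                    CMPI_RC_OK) {
                        /* No BootDevices property -- let xmlgen fill in
                           the default boot device */
                        return 1;
                }

                bl_size = CMGetArrayCount(bootlist, &s);
                if (s.rc != CMPI_RC_OK)
                        return 0;

                if (bl_size == 0)
                        return 1;

                list = calloc(bl_size, sizeof(char *));
                if (list == NULL)
                        return 0;

                for (i = 0; i < bl_size; i++) {
                        CMPIData elem;

                        elem = CMGetArrayElementAt(bootlist, i, &s);
                        if ((s.rc != CMPI_RC_OK) || CMIsNullValue(elem))
                                return 0;

                        list[i] = strdup(CMGetCharPtr(elem.value.string));
                }

                domain->os_info.fv.bootlist = list;
                domain->os_info.fv.bootlist_size = bl_size;

                return 1;
        }

The caller would then just bail out the same way the rest of the function does when the helper returns 0.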
> > ret = cu_get_str_prop(inst, "Emulator", &val); > if (ret != CMPI_RC_OK) > -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Wed May 13 17:55:27 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Wed, 13 May 2009 14:55:27 -0300 Subject: [Libvirt-cim] [PATCH 2 of 2] Add boot order support In-Reply-To: <4A0B004D.3060305@linux.vnet.ibm.com> References: <4A0B004D.3060305@linux.vnet.ibm.com> Message-ID: <4A0B098F.70106@linux.vnet.ibm.com> Kaitlin Rupert wrote: > Richard Maciel wrote: >> # HG changeset patch >> # User Richard Maciel >> # Date 1242218025 10800 >> # Node ID b188a6d5bfea59ab1aae26be4b62817a5a414f4e >> # Parent c0bd6c9a2c0084398784bb1ae36649bd3400e36c >> Add boot order support > > This patch is quite long and difficult to read. Can you break this up > into smaller patches? > > >> @@ -822,10 +825,23 @@ >> STRPROP(dominfo, os_info.pv.cmdline, child); >> else if (XSTREQ(child->name, "loader")) >> STRPROP(dominfo, os_info.fv.loader, child); >> - else if (XSTREQ(child->name, "boot")) >> - dominfo->os_info.fv.boot = get_attr_value(child, >> - >> "dev"); >> - else if (XSTREQ(child->name, "init")) >> + else if (XSTREQ(child->name, "boot")) { >> + //dominfo->os_info.fv.boot = >> get_attr_value(child, >> + >> //"dev"); >> + bl_size++; >> + >> + tmp_list = (char **)realloc(blist, >> + bl_size * >> sizeof(char *)); > > tmp_list isn't needed here - you aren't using it anywhere else. Just > assign the the realloc back to blist. > Well, if realloc returns a null pointer I lose my pointer to the array which makes me deliver a NULL-pointer array with non-zero size. >> + if (tmp_list == NULL) { >> + // Nothing you can do. Just go on. >> + CU_DEBUG("Could not alloc space for " >> + "boot device"); >> + bl_size--; > > Instead of incrementing prior to the realloc(), just call realloc() with > (bl_size + 1). If the realloc() is successful, increment bl_size after > the assignment below. > >> + continue; + } >> + + blist[bl_size - 1] = >> get_attr_value(child, "dev"); > > So you would increment bl_size here. > > >> diff -r c0bd6c9a2c00 -r b188a6d5bfea libxkutil/device_parsing.h >> --- a/libxkutil/device_parsing.h Mon May 11 16:47:15 2009 -0300 >> +++ b/libxkutil/device_parsing.h Wed May 13 09:33:45 2009 -0300 >> @@ -99,7 +99,9 @@ >> struct fv_os_info { >> char *type; /* Should always be 'hvm' */ >> char *loader; >> - char *boot; >> + unsigned bootlist_size; > > I would call this bootlist_cnt - it'll parallel the dev_disk_ct (etc) > fields of the domain struct Well, the size suffix is better, IMO. But if you think it's better to follow the name pattern I'll fix it. > >> + char **bootlist; >> + //char *boot; > > Remove commented out line. > > >> diff -r c0bd6c9a2c00 -r b188a6d5bfea libxkutil/xmlgen.c >> --- a/libxkutil/xmlgen.c Mon May 11 16:47:15 2009 -0300 >> +++ b/libxkutil/xmlgen.c Wed May 13 09:33:45 2009 -0300 >> @@ -439,6 +439,7 @@ >> { >> struct fv_os_info *os = &domain->os_info.fv; >> xmlNodePtr tmp; >> + unsigned i; >> >> if (os->type == NULL) >> os->type = strdup("hvm"); >> @@ -446,8 +447,13 @@ >> if (os->loader == NULL) >> os->loader = strdup("/usr/lib/xen/boot/hvmloader"); >> >> - if (os->boot == NULL) >> - os->boot = strdup("hd"); >> + //if (os->boot == NULL) >> + // os->boot = strdup("hd"); > > Remove commented out lines. Leave my comments alone! 
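Putting the two realloc() points above together (keep the temporary so a failed realloc() can't clobber blist, grow by (bl_size + 1), and only bump the count after a successful append), the loop would end up roughly like this (untested, names as in the patch):

        else if (XSTREQ(child->name, "boot")) {
                char **tmp_list;

                /* Grow by one; blist stays valid if realloc() fails */
                tmp_list = realloc(blist, (bl_size + 1) * sizeof(char *));
                if (tmp_list == NULL) {
                        CU_DEBUG("Could not alloc space for boot device");
                        continue;
                }

                blist = tmp_list;
                blist[bl_size] = get_attr_value(child, "dev");
                bl_size++;
        }

That way the original list is only touched once the allocation has succeeded, so blist and bl_size always stay consistent.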
> >> + if (os->bootlist_size == 0) { >> + os->bootlist_size = 1; >> + os->bootlist = (char **)calloc(1, sizeof(char *)); >> + os->bootlist[0] = strdup("hd"); >> + } > > libvirt will set a default for us, but it's good to add something just > in case. I don't get it. How can libvirt set a default? Will it automagically provide an array with a default value? > > >> diff -r c0bd6c9a2c00 -r b188a6d5bfea src/Virt_VSSD.c >> --- a/src/Virt_VSSD.c Mon May 11 16:47:15 2009 -0300 >> +++ b/src/Virt_VSSD.c Wed May 13 09:33:45 2009 -0300 >> @@ -42,16 +42,50 @@ >> CMPIInstance *inst) >> { >> bool fv = true; >> + CMPIArray *array; >> >> if (dominfo->type == DOMAIN_XENFV) >> CMSetProperty(inst, "IsFullVirt", >> (CMPIValue *)&fv, CMPI_boolean); >> >> - if (dominfo->os_info.fv.boot != NULL) >> - CMSetProperty(inst, >> - "BootDevice", >> - (CMPIValue *)dominfo->os_info.fv.boot, >> - CMPI_chars); >> + if (dominfo->os_info.fv.bootlist_size > 0) { >> + CMPICount i; >> + CMPICount bl_size; >> + CMPIStatus s; >> + >> + bl_size = (CMPICount)dominfo->os_info.fv.bootlist_size; >> + >> + array = CMNewArray(_BROKER, >> + bl_size, >> + CMPI_string, >> + &s); >> + >> + if (s.rc != CMPI_RC_OK) >> + CU_DEBUG("Error creating BootDevice list"); > > You should return here - if you fail to create the array, you can't add > elements to it. > >> + >> + for (i = 0; i < bl_size; i++) { >> + CMPIString *cm_str; >> + >> + cm_str = CMNewString(_BROKER, >> + (const char >> *)dominfo->os_info.fv.bootlist[i], >> + &s); > > You need to set the elements of the array. See > Virt_VirtualSystemManagementCapabilities.c (or other providers that need > to set an array). > Yes, I guess that would eliminate my segfault problem. :-( Ok. I just don't believe I actually forgot to set the elements of the array. I think YOU somehow changed my code to make me look silly! And I'll prove sending the original co... nevermind. >> + if (s.rc != CMPI_RC_OK) >> + CU_DEBUG("Error adding item to >> BootDevice " + "list"); > > The debug message here is misleading - the call to CMNewString() doesn't > add the string to the array. You'll need to call CMSetArrayElementAt() > for that. > > Also, if you encounter an error here, you need to return an error. > >> + } >> + >> + s = CMSetProperty(inst, >> + "BootDevices", >> + (CMPIValue *)array, > > This needs to be &array. Uh?! CMNewArray returns a pointer to CMPIArray. > >> + CMPI_stringA); >> + >> + if (s.rc != CMPI_RC_OK) >> + CU_DEBUG("Error setting BootDevices property"); >> + >> + //CMSetProperty(inst, >> + // "BootDevices", >> + // (CMPIValue *)dominfo->os_info.fv.boot); > > Remove commented out lines. 
> >> + } >> } >> >> static void _set_pv_prop(struct domain *dominfo, >> diff -r c0bd6c9a2c00 -r b188a6d5bfea >> src/Virt_VirtualSystemManagementService.c >> --- a/src/Virt_VirtualSystemManagementService.c Mon May 11 16:47:15 >> 2009 -0300 >> +++ b/src/Virt_VirtualSystemManagementService.c Wed May 13 09:33:45 >> 2009 -0300 >> @@ -202,7 +202,13 @@ >> const char *pfx) >> { >> int ret; >> + CMPICount i; >> const char *val; >> + CMPIArray *bootlist; >> + CMPIStatus s; >> + CMPIData boot_elem; >> + char **tmp_str_arr; >> + >> >> if (STREQC(pfx, "KVM")) { >> if (system_has_kvm(pfx)) >> @@ -216,12 +222,75 @@ >> return 0; >> } >> >> - ret = cu_get_str_prop(inst, "BootDevice", &val); >> - if (ret != CMPI_RC_OK) >> - val = "hd"; >> + for (i = 0; i < domain->os_info.fv.bootlist_size; i++) >> + free(domain->os_info.fv.bootlist[i]); >> >> - free(domain->os_info.fv.boot); >> - domain->os_info.fv.boot = strdup(val); >> + ret = cu_get_array_prop(inst, "BootDevices", &bootlist); >> + + if (ret == CMPI_RC_OK) { >> + CMPICount bl_size; >> + >> + bl_size = CMGetArrayCount(bootlist, &s); >> + if (s.rc != CMPI_RC_OK) { >> + CU_DEBUG("Invalid BootDevice array size"); >> + return 0; >> + } >> + >> + tmp_str_arr = (char >> **)realloc(domain->os_info.fv.bootlist, >> + bl_size * sizeof(char >> *)); >> + >> + if (tmp_str_arr == NULL) { >> + CU_DEBUG("Could not alloc BootDevices array"); >> + return 0; >> + } >> + >> + for (i = 0; i < bl_size; i++) { >> + CMPIString *cmpi_str; >> + const char *str; >> + >> + boot_elem = CMGetArrayElementAt(bootlist, >> + i, >> + NULL); + >> + if (CMIsNullValue(boot_elem)) { >> + CU_DEBUG("Null BootDevice"); >> + return 0; >> + } >> + >> + cmpi_str = boot_elem.value.string; > > You'll want to make sure that boot_elem isn't null. You can use > CMIsNullObject(). Should I use both functions or only CMIsNullObject? > >> + >> + free(domain->os_info.fv.bootlist[i]); >> + CU_DEBUG("Freed item from bootlist"); >> + >> + str = cmpi_str->ft->getCharPtr(cmpi_str, &s); > > Instead of doing this, you can use CMGetCharPtr() to pull the string > from the CMPIData object. If you use CMGetCharPtr(), you won't need the > cmpi_str variable. Oh, CMGetCharPtr is a macro! Nice! > > >> + >> + if (s.rc != CMPI_RC_OK) { >> + CU_DEBUG("Could not extract char >> pointer from " >> + "CMPIArray"); > > You'll want to return an error here. > >> + } >> + >> + tmp_str_arr[i] = strdup(str); >> + } >> + domain->os_info.fv.bootlist_size = bl_size; >> + domain->os_info.fv.bootlist = tmp_str_arr; >> + >> + } else { >> + + CU_DEBUG("Failed to get BootDevices >> property"); >> + >> + tmp_str_arr = (char >> **)realloc(domain->os_info.fv.bootlist, >> + sizeof(char *)); >> + if (tmp_str_arr == NULL) >> + return 0; >> + >> + tmp_str_arr[0] = strdup("hd"); >> + >> + domain->os_info.fv.bootlist = tmp_str_arr; >> + domain->os_info.fv.bootlist_size = 1; > > In xmlgen, you already set a default boot device. So we probably only > need to do this once. You can remove this or remove the bit in xmlgen. Does it always execute the code in xmlgen after this function? > >> + } > > This whole block is quite large - I would make the boot order bits a > separate function. > >> + >> + //free(domain->os_info.fv.boot); >> + //domain->os_info.fv.boot = strdup(val); > > Remove commented lines. 
> >> >> ret = cu_get_str_prop(inst, "Emulator", &val); >> if (ret != CMPI_RC_OK) >> > > > -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Wed May 13 19:21:38 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 13 May 2009 12:21:38 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] Add boot order support In-Reply-To: <4A0B098F.70106@linux.vnet.ibm.com> References: <4A0B004D.3060305@linux.vnet.ibm.com> <4A0B098F.70106@linux.vnet.ibm.com> Message-ID: <4A0B1DC2.6040604@linux.vnet.ibm.com> >>> - else if (XSTREQ(child->name, "init")) >>> + else if (XSTREQ(child->name, "boot")) { >>> + //dominfo->os_info.fv.boot = >>> get_attr_value(child, >>> + >>> //"dev"); >>> + bl_size++; >>> + >>> + tmp_list = (char **)realloc(blist, >>> + bl_size * >>> sizeof(char *)); >> >> tmp_list isn't needed here - you aren't using it anywhere else. Just >> assign the the realloc back to blist. >> > > Well, if realloc returns a null pointer I lose my pointer to the array > which makes me deliver a NULL-pointer array with non-zero size. Fair point. You'll want to be sure to use tmp_list when you do the assignment. > > >>> + if (tmp_list == NULL) { >>> + // Nothing you can do. Just go on. >>> + CU_DEBUG("Could not alloc space for " >>> + "boot device"); >>> + bl_size--; >> >> Instead of incrementing prior to the realloc(), just call realloc() >> with (bl_size + 1). If the realloc() is successful, increment bl_size >> after the assignment below. >> >>> + continue; + } >>> + + blist[bl_size - 1] >>> = get_attr_value(child, "dev"); Instead of blist, you'd need to use tmp_list here. >> >> So you would increment bl_size here. >> >> > Leave my comments alone! > >> >>> + if (os->bootlist_size == 0) { >>> + os->bootlist_size = 1; >>> + os->bootlist = (char **)calloc(1, sizeof(char *)); >>> + os->bootlist[0] = strdup("hd"); >>> + } >> >> libvirt will set a default for us, but it's good to add something just >> in case. > > I don't get it. How can libvirt set a default? Will it automagically > provide an array with a default value? libvirt will add a boot tag to the guest XML when the guest is defined if one isn't supplied. I haven't verified in the Xen case, but it definitely does this for KVM. >>> + bl_size = (CMPICount)dominfo->os_info.fv.bootlist_size; >>> + >>> + array = CMNewArray(_BROKER, >>> + bl_size, >>> + CMPI_string, >>> + &s); >>> + >>> + if (s.rc != CMPI_RC_OK) >>> + CU_DEBUG("Error creating BootDevice list"); >> >> You should return here - if you fail to create the array, you can't >> add elements to it. >> >>> + >>> + for (i = 0; i < bl_size; i++) { >>> + CMPIString *cm_str; >>> + >>> + cm_str = CMNewString(_BROKER, >>> + (const char >>> *)dominfo->os_info.fv.bootlist[i], >>> + &s); >> >> You need to set the elements of the array. See >> Virt_VirtualSystemManagementCapabilities.c (or other providers that >> need to set an array). >> > > Yes, I guess that would eliminate my segfault problem. :-( > Ok. I just don't believe I actually forgot to set the elements of the > array. I think YOU somehow changed my code to make me look silly! And > I'll prove sending the original co... nevermind. =) > >>> + if (s.rc != CMPI_RC_OK) >>> + CU_DEBUG("Error adding item to >>> BootDevice " + "list"); >> >> The debug message here is misleading - the call to CMNewString() >> doesn't add the string to the array. You'll need to call >> CMSetArrayElementAt() for that. >> >> Also, if you encounter an error here, you need to return an error. 
>> >>> + } >>> + >>> + s = CMSetProperty(inst, >>> + "BootDevices", >>> + (CMPIValue *)array, >> >> This needs to be &array. > > Uh?! CMNewArray returns a pointer to CMPIArray. Right. But then you are casting the CMPIArray pointer as a CMPIValue pointer. >>> + if (tmp_str_arr == NULL) { >>> + CU_DEBUG("Could not alloc BootDevices array"); >>> + return 0; >>> + } >>> + >>> + for (i = 0; i < bl_size; i++) { >>> + CMPIString *cmpi_str; >>> + const char *str; >>> + >>> + boot_elem = CMGetArrayElementAt(bootlist, >>> + i, >>> + NULL); + >>> + if (CMIsNullValue(boot_elem)) { >>> + CU_DEBUG("Null BootDevice"); >>> + return 0; >>> + } >>> + >>> + cmpi_str = boot_elem.value.string; >> >> You'll want to make sure that boot_elem isn't null. You can use >> CMIsNullObject(). > > > Should I use both functions or only CMIsNullObject? Oh! I missed the call to CMIsNullValue(). That check should be fine. >>> + + CU_DEBUG("Failed to get BootDevices >>> property"); >>> + >>> + tmp_str_arr = (char >>> **)realloc(domain->os_info.fv.bootlist, >>> + sizeof(char *)); >>> + if (tmp_str_arr == NULL) >>> + return 0; >>> + >>> + tmp_str_arr[0] = strdup("hd"); >>> + >>> + domain->os_info.fv.bootlist = tmp_str_arr; >>> + domain->os_info.fv.bootlist_size = 1; >> >> In xmlgen, you already set a default boot device. So we probably only >> need to do this once. You can remove this or remove the bit in xmlgen. > > Does it always execute the code in xmlgen after this function? Yes. Here's the general flow: 1) We pull the data from the CIM attributes and store them in the domain struct. 2) VSMS eventually calls system_to_xml() passing in the domain struct 3) xmlgen generates an XML for the guest based on the info in the domain struct 4) VSMS calls virDomainDefineXML() and passed in the XML we've generated -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Wed May 13 18:45:35 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Wed, 13 May 2009 15:45:35 -0300 Subject: [Libvirt-cim] [PATCH 0 of 2] [RFC] (#2) Add boot order support for full virtualization environments Message-ID: From rmaciel at linux.vnet.ibm.com Wed May 13 18:45:36 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Wed, 13 May 2009 15:45:36 -0300 Subject: [Libvirt-cim] [PATCH 1 of 2] This fix the generation of the tag on fully-virtualizable Xen guests In-Reply-To: References: Message-ID: # HG changeset patch # User Richard Maciel # Date 1242071235 10800 # Node ID c0bd6c9a2c0084398784bb1ae36649bd3400e36c # Parent 5608b9455cd32fccbc324cd540c509d7230a113f This fix the generation of the tag on fully-virtualizable Xen guests. Right now it is generated with the boot device as a the value of the node (e.g. hd) However, the boot device must be a property of the node (e.g. 
) Signed-off-by: Richard Maciel diff -r 5608b9455cd3 -r c0bd6c9a2c00 libxkutil/xmlgen.c --- a/libxkutil/xmlgen.c Mon Apr 27 17:05:48 2009 -0700 +++ b/libxkutil/xmlgen.c Mon May 11 16:47:15 2009 -0300 @@ -457,10 +457,12 @@ if (tmp == NULL) return XML_ERROR; - tmp = xmlNewChild(root, NULL, BAD_CAST "boot", BAD_CAST os->boot); + tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); if (tmp == NULL) return XML_ERROR; + xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->boot); + tmp = xmlNewChild(root, NULL, BAD_CAST "features", NULL); xmlNewChild(tmp, NULL, BAD_CAST "pae", NULL); xmlNewChild(tmp, NULL, BAD_CAST "acpi", NULL); From rmaciel at linux.vnet.ibm.com Wed May 13 18:45:37 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Wed, 13 May 2009 15:45:37 -0300 Subject: [Libvirt-cim] [PATCH 2 of 2] Add boot order support In-Reply-To: References: Message-ID: # HG changeset patch # User Richard Maciel # Date 1242237321 10800 # Node ID e3ea69df8ae9fa611605843b471894c2baaa8bc0 # Parent c0bd6c9a2c0084398784bb1ae36649bd3400e36c Add boot order support Signed-off-by: Richard Maciel diff -r c0bd6c9a2c00 -r e3ea69df8ae9 libxkutil/device_parsing.c --- a/libxkutil/device_parsing.c Mon May 11 16:47:15 2009 -0300 +++ b/libxkutil/device_parsing.c Wed May 13 14:55:21 2009 -0300 @@ -810,6 +810,8 @@ static int parse_os(struct domain *dominfo, xmlNode *os) { xmlNode *child; + char **blist = NULL; + unsigned bl_size = 0; for (child = os->children; child != NULL; child = child->next) { if (XSTREQ(child->name, "type")) @@ -822,10 +824,23 @@ STRPROP(dominfo, os_info.pv.cmdline, child); else if (XSTREQ(child->name, "loader")) STRPROP(dominfo, os_info.fv.loader, child); - else if (XSTREQ(child->name, "boot")) - dominfo->os_info.fv.boot = get_attr_value(child, - "dev"); - else if (XSTREQ(child->name, "init")) + else if (XSTREQ(child->name, "boot")) { + char **tmp_list = NULL; + + tmp_list = (char **)realloc(blist, + (bl_size+1) * + sizeof(char *)); + if (tmp_list == NULL) { + // Nothing you can do. Just go on. 
+ CU_DEBUG("Could not alloc space for " + "boot device"); + continue; + } + blist = tmp_list; + + blist[bl_size] = get_attr_value(child, "dev"); + bl_size++; + } else if (XSTREQ(child->name, "init")) STRPROP(dominfo, os_info.lxc.init, child); } @@ -843,6 +858,9 @@ else dominfo->type = -1; + dominfo->os_info.fv.bootlist = blist; + dominfo->os_info.fv.bootlist_ct = bl_size; + return 1; } @@ -1001,9 +1019,15 @@ free(dom->os_info.pv.cmdline); } else if ((dom->type == DOMAIN_XENFV) || (dom->type == DOMAIN_KVM) || (dom->type == DOMAIN_QEMU)) { + int i; + free(dom->os_info.fv.type); free(dom->os_info.fv.loader); - free(dom->os_info.fv.boot); + + for (i = 0; i < dom->os_info.fv.bootlist_ct; i++) { + free(dom->os_info.fv.bootlist[i]); + } + free(dom->os_info.fv.bootlist); } else if (dom->type == DOMAIN_LXC) { free(dom->os_info.lxc.type); free(dom->os_info.lxc.init); diff -r c0bd6c9a2c00 -r e3ea69df8ae9 libxkutil/device_parsing.h --- a/libxkutil/device_parsing.h Mon May 11 16:47:15 2009 -0300 +++ b/libxkutil/device_parsing.h Wed May 13 14:55:21 2009 -0300 @@ -99,7 +99,8 @@ struct fv_os_info { char *type; /* Should always be 'hvm' */ char *loader; - char *boot; + unsigned bootlist_ct; + char **bootlist; }; struct lxc_os_info { diff -r c0bd6c9a2c00 -r e3ea69df8ae9 libxkutil/xml_parse_test.c --- a/libxkutil/xml_parse_test.c Mon May 11 16:47:15 2009 -0300 +++ b/libxkutil/xml_parse_test.c Wed May 13 14:55:21 2009 -0300 @@ -28,6 +28,7 @@ static void print_os(struct domain *dom, FILE *d) { + int i; if (dom->type == DOMAIN_XENPV) { print_value(d, "Domain Type", "Xen PV"); @@ -39,13 +40,18 @@ print_value(d, "Domain Type", "Xen FV"); print_value(d, "Type", dom->os_info.fv.type); print_value(d, "Loader", dom->os_info.fv.loader); - print_value(d, "Boot", dom->os_info.fv.boot); + for (i = 0; i < dom->os_info.fv.bootlist_size; i++) { + print_value(d, "Boot", dom->os_info.fv.bootlist[i]); + } } else if ((dom->type == DOMAIN_KVM) || (dom->type == DOMAIN_QEMU)) { print_value(d, "Domain Type", "KVM/QEMU"); print_value(d, "Type", dom->os_info.fv.type); print_value(d, "Loader", dom->os_info.fv.loader); - print_value(d, "Boot", dom->os_info.fv.boot); + + for (i = 0; i < dom->os_info.fv.bootlist_size; i++) { + print_value(d, "Boot", dom->os_info.fv.bootlist[i]); + } } else if (dom->type == DOMAIN_LXC) { print_value(d, "Init", dom->os_info.lxc.init); } else { diff -r c0bd6c9a2c00 -r e3ea69df8ae9 libxkutil/xmlgen.c --- a/libxkutil/xmlgen.c Mon May 11 16:47:15 2009 -0300 +++ b/libxkutil/xmlgen.c Wed May 13 14:55:21 2009 -0300 @@ -439,6 +439,7 @@ { struct fv_os_info *os = &domain->os_info.fv; xmlNodePtr tmp; + unsigned i; if (os->type == NULL) os->type = strdup("hvm"); @@ -446,8 +447,11 @@ if (os->loader == NULL) os->loader = strdup("/usr/lib/xen/boot/hvmloader"); - if (os->boot == NULL) - os->boot = strdup("hd"); + if (os->bootlist_ct == 0) { + os->bootlist_ct = 1; + os->bootlist = (char **)calloc(1, sizeof(char *)); + os->bootlist[0] = strdup("hd"); + } tmp = xmlNewChild(root, NULL, BAD_CAST "type", BAD_CAST os->type); if (tmp == NULL) @@ -457,11 +461,13 @@ if (tmp == NULL) return XML_ERROR; - tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); - if (tmp == NULL) - return XML_ERROR; + for (i = 0; i < os->bootlist_ct; i++) { + tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); + if (tmp == NULL) + return XML_ERROR; - xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->boot); + xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->bootlist[i]); + } tmp = xmlNewChild(root, NULL, BAD_CAST "features", NULL); xmlNewChild(tmp, NULL, 
BAD_CAST "pae", NULL); @@ -475,21 +481,28 @@ { struct fv_os_info *os = &domain->os_info.fv; xmlNodePtr tmp; + unsigned i; if (os->type == NULL) os->type = strdup("hvm"); - if (os->boot == NULL) - os->boot = strdup("hd"); + if (os->bootlist_ct == 0) { + os->bootlist_ct = 1; + os->bootlist = (char **)calloc(1, sizeof(char *)); + os->bootlist[0] = strdup("hd"); + } tmp = xmlNewChild(root, NULL, BAD_CAST "type", BAD_CAST os->type); if (tmp == NULL) return XML_ERROR; - tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); - if (tmp == NULL) - return XML_ERROR; - xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->boot); + for (i = 0; i < os->bootlist_ct; i++) { + tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); + if (tmp == NULL) + return XML_ERROR; + + xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->bootlist[i]); + } return NULL; } diff -r c0bd6c9a2c00 -r e3ea69df8ae9 schema/VSSD.mof --- a/schema/VSSD.mof Mon May 11 16:47:15 2009 -0300 +++ b/schema/VSSD.mof Wed May 13 14:55:21 2009 -0300 @@ -27,7 +27,7 @@ [Description ("The device to boot from when in fully-virtualized mode." "One of hd,fd,cdrom.")] - string BootDevice; + string BootDevices[]; [Description ("The emulator the guest should use during runtime.")] string Emulator; @@ -42,8 +42,8 @@ class KVM_VirtualSystemSettingData : Virt_VirtualSystemSettingData { - [Description ("The device to boot from. One of hd,fd,cdrom.")] - string BootDevice; + [Description ("The list of devices to boot from. hd,fd,cdrom.")] + string BootDevices[]; [Description ("The emulator the guest should use during runtime.")] string Emulator; diff -r c0bd6c9a2c00 -r e3ea69df8ae9 src/Virt_VSSD.c --- a/src/Virt_VSSD.c Mon May 11 16:47:15 2009 -0300 +++ b/src/Virt_VSSD.c Wed May 13 14:55:21 2009 -0300 @@ -42,16 +42,58 @@ CMPIInstance *inst) { bool fv = true; + CMPIArray *array; if (dominfo->type == DOMAIN_XENFV) CMSetProperty(inst, "IsFullVirt", (CMPIValue *)&fv, CMPI_boolean); - if (dominfo->os_info.fv.boot != NULL) - CMSetProperty(inst, - "BootDevice", - (CMPIValue *)dominfo->os_info.fv.boot, - CMPI_chars); + if (dominfo->os_info.fv.bootlist_ct > 0) { + CMPICount i; + CMPICount bl_size; + CMPIStatus s; + + bl_size = (CMPICount)dominfo->os_info.fv.bootlist_ct; + + array = CMNewArray(_BROKER, + bl_size, + CMPI_string, + &s); + + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Error creating BootDevice list"); + return; + } + + for (i = 0; i < bl_size; i++) { + CMPIString *cm_str; + + cm_str = CMNewString(_BROKER, + (const char *)dominfo->os_info.fv.bootlist[i], + &s); + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Error adding item to BootDevice " + "list"); + continue; + } + + s = CMSetArrayElementAt(array, + i, + cm_str, + CMPI_string); + if (s.rc != CMPI_RC_OK) + CU_DEBUG("Error setting BootDevice array " + "element"); + } + + s = CMSetProperty(inst, + "BootDevices", + (CMPIValue *)array, + CMPI_stringA); + + if (s.rc != CMPI_RC_OK) + CU_DEBUG("Error setting BootDevices property"); + } } static void _set_pv_prop(struct domain *dominfo, diff -r c0bd6c9a2c00 -r e3ea69df8ae9 src/Virt_VirtualSystemManagementService.c --- a/src/Virt_VirtualSystemManagementService.c Mon May 11 16:47:15 2009 -0300 +++ b/src/Virt_VirtualSystemManagementService.c Wed May 13 14:55:21 2009 -0300 @@ -202,7 +202,13 @@ const char *pfx) { int ret; + CMPICount i; const char *val; + CMPIArray *bootlist; + CMPIStatus s; + CMPIData boot_elem; + char **tmp_str_arr; + if (STREQC(pfx, "KVM")) { if (system_has_kvm(pfx)) @@ -216,12 +222,69 @@ return 0; } - ret = cu_get_str_prop(inst, "BootDevice", &val); - if (ret != CMPI_RC_OK) 
- val = "hd"; + for (i = 0; i < domain->os_info.fv.bootlist_ct; i++) + free(domain->os_info.fv.bootlist[i]); - free(domain->os_info.fv.boot); - domain->os_info.fv.boot = strdup(val); + ret = cu_get_array_prop(inst, "BootDevices", &bootlist); + + if (ret == CMPI_RC_OK) { + CMPICount bl_size; + + bl_size = CMGetArrayCount(bootlist, &s); + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Invalid BootDevice array size"); + return 0; + } + + tmp_str_arr = (char **)realloc(domain->os_info.fv.bootlist, + bl_size * sizeof(char *)); + + if (tmp_str_arr == NULL) { + CU_DEBUG("Could not alloc BootDevices array"); + return 0; + } + + for (i = 0; i < bl_size; i++) { + const char *str; + + boot_elem = CMGetArrayElementAt(bootlist, + i, + NULL); + + if (CMIsNullValue(boot_elem)) { + CU_DEBUG("Null BootDevice"); + return 0; + } + + free(domain->os_info.fv.bootlist[i]); + CU_DEBUG("Freed item from bootlist"); + + str = CMGetCharPtr(boot_elem.value.string); + + if (s.rc != CMPI_RC_OK) { + CU_DEBUG("Could not extract char pointer from " + "CMPIArray"); + } + + tmp_str_arr[i] = strdup(str); + } + domain->os_info.fv.bootlist_ct = bl_size; + domain->os_info.fv.bootlist = tmp_str_arr; + + } else { + + CU_DEBUG("Failed to get BootDevices property"); + + tmp_str_arr = (char **)realloc(domain->os_info.fv.bootlist, + sizeof(char *)); + if (tmp_str_arr == NULL) + return 0; + + tmp_str_arr[0] = strdup("hd"); + + domain->os_info.fv.bootlist = tmp_str_arr; + domain->os_info.fv.bootlist_ct = 1; + } ret = cu_get_str_prop(inst, "Emulator", &val); if (ret != CMPI_RC_OK) From dayne.medlyn at hp.com Wed May 13 22:38:44 2009 From: dayne.medlyn at hp.com (Medlyn, Dayne (VSL - Ft Collins)) Date: Wed, 13 May 2009 22:38:44 +0000 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? Message-ID: All, I am trying to understand the use of NumberOfBlocks and ConsumableBlocks in the Xen_Memory class, specifically for the Xen host. What I have noticed is that between libvirt-cim-0.4.1 and libvirt-cim-0.5.2 the values for NumberOfBlock is now different than ConsumableBlocks and much larger than the physical memory installed on the system. Is it the case that NumberOfBlocks represents the maximum possible blocks for the hardware, or some such number ConsumableBlocks is the memory that is actually installed in the system? On my system, however, NumberOfBlocks reports 16TB where /proc/meminfo reports 32Tb for VmallocTotal. In short, should I be using ConsumableBlocks to determine the total physical memory on the system? Thanks for your help. Dayne From kaitlin at linux.vnet.ibm.com Thu May 14 01:09:49 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 13 May 2009 18:09:49 -0700 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in theXen_Memory class represent? In-Reply-To: References: Message-ID: <4A0B6F5D.4030700@linux.vnet.ibm.com> Medlyn, Dayne (VSL - Ft Collins) wrote: > All, > > I am trying to understand the use of NumberOfBlocks and ConsumableBlocks in the Xen_Memory class, specifically for the Xen host. > What I have noticed is that between libvirt-cim-0.4.1 and libvirt-cim-0.5.2 the values for NumberOfBlock is now different than ConsumableBlocks and > much larger than the physical memory installed on the system. > Is it the case that NumberOfBlocks represents the maximum possible blocks for the hardware, > or some such number ConsumableBlocks is the memory that is > actually installed in the system? 
On my system, however, NumberOfBlocks reports 16TB where /proc/meminfo > reports 32Tb for VmallocTotal. In short, should I be using ConsumableBlocks to determine the total physical memory on the system? Hi Dayne, It looks like there is a bug here. Currently, the providers use the following representation: NumberOfBlocks: max amount of memory that can be allocated to a guest ConsumableBlocks: current memory allocated to the guest However, these values should be reversed based on the attribute definitions. Here's an example using one of the guests on my system: # virsh dominfo rstest_domainId: - Name: rstest_domain UUID: 746de06d-cb45-4efd-bc18-bf91d10bec84 State: shut off CPU(s): 1 Max memory: 131072 kB Used memory: 130048 kB Autostart: disable We take the max and used memory values libvirt reports and then convert them based on the block size. # wbemcli gi 'http://localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memory",DeviceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerSystem",SystemName="rstest_domain"' -nl localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memory",DeviceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerSystem",SystemName="rstest_domain" -TransitioningToState=12 -SystemCreationClassName="Xen_ComputerSystem" -SystemName="rstest_domain" -CreationClassName="Xen_Memory" -DeviceID="rstest_domain/mem" -BlockSize=4096 -NumberOfBlocks=32768 -ConsumableBlocks=32512 -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 14 01:34:05 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 13 May 2009 18:34:05 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] Add boot order support In-Reply-To: References: Message-ID: <4A0B750D.8020908@linux.vnet.ibm.com> > + > + tmp_list = (char **)realloc(blist, > + (bl_size+1) * Style-wise, the rest of the code uses (bl_size + 1). > diff -r c0bd6c9a2c00 -r e3ea69df8ae9 libxkutil/xml_parse_test.c > --- a/libxkutil/xml_parse_test.c Mon May 11 16:47:15 2009 -0300 > +++ b/libxkutil/xml_parse_test.c Wed May 13 14:55:21 2009 -0300 > @@ -28,6 +28,7 @@ > static void print_os(struct domain *dom, > FILE *d) > { > + int i; > > if (dom->type == DOMAIN_XENPV) { > print_value(d, "Domain Type", "Xen PV"); > @@ -39,13 +40,18 @@ > print_value(d, "Domain Type", "Xen FV"); > print_value(d, "Type", dom->os_info.fv.type); > print_value(d, "Loader", dom->os_info.fv.loader); > - print_value(d, "Boot", dom->os_info.fv.boot); > > + for (i = 0; i < dom->os_info.fv.bootlist_size; i++) { This should be bootlist_ct. > + print_value(d, "Boot", dom->os_info.fv.bootlist[i]); > + } > } else if ((dom->type == DOMAIN_KVM) || (dom->type == DOMAIN_QEMU)) { > print_value(d, "Domain Type", "KVM/QEMU"); > print_value(d, "Type", dom->os_info.fv.type); > print_value(d, "Loader", dom->os_info.fv.loader); > - print_value(d, "Boot", dom->os_info.fv.boot); > + > + for (i = 0; i < dom->os_info.fv.bootlist_size; i++) { Same here. 
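(In other words, both loops would presumably end up as something like

        for (i = 0; i < dom->os_info.fv.bootlist_ct; i++)
                print_value(d, "Boot", dom->os_info.fv.bootlist[i]);

since bootlist_ct is the field the device_parsing.h hunk actually adds.)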
> + } > + > + for (i = 0; i < bl_size; i++) { > + CMPIString *cm_str; > + > + cm_str = CMNewString(_BROKER, > + (const char *)dominfo->os_info.fv.bootlist[i], > + &s); > + if (s.rc != CMPI_RC_OK) { > + CU_DEBUG("Error adding item to BootDevice " > + "list"); > + continue; > + } > + > + s = CMSetArrayElementAt(array, > + i, > + cm_str, This should be (CMPIValue *)&cm_str > + CMPI_string); > + if (s.rc != CMPI_RC_OK) > + CU_DEBUG("Error setting BootDevice array " > + "element"); > + } > + > + s = CMSetProperty(inst, > + "BootDevices", > + (CMPIValue *)array, This should be (CMPIValue *)&array > + > + } else { > + > + CU_DEBUG("Failed to get BootDevices property"); > + > + tmp_str_arr = (char **)realloc(domain->os_info.fv.bootlist, > + sizeof(char *)); > + if (tmp_str_arr == NULL) > + return 0; > + > + tmp_str_arr[0] = strdup("hd"); > + > + domain->os_info.fv.bootlist = tmp_str_arr; > + domain->os_info.fv.bootlist_ct = 1; > + } > Since you've added this here, I would remove the code that adds a default boot device in xmlgen - that allows you to remove some duplicate code. Plus, this follows convention - most of the defaults are set in VSMS - not in xmlgen itself. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 14 05:49:16 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 13 May 2009 22:49:16 -0700 Subject: [Libvirt-cim] [PATCH] Set the InitPath for LXC guests Message-ID: <1a625ee4280f4f55fbed.1242280156@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242279945 25200 # Node ID 1a625ee4280f4f55fbed04c2eeb973e3af3e5816 # Parent 032d34765a83b2f6784fd33e488b8829f3391603 Set the InitPath for LXC guests This was missed when InitPath support for LXC guests was added. 
Signed-off-by: Kaitlin Rupert diff -r 032d34765a83 -r 1a625ee4280f src/Virt_VSSD.c --- a/src/Virt_VSSD.c Wed May 13 07:40:47 2009 -0700 +++ b/src/Virt_VSSD.c Wed May 13 22:45:45 2009 -0700 @@ -88,6 +88,16 @@ CMPI_chars); } +static void _set_lxc_prop(struct domain *dominfo, + CMPIInstance *inst) +{ + if (dominfo->os_info.lxc.init != NULL) + CMSetProperty(inst, + "InitPath", + (CMPIValue *)dominfo->os_info.lxc.init, + CMPI_chars); +} + static int instance_from_dom(virDomainPtr dom, CMPIInstance *inst) { @@ -151,6 +161,8 @@ _set_fv_prop(dominfo, inst); else if (dominfo->type == DOMAIN_XENPV) _set_pv_prop(dominfo, inst); + else if (dominfo->type == DOMAIN_LXC) + _set_lxc_prop(dominfo, inst); else CU_DEBUG("Unknown domain type %i for creating VSSD", dominfo->type); From yunguol at cn.ibm.com Thu May 14 07:43:29 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Thu, 14 May 2009 00:43:29 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Add general functions for pool verification Message-ID: # HG changeset patch # User Guolian Yun # Date 1242287001 25200 # Node ID fe5b8e3ff97aa7892c050b2642838ca2d0840a5e # Parent 9391439d65e7ec6b88f34923d97f969c6114a237 [TEST] #2 Add general functions for pool verification Tested for KVM with current sources Signed-off-by: Guolian Yun diff -r 9391439d65e7 -r fe5b8e3ff97a suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed May 13 07:28:33 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Thu May 14 00:43:21 2009 -0700 @@ -21,15 +21,23 @@ # import sys -from CimTest.Globals import logger +from CimTest.Globals import logger, CIM_NS from CimTest.ReturnCodes import PASS, FAIL -from XenKvmLib.classes import get_typed_class +from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.const import get_provider_version, default_pool_name -from XenKvmLib.enumclass import EnumInstances +from XenKvmLib.enumclass import EnumInstances, GetInstance +from XenKvmLib.assoc import Associators from VirtLib.utils import run_remote -from XenKvmLib.xm_virt_util import virt2uri +from XenKvmLib.xm_virt_util import virt2uri, net_list +from XenKvmLib import rpcs_service +import pywbem +from CimTest.CimExt import CIMClassMOF +from XenKvmLib.vxml import NetXML +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED +cim_mname = "CreateChildResourcePool" input_graphics_pool_rev = 757 +libvirt_cim_child_pool_rev = 837 def pool_cn_to_rasd_cn(pool_cn, virt): if pool_cn.find('ProcessorPool') >= 0: @@ -97,3 +105,118 @@ return volume +def get_pool_rasds(server, virt): + net_pool_rasds = [] + + ac_cn = get_typed_class(virt, "AllocationCapabilities") + an_cn = get_typed_class(virt, "SettingsDefineCapabilities") + key_list = {"InstanceID" : "NetworkPool/0" } + + try: + inst = GetInstance(server, ac_cn, key_list) + rasd = Associators(server, an_cn, ac_cn, InstanceID=inst.InstanceID) + except Exception, detail: + logger.error("Exception: %s", detail) + return None + + for item in rasd: + if item['InstanceID'] == "Default": + net_pool_rasds.append(item) + + return net_pool_rasds + +def net_undefine(network, server, virt="Xen"): + """Function undefine a given virtual network""" + + cmd = "virsh -c %s net-undefine %s" % (virt2uri(virt), network) + ret, out = run_remote(server, cmd) + + return ret + +def undefine_netpool(server, virt, net_name): + if net_name == None: + return FAIL + + ret = net_undefine(net_name, server, virt) + if ret != 0: + logger.error("Failed to undefine Virtual Network '%s'", net_name) + return FAIL + + return PASS + +def 
create_netpool(server, virt, test_pool, pool_attr_list): + status = PASS + rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") + rpcs_conn = eval("rpcs_service." + rpcs)(server) + curr_cim_rev, changeset = get_provider_version(virt, server) + if curr_cim_rev < libvirt_cim_child_pool_rev: + try: + rpcs_conn.CreateChildResourcePool() + except pywbem.CIMError, (err_no, desc): + if err_no == cim_errno : + logger.info("Got expected exception for '%s'service", cim_mname) + logger.info("Errno is '%s' ", err_no) + logger.info("Error string is '%s'", desc) + return PASS + else: + logger.error("Unexpected rc code %s and description %s\n", + err_no, desc) + return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + n_list = net_list(server, virt) + for _net_name in n_list: + net_xml = NetXML(server=server, networkname=_net_name, + virt=virt, is_new_net=False) + pool_use_attr = net_xml.xml_get_netpool_attr_list() + if pool_attr_list['Address'] in pool_use_attr: + logger.error("IP address is in use by a different network") + return FAIL + + net_pool_rasds = get_pool_rasds(server, virt) + if len(net_pool_rasds) == 0: + logger.error("We can not get NetPoolRASDs") + return FAIL + else: + net_pool_rasds[0]['PoolID'] = "NetworkPool/%s" % test_pool + for attr, val in pool_attr_list.iteritems(): + net_pool_rasds[0][attr] = val + + pool_settings = inst_to_mof(net_pool_rasds[0]) + + try: + rpcs_conn.CreateChildResourcePool(ElementName=test_pool, + Settings=[pool_settings]) + except Exception, details: + logger.error("Error in childpool creation") + logger.error(details) + return FAIL + + return status + + +def verify_pool(server, virt, pooltype, poolname, pool_attr_list): + status = FAIL + pool_list = EnumInstances(server, pooltype) + if len(pool_list) < 1: + logger.error("Return %i instances, expected at least one instance", + len(pool_list)) + return FAIL + + poolid = "NetworkPool/%s" % poolname + for i in range(0, len(pool_list)): + ret_pool = pool_list[i].InstanceID + if ret_pool != poolid: + continue + + net_xml = NetXML(server, virt=virt, networkname=poolname, + is_new_net=False) + ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() + + for i in range(0, len(ret_pool_attr_list)): + if ret_pool_attr_list[i] not in pool_attr_list.itervalues(): + logger.error("Got error when parsing %s", ret_pool_attr_list[i]) + return FAIL + + status = PASS + + return status diff -r 9391439d65e7 -r fe5b8e3ff97a suites/libvirt-cim/lib/XenKvmLib/vxml.py --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Wed May 13 07:28:33 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Thu May 14 00:43:21 2009 -0700 @@ -197,7 +197,8 @@ def __init__(self, server, bridgename=const.default_bridge_name, networkname=const.default_network_name, - virt='xen'): + virt='xen', + is_new_net=True): def get_valid_bridge_name(server): bridge_list = live.available_bridges(server) @@ -222,6 +223,17 @@ self.net_name = networkname self.server = server + if is_new_net is False: + cmd = "virsh net-dumpxml %s" % self.net_name + s, net_xml = utils.run_remote(server, cmd) + if s != 0: + logger.error("Encounter error dump netxml") + return None + else: + self.xml_string = net_xml + self.xdoc = minidom.parseString(self.xml_string) + return + network = self.add_sub_node(self.xdoc, 'network') self.add_sub_node(network, 'name', self.net_name) self.add_sub_node(network, 'uuid', set_uuid()) @@ -259,6 +271,22 @@ npoolname = self.get_value_xpath('/network/name') return npoolname + def xml_get_netpool_attr_list(self): + pool_attr_list = [] + + 
npoolmode = self.get_value_xpath('/network/forward/@mode') + npooladdr = self.get_value_xpath('/network/ip/@address') + npoolmask = self.get_value_xpath('/network/ip/@netmask') + npoolstart = self.get_value_xpath('/network/ip/dhcp/range/@start') + npoolend = self.get_value_xpath('/network/ip/dhcp/range/@end') + + pool_attr_list.append(npoolmode) + pool_attr_list.append(npooladdr) + pool_attr_list.append(npoolmask) + pool_attr_list.append(npoolstart) + pool_attr_list.append(npoolend) + + return pool_attr_list class PoolXML(Virsh, XMLClass): From yunguol at cn.ibm.com Thu May 14 07:50:24 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Thu, 14 May 2009 00:50:24 -0700 Subject: [Libvirt-cim] [PATCH] [TEST]Update RPCS/04 with the latest updates of pool verification Message-ID: # HG changeset patch # User Guolian Yun # Date 1242287412 25200 # Node ID e1ca990097ed08771ba19aa8d6ac21af16c10a65 # Parent 9391439d65e7ec6b88f34923d97f969c6114a237 [TEST]Update RPCS/04 with the latest updates of pool verification Tested for KVM with current sources Signed-off-by: Guolian Yun diff -r 9391439d65e7 -r e1ca990097ed suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Wed May 13 07:28:33 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Thu May 14 00:50:12 2009 -0700 @@ -39,45 +39,59 @@ # OUT -- Error -- String -- Encoded error instance if the operation # failed and did not return a job # -# REVISIT : -# -------- -# As of now the CreateChildResourcePool() simply throws an Exception. -# We must improve this tc once the service is implemented. -# -# -Date: 20.02.2008 - +# Exception details before Revision 837 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 837, the service is implemented +# +# -Date: 20.02.2008 import sys -import pywbem -from XenKvmLib import rpcs_service from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class +from XenKvmLib.common_util import destroy_netpool +from XenKvmLib.pool import create_netpool, verify_pool -cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED -cim_mname = "CreateChildResourcePool" +test_pool = "testpool" +test_mode = ["nat"] @do_main(platform_sup) def main(): + status = PASS options = main.options - rpcs_conn = eval("rpcs_service." 
+ get_typed_class(options.virt, \ - "ResourcePoolConfigurationService"))(options.ip) - try: - rpcs_conn.CreateChildResourcePool() - except pywbem.CIMError, (err_no, desc): - if err_no == cim_errno : - logger.info("Got expected exception for '%s' service", cim_mname) - logger.info("Errno is '%s' ", err_no) - logger.info("Error string is '%s'", desc) - return PASS - else: - logger.error("Unexpected rc code %s and description %s\n", - err_no, desc) + + np = get_typed_class(options.virt, 'NetworkPool') + np_id = "NetworkPool/%s" % test_pool + pool_attr = { + "Address" : "192.168.0.8", + "Netmask" : "255.255.255.0", + "IPRangeStart" : "192.168.0.9", + "IPRangeEnd" : "192.168.0.15" + } + for i in range(0, len(test_mode)): + pool_attr["ForwardMode"] = test_mode[i] + status = create_netpool(options.ip, options.virt, + test_pool, pool_attr) + if status != PASS: + logger.error("Error in networkpool creation") return FAIL - - logger.error("The execution should not have reached here!!") - return FAIL + + status = verify_pool(options.ip, options.virt, np, + test_pool, pool_attr) + if status != PASS: + logger.error("Error in networkpool verification") + destroy_netpool(options.ip, options.virt, test_pool) + return FAIL + + status = destroy_netpool(options.ip, options.virt, test_pool) + if status != PASS: + logger.error("Unable to destroy networkpool %s", test_pool) + return FAIL + + return status + if __name__ == "__main__": sys.exit(main()) - From yunguol at cn.ibm.com Thu May 14 08:52:57 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Thu, 14 May 2009 01:52:57 -0700 Subject: [Libvirt-cim] [PATCH] [TEST]Update RPCS/07 with the latest updates of pool verification Message-ID: # HG changeset patch # User Guolian Yun # Date 1242291170 25200 # Node ID f899232889810985dadbaabeed1198bad05edde2 # Parent 9391439d65e7ec6b88f34923d97f969c6114a237 [TEST]Update RPCS/07 with the latest updates of pool verification Tested for KVM with current sources Signed-off-by: Guolian Yun diff -r 9391439d65e7 -r f89923288981 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Wed May 13 07:28:33 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Thu May 14 01:52:50 2009 -0700 @@ -33,10 +33,12 @@ # OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started # OUT -- Error-- String -- Encoded error instance if the operation # failed and did not return a job. -# REVISIT : -# -------- -# As of now the DeleteResourcePool() simply throws an Exception. -# We must improve this tc once the service is implemented. 
+# +# Exception details before Revision 841 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 841, the service is implemented # # -Date: 20.02.2008 @@ -46,32 +48,79 @@ from XenKvmLib import rpcs_service from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS -from XenKvmLib.const import do_main, platform_sup +from XenKvmLib.const import do_main, platform_sup, get_provider_version +from XenKvmLib.enumclass import EnumInstances, EnumNames from XenKvmLib.classes import get_typed_class +from XenKvmLib.common_util import destroy_netpool +from XenKvmLib.pool import create_netpool, verify_pool cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED cim_mname = "DeleteResourcePool" +libvirt_cim_child_pool_rev = 841 +test_pool = "nat_pool" @do_main(platform_sup) def main(): + status = FAIL options = main.options rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \ "ResourcePoolConfigurationService"))(options.ip) - try: - rpcs_conn.DeleteResourcePool() - except pywbem.CIMError, (err_no, desc): - if err_no == cim_errno : - logger.info("Got expected exception for '%s' service", cim_mname) - logger.info("Errno is '%s' ", err_no) - logger.info("Error string is '%s'", desc) - return PASS - else: - logger.error("Unexpected rc code %s and description %s\n", - err_no, desc) + curr_cim_rev, changeset = get_provider_version(options.virt, options.ip) + if curr_cim_rev < libvirt_cim_child_pool_rev: + try: + rpcs_conn.DeleteResourcePool() + except pywbem.CIMError, (err_no, desc): + if err_no == cim_errno : + logger.info("Got expected exception for '%s' service", cim_mname) + logger.info("Errno is '%s' ", err_no) + logger.info("Error string is '%s'", desc) + return PASS + else: + logger.error("Unexpected rc code %s and description %s\n", + err_no, desc) + return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + pool_attr = { + "Address" : "192.168.0.8", + "Netmask" : "255.255.255.0", + "IPRangeStart" : "192.168.0.9", + "IPRangeEnd" : "192.168.0.15", + "ForwardMode" : "nat" + } + np = get_typed_class(options.virt, 'NetworkPool') + np_id = "NetworkPool/%s" % test_pool + + status = create_netpool(options.ip, options.virt, test_pool, pool_attr) + if status != PASS: + logger.error("Error in networkpool creation") return FAIL - - logger.error("The execution should not have reached here!!") - return FAIL + + status = verify_pool(options.ip, options.virt, np, + test_pool, pool_attr) + if status != PASS: + logger.error("Error in networkpool verification") + destroy_netpool(options.ip, options.virt, test_pool) + return FAIL + + netpool = EnumNames(options.ip, np) + for i in range(0, len(netpool)): + ret_pool = netpool[i].keybindings['InstanceID'] + if ret_pool == np_id: + pool_settings = netpool[i] + break + try: + rpcs_conn.DeleteResourcePool(Pool = pool_settings) + netpool = EnumInstances(options.ip, np) + for i in range(0, len(netpool)): + ret_pool = netpool[i].InstanceID + if ret_pool == np_id: + raise Exception("Failed to delete %s" % test_pool) + status = PASS + except Exception, details: + logger.error(details) + return FAIL + + return status + if __name__ == "__main__": sys.exit(main()) - From deeptik at linux.vnet.ibm.com Thu May 14 08:59:55 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Thu, 14 May 2009 14:29:55 +0530 Subject: [Libvirt-cim] [PATCH 2 of 2] Add VSSS 03_create_snapshot.py In-Reply-To: <3c03fd52e4400d111720.1242082900@localhost.localdomain> References: <3c03fd52e4400d111720.1242082900@localhost.localdomain> Message-ID: 
<4A0BDD8B.9030004@linux.vnet.ibm.com> Just minor comments... Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1242082293 25200 > # Node ID 3c03fd52e4400d1117203c47a50ca1b8e75bf5f3 > # Parent 61d1a7aa49471be6604552efc2dba8491ccd0ad7 > Add VSSS 03_create_snapshot.py > > This test case attempts a guest snapshot and verifies the results. > > Signed-off-by: Kaitlin Rupert > > diff -r 61d1a7aa4947 -r 3c03fd52e440 suites/libvirt-cim/cimtest/VirtualSystemSnapshotService/03_create_snapshot.py > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/suites/libvirt-cim/cimtest/VirtualSystemSnapshotService/03_create_snapshot.py Mon May 11 15:51:33 2009 -0700 > @@ -0,0 +1,140 @@ > +#!/usr/bin/python > +# > +# Copyright 2009 IBM Corp. > +# > +# Authors: > +# Kaitlin Rupert > +# > +# This library is free software; you can redistribute it and/or > +# modify it under the terms of the GNU General Public > +# License as published by the Free Software Foundation; either > +# version 2.1 of the License, or (at your option) any later version. > +# > +# This library is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > +# General Public License for more details. > +# > +# You should have received a copy of the GNU General Public > +# License along with this library; if not, write to the Free Software > +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA > +# > A one line desc of the test case would be good. > + > +import sys > +from pywbem import CIM_ERR_FAILED, cim_types > CIM_ERR_FAILED is not needed. > +from CimTest.Globals import logger > +from CimTest.ReturnCodes import PASS, FAIL > +from XenKvmLib.const import do_main > +from XenKvmLib.vxml import get_class > +from XenKvmLib.classes import get_typed_class, inst_to_mof > +from XenKvmLib.enumclass import EnumNames, EnumInstances, GetInstance > +from XenKvmLib.vsss import remove_snapshot > + > +sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] > + > +SNAPSHOT = cim_types.Uint16(32769) > +test_dom = "snapshot_vm" > + > +def get_cs_ref(virt, ip): > + cs_cn = get_typed_class(virt, "ComputerSystem") > + > + cs_refs = EnumNames(ip, cs_cn) > + if cs_refs is None or len(cs_refs) < 1: > + logger.error("Exp at least one domain defined on the system") > + return FAIL, None > + > + cs_ref = None > + for ref in cs_refs: > + if ref['Name'] == test_dom: > + cs_ref = ref > + break > + > + if cs_ref is None: > + logger.error("Enum of %s didn't return %s", cs_cn, test_dom) > + return FAIL, None > + > + return PASS, cs_ref > + > +def get_vsssc_inst(virt, ip): > + vsssc_cn = get_typed_class(virt, "VirtualSystemSnapshotServiceCapabilities") > + > + vsssc_insts = EnumInstances(ip, vsssc_cn, ret_cim_inst=True) > + if vsssc_insts is None or len(vsssc_insts) < 1: > + logger.error("Exp at least one %s", vsssc_cn) > + return FAIL, None > + > + vsssc = vsssc_insts[0] > + > + #Override the additional instance values. 
We only care about the key > + #values (eventhough CreateSnapshot takes a instance) > + vsssc['SynchronousMethodsSupported'] = "" > + vsssc['SnapshotTypesSupported'] = "" > + > + vsssc = inst_to_mof(vsssc) > + > + return PASS, vsssc > + > + at do_main(sup_types) > +def main(): > + options = main.options > + > + cxml = get_class(options.virt)(test_dom) > + > + try: > + ret = cxml.cim_define(options.ip) > + if not ret: > + raise Exception("Unable to define %s", test_dom) > + > + status = cxml.cim_start(options.ip) > + if status != PASS: > + raise Exception("Failed to start the defined domain: %s" % test_dom) > + > + status, cs_ref = get_cs_ref(options.virt, options.ip) > + if status != PASS: > + raise Exception("Unable to get reference for %s" % test_dom) > + > + status, vsssc = get_vsssc_inst(options.virt, options.ip) > + if status != PASS: > + raise Exception("Unable to get VSSSC instance") > + > + vsss_cn = get_typed_class(options.virt, "VirtualSystemSnapshotService") > + vsss_refs = EnumNames(options.ip, vsss_cn) > + if vsss_refs is None or len(vsss_refs) < 1: > + raise Exception("Exp at least one %s" % vsss_cn) > + > + service = vsss_refs[0] > + keys = { 'Name' : service['Name'], > + 'CreationClassName' : service['CreationClassName'], > + 'SystemCreationClassName' : service['SystemCreationClassName'], > + 'SystemName' : service['SystemName'] > + } > + service = GetInstance(options.ip, vsss_cn, keys) > + > + output = service.CreateSnapshot(AffectedSystem=cs_ref, > + SnapshotSettings=vsssc, > + SnapshotType=SNAPSHOT) > + > + ret = output[0] > + if ret != 0: > + raise Exception("Snapshot of %s failed!" % test_dom) > + > + if output[1]['Job'] is None: > + raise Exception("CreateSnapshot failed to return a CIM job inst") > + > + if output[1]['ResultingSnapshot'] is None: > + raise Exception("CreateSnapshot failed to return ResultingSnapshot") > + > + except Exception, detail: > + logger.error("Exception: %s", detail) > + status = FAIL > + > + cxml.cim_destroy(options.ip) > + cxml.undefine(options.ip) > + > + remove_snapshot(options.ip, test_dom) > + > + return status > + > +if __name__ == "__main__": > + sys.exit(main()) > + > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim > -- Thanks and Regards, Deepti B. 
Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Thu May 14 10:51:39 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Thu, 14 May 2009 16:21:39 +0530 Subject: [Libvirt-cim] [PATCH] [TEST]Update RPCS/04 with the latest updatesof pool verification In-Reply-To: References: Message-ID: <4A0BF7BB.20502@linux.vnet.ibm.com> yunguol at cn.ibm.com wrote: > # HG changeset patch > # User Guolian Yun > # Date 1242287412 25200 > # Node ID e1ca990097ed08771ba19aa8d6ac21af16c10a65 > # Parent 9391439d65e7ec6b88f34923d97f969c6114a237 > [TEST]Update RPCS/04 with the latest updates of pool verification > > > Tested for KVM with current sources > Signed-off-by: Guolian Yun > > diff -r 9391439d65e7 -r e1ca990097ed suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py > --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Wed May 13 07:28:33 2009 -0700 > +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Thu May 14 00:50:12 2009 -0700 > @@ -39,45 +39,59 @@ > # OUT -- Error -- String -- Encoded error instance if the operation > # failed and did not return a job > # > -# REVISIT : > -# -------- > -# As of now the CreateChildResourcePool() simply throws an Exception. > -# We must improve this tc once the service is implemented. > -# > -# -Date: 20.02.2008 > - > +# Exception details before Revision 837 > +# ----- > +# Error code: CIM_ERR_NOT_SUPPORTED > +# > +# After revision 837, the service is implemented > +# > +# -Date: 20.02.2008 > > import sys > -import pywbem > -from XenKvmLib import rpcs_service > from CimTest.Globals import logger > from CimTest.ReturnCodes import FAIL, PASS > from XenKvmLib.const import do_main, platform_sup > from XenKvmLib.classes import get_typed_class > +from XenKvmLib.common_util import destroy_netpool > +from XenKvmLib.pool import create_netpool, verify_pool > > -cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > -cim_mname = "CreateChildResourcePool" > +test_pool = "testpool" > +test_mode = ["nat"] > > Why is routed and isolated mode not included. > @do_main(platform_sup) > def main(): > + status = PASS > options = main.options > - rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \ > - "ResourcePoolConfigurationService"))(options.ip) > - try: > - rpcs_conn.CreateChildResourcePool() > - except pywbem.CIMError, (err_no, desc): > - if err_no == cim_errno : > - logger.info("Got expected exception for '%s' service", cim_mname) > - logger.info("Errno is '%s' ", err_no) > - logger.info("Error string is '%s'", desc) > - return PASS > - else: > - logger.error("Unexpected rc code %s and description %s\n", > - err_no, desc) > + > + np = get_typed_class(options.virt, 'NetworkPool') > + np_id = "NetworkPool/%s" % test_pool > + pool_attr = { > + "Address" : "192.168.0.8", > + "Netmask" : "255.255.255.0", > + "IPRangeStart" : "192.168.0.9", > + "IPRangeEnd" : "192.168.0.15" > + } > The test case failed saying the IP was already in use. I know this is not a test case issue but, using a random generated IP address is always better than hardcoding it. 
you can use something like this to create one: ip_base = random.randint(1, 100) addr = subnet+'%d' % ip_base > + for i in range(0, len(test_mode)): > + pool_attr["ForwardMode"] = test_mode[i] > + status = create_netpool(options.ip, options.virt, > + test_pool, pool_attr) > + if status != PASS: > + logger.error("Error in networkpool creation") > return FAIL > - > - logger.error("The execution should not have reached here!!") > - return FAIL > + > + status = verify_pool(options.ip, options.virt, np, > + test_pool, pool_attr) > + if status != PASS: > + logger.error("Error in networkpool verification") > + destroy_netpool(options.ip, options.virt, test_pool) > + return FAIL > + > + status = destroy_netpool(options.ip, options.virt, test_pool) > + if status != PASS: > + logger.error("Unable to destroy networkpool %s", test_pool) > + return FAIL > Need to undefine the testpool before exiting > + > + return status > + > if __name__ == "__main__": > sys.exit(main()) > - > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim > -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Thu May 14 10:53:35 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Thu, 14 May 2009 16:23:35 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Add general functions for poolverification In-Reply-To: References: Message-ID: <4A0BF82F.3050506@linux.vnet.ibm.com> +1 -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Thu May 14 10:58:34 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Thu, 14 May 2009 16:28:34 +0530 Subject: [Libvirt-cim] [PATCH] [TEST]Update RPCS/07 with the latest updatesof pool verification In-Reply-To: References: Message-ID: <4A0BF95A.5070307@linux.vnet.ibm.com> +1 -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Thu May 14 15:28:45 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Thu, 14 May 2009 12:28:45 -0300 Subject: [Libvirt-cim] [PATCH] This fix the generation of the tag on fully-virtualizable Xen guests Message-ID: # HG changeset patch # User Richard Maciel # Date 1242071235 10800 # Node ID c0bd6c9a2c0084398784bb1ae36649bd3400e36c # Parent 5608b9455cd32fccbc324cd540c509d7230a113f This fix the generation of the tag on fully-virtualizable Xen guests. Right now it is generated with the boot device as a the value of the node (e.g. hd) However, the boot device must be a property of the node (e.g. 
) Signed-off-by: Richard Maciel diff -r 5608b9455cd3 -r c0bd6c9a2c00 libxkutil/xmlgen.c --- a/libxkutil/xmlgen.c Mon Apr 27 17:05:48 2009 -0700 +++ b/libxkutil/xmlgen.c Mon May 11 16:47:15 2009 -0300 @@ -457,10 +457,12 @@ if (tmp == NULL) return XML_ERROR; - tmp = xmlNewChild(root, NULL, BAD_CAST "boot", BAD_CAST os->boot); + tmp = xmlNewChild(root, NULL, BAD_CAST "boot", NULL); if (tmp == NULL) return XML_ERROR; + xmlNewProp(tmp, BAD_CAST "dev", BAD_CAST os->boot); + tmp = xmlNewChild(root, NULL, BAD_CAST "features", NULL); xmlNewChild(tmp, NULL, BAD_CAST "pae", NULL); xmlNewChild(tmp, NULL, BAD_CAST "acpi", NULL); From dayne.medlyn at hp.com Thu May 14 19:46:25 2009 From: dayne.medlyn at hp.com (Medlyn, Dayne (VSL - Ft Collins)) Date: Thu, 14 May 2009 19:46:25 +0000 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: <4A0B6F5D.4030700@linux.vnet.ibm.com> References: <4A0B6F5D.4030700@linux.vnet.ibm.com> Message-ID: Kaitlin, Thanks for the correction. It seems were trying to use these properties correctly and there is something just not right. Using the same wbemcli command I get: -SystemCreationClassName="Xen_ComputerSystem" -SystemName="Domain-0" -CreationClassName="Xen_Memory" -DeviceID="Domain-0/mem" -ConsumableBlocks=1717760 -NumberOfBlocks=4294967040 -BlockSize=4096 One difference I did notice is that we are trying to use these values from Dom0 to determine the amount of available memory for guests to use. Perhaps for Dom0 these values just map differently. My objective is to identify how much memory is available on the hypervisor that can be allocated to new guests. Looking more closely, I wonder if we should be using Xen_MemoryPool somehow to do this instead. What is the relationship between the Capacity and Reserved properties? I have not quite been able to make sense out of what these values mean. What I have noticed is that a host with no defined guests starts with Reserved smaller than Capacity: -PoolID="MemoryPool/0" -Primordial=FALSE -Capacity=8385536 -Reserved=8064748 -ResourceType=4 -OtherResourceType= -ResourceSubType= -AllocationUnits="KiloBytes" As guests are create and start the Reserved count increases and grows beyond the capacity. I am not quite sure how to make use of this information. Do you have any insights? Dayne > -----Original Message----- > From: libvirt-cim-bounces at redhat.com [mailto:libvirt-cim- > bounces at redhat.com] On Behalf Of Kaitlin Rupert > Sent: Wednesday, May 13, 2009 7:10 PM > To: List for discussion and development of libvirt CIM > Subject: Re: [Libvirt-cim] What does NumberOfBlocks and > ConsumableBlocks in theXen_Memory class represent? > > Medlyn, Dayne (VSL - Ft Collins) wrote: > > All, > > > > I am trying to understand the use of NumberOfBlocks and > ConsumableBlocks in the Xen_Memory class, specifically for the Xen > host. > > What I have noticed is that between libvirt-cim-0.4.1 and libvirt- > cim-0.5.2 the values for NumberOfBlock is now different than > ConsumableBlocks and > > much larger than the physical memory installed on the system. > > Is it the case that NumberOfBlocks represents the maximum possible > blocks for the hardware, > > or some such number ConsumableBlocks is the memory that is > > actually installed in the system? On my system, however, > NumberOfBlocks reports 16TB where /proc/meminfo > > reports 32Tb for VmallocTotal. In short, should I be using > ConsumableBlocks to determine the total physical memory on the system? 
> > > Hi Dayne, > > It looks like there is a bug here. Currently, the providers use the > following representation: > > NumberOfBlocks: max amount of memory that can be allocated to a guest > ConsumableBlocks: current memory allocated to the guest > > However, these values should be reversed based on the attribute > definitions. > > Here's an example using one of the guests on my system: > > # virsh dominfo rstest_domainId: - > Name: rstest_domain > UUID: 746de06d-cb45-4efd-bc18-bf91d10bec84 > State: shut off > CPU(s): 1 > Max memory: 131072 kB > Used memory: 130048 kB > Autostart: disable > > We take the max and used memory values libvirt reports and then convert > them based on the block size. > > # wbemcli gi > 'http://localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memo > ry",DeviceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerS > ystem",SystemName="rstest_domain"' > -nl > localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memory",Devi > ceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerSystem",S > ystemName="rstest_domain" > > > -TransitioningToState=12 > -SystemCreationClassName="Xen_ComputerSystem" > -SystemName="rstest_domain" > -CreationClassName="Xen_Memory" > -DeviceID="rstest_domain/mem" > > > > -BlockSize=4096 > -NumberOfBlocks=32768 > -ConsumableBlocks=32512 > > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim From rmaciel at linux.vnet.ibm.com Thu May 14 20:51:03 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Thu, 14 May 2009 17:51:03 -0300 Subject: [Libvirt-cim] [PATCH] Add "disk" type storage pools In-Reply-To: References: Message-ID: <4A0C8437.2030902@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1242062614 25200 > # Node ID e08c78615c3ec653c2979b4619b15d48a1c56d48 > # Parent 3d42e1423d027c04c104ff0be6d99b86c46d1257 > Add "disk" type storage pools. > > This storage pool type is a disk backed pool, instead of being a file or > directory backed pool. > > This is tricky to test - you'll need a free disk / partition that you can > mount. I've tested this using a spare LVM partition. 
> > Signed-off-by: Kaitlin Rupert > > diff -r 3d42e1423d02 -r e08c78615c3e src/Virt_ResourcePoolConfigurationService.c > --- a/src/Virt_ResourcePoolConfigurationService.c Mon Apr 27 17:05:48 2009 -0700 > +++ b/src/Virt_ResourcePoolConfigurationService.c Mon May 11 10:23:34 2009 -0700 > @@ -148,8 +148,8 @@ > pool->pool_info.disk.src_dir = NULL; > } > > -static const char *disk_fs_pool(CMPIInstance *inst, > - struct virt_pool *pool) > +static const char *disk_fs_or_disk_pool(CMPIInstance *inst, > + struct virt_pool *pool) > { > const char *val = NULL; > > @@ -195,19 +195,19 @@ > case DISK_POOL_DIR: > break; > case DISK_POOL_FS: > - msg = disk_fs_pool(inst, pool); > - if (msg != NULL) > - goto out; > + case DISK_POOL_DISK: > + msg = disk_fs_or_disk_pool(inst, pool); > break; > case DISK_POOL_NETFS: > msg = disk_netfs_pool(inst, pool); > - if (msg != NULL) > - goto out; > break; > default: > return "Storage pool type not supported"; > } > > + if (msg != NULL) > + goto out; > + > pool->pool_info.disk.pool_type = type; > > if (cu_get_str_prop(inst, "Path", &val) != CMPI_RC_OK) > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 14 22:10:44 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 14 May 2009 15:10:44 -0700 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks inthe Xen_Memory class represent? In-Reply-To: References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> Message-ID: <4A0C96E4.6060301@linux.vnet.ibm.com> Medlyn, Dayne (VSL - Ft Collins) wrote: > Kaitlin, > > Thanks for the correction. It seems were trying to use these properties correctly and there is something just not right. Using the same wbemcli command I get: > > -SystemCreationClassName="Xen_ComputerSystem" > -SystemName="Domain-0" > -CreationClassName="Xen_Memory" > -DeviceID="Domain-0/mem" > > -ConsumableBlocks=1717760 > -NumberOfBlocks=4294967040 > -BlockSize=4096 Yes, that's definitely a bug. The values for ConsumableBlocks and NumberOfBlocks should be swapped. I'd hoped to have a bugfix out today, but it looks like it'll be tomorrow. > > One difference I did notice is that we are trying to use these values from Dom0 to determine the amount of available memory for guests to use. Perhaps for Dom0 these values just map differently. > > My objective is to identify how much memory is available on the hypervisor that can be allocated to new guests. Looking more closely, I wonder if we should be using Xen_MemoryPool somehow to do this instead. What is the relationship between the Capacity and Reserved properties? I have not quite been able to make sense out of what these values mean. What I have noticed is that a host with no defined guests starts with Reserved smaller than Capacity: > > -PoolID="MemoryPool/0" > -Primordial=FALSE > -Capacity=8385536 > -Reserved=8064748 > -ResourceType=4 > -OtherResourceType= > -ResourceSubType= > -AllocationUnits="KiloBytes" > > As guests are create and start the Reserved count increases and grows beyond the capacity. I am not quite sure how to make use of this information. Do you have any insights? The Capacity value is the memory value libvirt reports for the host (you'd also get this value if you use: virsh nodeinfo). 
The Reserved value is the some of all the memory that is currently allocated to the guests on the system (as reported by libvirt). This includes guests that aren't running, which is why you are seeing the value grow beyond capacity. We don't represent the host capabilities, but in the case of Xen, you can get around that by pulling some things from Dom0. However, using Dom0's attribute may not give you the full picture you're looking for. I would suggest taking a look at a provider set that represents the host information. Something like the sblim-base providers should this info. > > > Dayne > > > >> -----Original Message----- >> From: libvirt-cim-bounces at redhat.com [mailto:libvirt-cim- >> bounces at redhat.com] On Behalf Of Kaitlin Rupert >> Sent: Wednesday, May 13, 2009 7:10 PM >> To: List for discussion and development of libvirt CIM >> Subject: Re: [Libvirt-cim] What does NumberOfBlocks and >> ConsumableBlocks in theXen_Memory class represent? >> >> Medlyn, Dayne (VSL - Ft Collins) wrote: >>> All, >>> >>> I am trying to understand the use of NumberOfBlocks and >> ConsumableBlocks in the Xen_Memory class, specifically for the Xen >> host. >>> What I have noticed is that between libvirt-cim-0.4.1 and libvirt- >> cim-0.5.2 the values for NumberOfBlock is now different than >> ConsumableBlocks and >> > much larger than the physical memory installed on the system. >>> Is it the case that NumberOfBlocks represents the maximum possible >> blocks for the hardware, >> > or some such number ConsumableBlocks is the memory that is >>> actually installed in the system? On my system, however, >> NumberOfBlocks reports 16TB where /proc/meminfo >>> reports 32Tb for VmallocTotal. In short, should I be using >> ConsumableBlocks to determine the total physical memory on the system? >> >> >> Hi Dayne, >> >> It looks like there is a bug here. Currently, the providers use the >> following representation: >> >> NumberOfBlocks: max amount of memory that can be allocated to a guest >> ConsumableBlocks: current memory allocated to the guest >> >> However, these values should be reversed based on the attribute >> definitions. >> >> Here's an example using one of the guests on my system: >> >> # virsh dominfo rstest_domainId: - >> Name: rstest_domain >> UUID: 746de06d-cb45-4efd-bc18-bf91d10bec84 >> State: shut off >> CPU(s): 1 >> Max memory: 131072 kB >> Used memory: 130048 kB >> Autostart: disable >> >> We take the max and used memory values libvirt reports and then convert >> them based on the block size. 
>> >> # wbemcli gi >> 'http://localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memo >> ry",DeviceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerS >> ystem",SystemName="rstest_domain"' >> -nl >> localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memory",Devi >> ceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerSystem",S >> ystemName="rstest_domain" >> >> >> -TransitioningToState=12 >> -SystemCreationClassName="Xen_ComputerSystem" >> -SystemName="rstest_domain" >> -CreationClassName="Xen_Memory" >> -DeviceID="rstest_domain/mem" >> >> >> >> -BlockSize=4096 >> -NumberOfBlocks=32768 >> -ConsumableBlocks=32512 >> >> >> -- >> Kaitlin Rupert >> IBM Linux Technology Center >> kaitlin at linux.vnet.ibm.com >> >> _______________________________________________ >> Libvirt-cim mailing list >> Libvirt-cim at redhat.com >> https://www.redhat.com/mailman/listinfo/libvirt-cim > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From dayne.medlyn at hp.com Thu May 14 23:17:21 2009 From: dayne.medlyn at hp.com (Medlyn, Dayne (VSL - Ft Collins)) Date: Thu, 14 May 2009 23:17:21 +0000 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: <4A0C96E4.6060301@linux.vnet.ibm.com> References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> Message-ID: > > Medlyn, Dayne (VSL - Ft Collins) wrote: > > Kaitlin, > > > > Thanks for the correction. It seems were trying to use these > properties correctly and there is something just not right. Using the > same wbemcli command I get: > > > > -SystemCreationClassName="Xen_ComputerSystem" > > -SystemName="Domain-0" > > -CreationClassName="Xen_Memory" > > -DeviceID="Domain-0/mem" > > > > -ConsumableBlocks=1717760 > > -NumberOfBlocks=4294967040 > > -BlockSize=4096 > > Yes, that's definitely a bug. The values for ConsumableBlocks and > NumberOfBlocks should be swapped. I'd hoped to have a bugfix out > today, > but it looks like it'll be tomorrow. I think there is something else going on here. As far as I can tell the numbers are not swapped. Here is what I think is going on. The NumberOfBlock x BlockSize equates to about 16TB (not what my system has - my system only has 8GB). If you look at virsh dominfo Domain-0 on my box I get: Max memory: no limit Used memory: 6595584 kB If NumberOfBlocks is MaxInt (of some sort) than this would make some sort of sense and ConsumableBlocks contains the correct value. It is a little bit misleading since processes in Dom0 take memory away from what can be allocated to a DomU. This is evident from this /proc/meminfo snippet: MemTotal: 6595584 kB MemFree: 5940684 kB Buffers: 16112 kB Cached: 264716 kB SwapCached: 0 kB Active: 138332 kB Inactive: 216032 kB SwapTotal: 2626544 kB SwapFree: 2626544 kB Dirty: 436 kB ... VmallocTotal: 34359738367 kB VmallocUsed: 267184 kB VmallocChunk: 34359470859 kB DirectMap4k: 8066284 kB DirectMap2M: 0 kB The current WBEM values: -ConsumableBlocks=1648896 -NumberOfBlocks=4294967040 -BlockSize=4096 Currently, on this system ConsumableBlocks represent MemTotal or the current memory allocated to the guest, even though it is not completely accurately representing the free memory on Dom0 that is available to new DomUs. 
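A quick sanity check on those numbers (assuming BlockSize is in bytes):

        ConsumableBlocks * BlockSize = 1648896 * 4096 bytes = 6595584 kB, which matches the MemTotal value above
        NumberOfBlocks * BlockSize = 4294967040 * 4096 bytes, which is roughly 16 TB and is consistent with the MaxInt reading of the "no limit" max memory.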
The issue is that between version 0.4.1 and 0.5.2 NumberOfBlocks for a Dom0 changed to be MaxInt. It used to match ConsumableBlocks. I don't think there is an issue and I can work with this now that I understand it. As you point out below, I am probably not going to be able to get the information I want out of Xen_MemoryPool. Thanks for your help and insight. Dayne > > > > > One difference I did notice is that we are trying to use these values > from Dom0 to determine the amount of available memory for guests to > use. Perhaps for Dom0 these values just map differently. > > > > > > My objective is to identify how much memory is available on the > hypervisor that can be allocated to new guests. Looking more closely, > I wonder if we should be using Xen_MemoryPool somehow to do this > instead. What is the relationship between the Capacity and Reserved > properties? I have not quite been able to make sense out of what these > values mean. What I have noticed is that a host with no defined guests > starts with Reserved smaller than Capacity: > > > > -PoolID="MemoryPool/0" > > -Primordial=FALSE > > -Capacity=8385536 > > -Reserved=8064748 > > -ResourceType=4 > > -OtherResourceType= > > -ResourceSubType= > > -AllocationUnits="KiloBytes" > > > > As guests are create and start the Reserved count increases and grows > beyond the capacity. I am not quite sure how to make use of this > information. Do you have any insights? > > The Capacity value is the memory value libvirt reports for the host > (you'd also get this value if you use: virsh nodeinfo). > > The Reserved value is the some of all the memory that is currently > allocated to the guests on the system (as reported by libvirt). This > includes guests that aren't running, which is why you are seeing the > value grow beyond capacity. > > We don't represent the host capabilities, but in the case of Xen, you > can get around that by pulling some things from Dom0. > > However, using Dom0's attribute may not give you the full picture > you're > looking for. I would suggest taking a look at a provider set that > represents the host information. Something like the sblim-base > providers should this info. > > > > > > > Dayne > > > > > > > >> -----Original Message----- > >> From: libvirt-cim-bounces at redhat.com [mailto:libvirt-cim- > >> bounces at redhat.com] On Behalf Of Kaitlin Rupert > >> Sent: Wednesday, May 13, 2009 7:10 PM > >> To: List for discussion and development of libvirt CIM > >> Subject: Re: [Libvirt-cim] What does NumberOfBlocks and > >> ConsumableBlocks in theXen_Memory class represent? > >> > >> Medlyn, Dayne (VSL - Ft Collins) wrote: > >>> All, > >>> > >>> I am trying to understand the use of NumberOfBlocks and > >> ConsumableBlocks in the Xen_Memory class, specifically for the Xen > >> host. > >>> What I have noticed is that between libvirt-cim-0.4.1 and libvirt- > >> cim-0.5.2 the values for NumberOfBlock is now different than > >> ConsumableBlocks and > >> > much larger than the physical memory installed on the system. > >>> Is it the case that NumberOfBlocks represents the maximum possible > >> blocks for the hardware, > >> > or some such number ConsumableBlocks is the memory that is > >>> actually installed in the system? On my system, however, > >> NumberOfBlocks reports 16TB where /proc/meminfo > >>> reports 32Tb for VmallocTotal. In short, should I be using > >> ConsumableBlocks to determine the total physical memory on the > system? > >> > >> > >> Hi Dayne, > >> > >> It looks like there is a bug here. 
Currently, the providers use the > >> following representation: > >> > >> NumberOfBlocks: max amount of memory that can be allocated to a > guest > >> ConsumableBlocks: current memory allocated to the guest > >> > >> However, these values should be reversed based on the attribute > >> definitions. > >> > >> Here's an example using one of the guests on my system: > >> > >> # virsh dominfo rstest_domainId: - > >> Name: rstest_domain > >> UUID: 746de06d-cb45-4efd-bc18-bf91d10bec84 > >> State: shut off > >> CPU(s): 1 > >> Max memory: 131072 kB > >> Used memory: 130048 kB > >> Autostart: disable > >> > >> We take the max and used memory values libvirt reports and then > convert > >> them based on the block size. > >> > >> # wbemcli gi > >> > 'http://localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memo > >> > ry",DeviceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerS > >> ystem",SystemName="rstest_domain"' > >> -nl > >> > localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memory",Devi > >> > ceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerSystem",S > >> ystemName="rstest_domain" > >> > >> > >> -TransitioningToState=12 > >> -SystemCreationClassName="Xen_ComputerSystem" > >> -SystemName="rstest_domain" > >> -CreationClassName="Xen_Memory" > >> -DeviceID="rstest_domain/mem" > >> > >> > >> > >> -BlockSize=4096 > >> -NumberOfBlocks=32768 > >> -ConsumableBlocks=32512 > >> > >> > >> -- > >> Kaitlin Rupert > >> IBM Linux Technology Center > >> kaitlin at linux.vnet.ibm.com > >> > >> _______________________________________________ > >> Libvirt-cim mailing list > >> Libvirt-cim at redhat.com > >> https://www.redhat.com/mailman/listinfo/libvirt-cim > > > > _______________________________________________ > > Libvirt-cim mailing list > > Libvirt-cim at redhat.com > > https://www.redhat.com/mailman/listinfo/libvirt-cim > > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim From rmaciel at linux.vnet.ibm.com Fri May 15 00:23:52 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Thu, 14 May 2009 21:23:52 -0300 Subject: [Libvirt-cim] [PATCH] Allow user to specify UUID to use when guestis created In-Reply-To: <9a16c7a7963cebddd145.1242064889@localhost.localdomain> References: <9a16c7a7963cebddd145.1242064889@localhost.localdomain> Message-ID: <4A0CB618.1060502@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1241805181 25200 > # Node ID 9a16c7a7963cebddd145212e57ccc9defa5e0a0c > # Parent e08c78615c3ec653c2979b4619b15d48a1c56d48 > Allow user to specify UUID to use when guest is created > > Also expose UUID in VSSD instances. 
> > Signed-off-by: Kaitlin Rupert > > diff -r e08c78615c3e -r 9a16c7a7963c schema/ComputerSystem.mof > --- a/schema/ComputerSystem.mof Mon May 11 10:23:34 2009 -0700 > +++ b/schema/ComputerSystem.mof Fri May 08 10:53:01 2009 -0700 > @@ -7,7 +7,6 @@ > ] > class Xen_ComputerSystem : CIM_ComputerSystem > { > - > [Description("UUID assigned to this DomU.")] > string UUID; > > diff -r e08c78615c3e -r 9a16c7a7963c schema/Virt_VSSD.mof > --- a/schema/Virt_VSSD.mof Mon May 11 10:23:34 2009 -0700 > +++ b/schema/Virt_VSSD.mof Fri May 08 10:53:01 2009 -0700 > @@ -12,4 +12,7 @@ > Values { "Turn Off", "Save state", "DMTF Reserved" }] > uint16 AutomaticShutdownAction; > > + [Description("UUID assigned to this DomU.")] > + string UUID; > + > }; > diff -r e08c78615c3e -r 9a16c7a7963c src/Virt_VSSD.c > --- a/src/Virt_VSSD.c Mon May 11 10:23:34 2009 -0700 > +++ b/src/Virt_VSSD.c Fri May 08 10:53:01 2009 -0700 > @@ -113,6 +113,9 @@ > CMSetProperty(inst, "VirtualSystemType", > (CMPIValue *)pfx, CMPI_chars); > > + CMSetProperty(inst, "UUID", > + (CMPIValue *)dominfo->uuid, CMPI_chars); > + > CMSetProperty(inst, "Caption", > (CMPIValue *)"Virtual System", CMPI_chars); > > diff -r e08c78615c3e -r 9a16c7a7963c src/Virt_VirtualSystemManagementService.c > --- a/src/Virt_VirtualSystemManagementService.c Mon May 11 10:23:34 2009 -0700 > +++ b/src/Virt_VirtualSystemManagementService.c Fri May 08 10:53:01 2009 -0700 > @@ -337,6 +337,12 @@ > free(domain->name); > domain->name = strdup(val); > > + ret = cu_get_str_prop(inst, "UUID", &val); > + if (ret == CMPI_RC_OK) { > + free(domain->uuid); > + domain->uuid = strdup(val); > + } > + > ret = cu_get_u16_prop(inst, "AutomaticShutdownAction", &tmp); > if (ret != CMPI_RC_OK) > tmp = 0; > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 15 00:49:50 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 14 May 2009 17:49:50 -0700 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> Message-ID: <4A0CBC2E.7080109@linux.vnet.ibm.com> Hi Dayne, Let me sort of work backwards here.. > Currently, on this system ConsumableBlocks represent MemTotal or the > current memory allocated to the guest, even though it is not > completely accurately representing the free memory on Dom0 that is > available to new DomUs. The issue is that between version 0.4.1 and > 0.5.2 NumberOfBlocks for a Dom0 changed to be MaxInt. It used to > match ConsumableBlocks. Correct - that's what the bug is here. A regression was introduced. In 0.4.1: -NumberOfBlocks the memory currently assigned to the guest -ConsumableBlocks the maximum memory allocated to the guest In 0.5.2: -NumberOfBlocks the maximum memory allocated to the guest -ConsumableBlocks the memory currently assigned to the guest So this should definitely be fixed since we aren't adhering to the definitions in the mof... > I think there is something else going on here. As far as I can tell > the numbers are not swapped. Here is what I think is going on. The > NumberOfBlock x BlockSize equates to about 16TB (not what my system Ah, my mistake. When I read your previous message, I thought you were talking about the issue above. The values do seem strange. 
We use the following calculation: NumberOfBlocks = (max_mem * 1024) / BlockSize ConsumableBlocks = (used_mem * 1024) / BlockSize Agreed - NumberOfBlocks is puzzling: (4294967040 / 1024) * 4096 = 17179865088 KB ConsumableBlocks is correct though: (6595584 * 1024) / 4096 = 1648896 What does "xm list -l Domain-0" return for memory and maxmem? I don't have a Xen system with that much mem to test on. > has - my system only has 8GB). If you look at virsh dominfo Domain-0 > on my box I get: > > Max memory: no limit > Used memory: 6595584 kB > > If NumberOfBlocks is MaxInt (of some sort) than this would make some sort of sense and ConsumableBlocks contains the correct value. It is a little bit misleading since processes in Dom0 take memory away from what can be allocated to a DomU. This is evident from this /proc/meminfo snippet: > > MemTotal: 6595584 kB > MemFree: 5940684 kB > Buffers: 16112 kB > Cached: 264716 kB > SwapCached: 0 kB > Active: 138332 kB > Inactive: 216032 kB > SwapTotal: 2626544 kB > SwapFree: 2626544 kB > Dirty: 436 kB > ... > VmallocTotal: 34359738367 kB > VmallocUsed: 267184 kB > VmallocChunk: 34359470859 kB > DirectMap4k: 8066284 kB > DirectMap2M: 0 kB > > > The current WBEM values: > > -ConsumableBlocks=1648896 > -NumberOfBlocks=4294967040 > -BlockSize=4096 Medlyn, Dayne (VSL - Ft Collins) wrote: >> Medlyn, Dayne (VSL - Ft Collins) wrote: >>> Kaitlin, >>> >>> Thanks for the correction. It seems were trying to use these >> properties correctly and there is something just not right. Using the >> same wbemcli command I get: >>> -SystemCreationClassName="Xen_ComputerSystem" >>> -SystemName="Domain-0" >>> -CreationClassName="Xen_Memory" >>> -DeviceID="Domain-0/mem" >>> >>> -ConsumableBlocks=1717760 >>> -NumberOfBlocks=4294967040 >>> -BlockSize=4096 >> Yes, that's definitely a bug. The values for ConsumableBlocks and >> NumberOfBlocks should be swapped. I'd hoped to have a bugfix out >> today, >> but it looks like it'll be tomorrow. > > > I think there is something else going on here. As far as I can tell the numbers are not swapped. Here is what I think is going on. The NumberOfBlock x BlockSize equates to about 16TB (not what my system has - my system only has 8GB). If you look at virsh dominfo Domain-0 on my box I get: > > Max memory: no limit > Used memory: 6595584 kB > > If NumberOfBlocks is MaxInt (of some sort) than this would make some sort of sense and ConsumableBlocks contains the correct value. It is a little bit misleading since processes in Dom0 take memory away from what can be allocated to a DomU. This is evident from this /proc/meminfo snippet: > > MemTotal: 6595584 kB > MemFree: 5940684 kB > Buffers: 16112 kB > Cached: 264716 kB > SwapCached: 0 kB > Active: 138332 kB > Inactive: 216032 kB > SwapTotal: 2626544 kB > SwapFree: 2626544 kB > Dirty: 436 kB > ... > VmallocTotal: 34359738367 kB > VmallocUsed: 267184 kB > VmallocChunk: 34359470859 kB > DirectMap4k: 8066284 kB > DirectMap2M: 0 kB > > > The current WBEM values: > > -ConsumableBlocks=1648896 > -NumberOfBlocks=4294967040 > -BlockSize=4096 > > Currently, on this system ConsumableBlocks represent MemTotal or the current memory allocated to the guest, even though it is not completely accurately representing the free memory on Dom0 that is available to new DomUs. The issue is that between version 0.4.1 and 0.5.2 NumberOfBlocks for a Dom0 changed to be MaxInt. It used to match ConsumableBlocks. I don't think there is an issue and I can work with this now that I understand it. 
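Working the formulas quoted at the top of this message backwards against the Domain-0 values (a quick sketch, not provider code) shows that ConsumableBlocks maps back exactly to MemTotal, while NumberOfBlocks maps back to roughly 16 TiB:

BLOCK_SIZE = 4096

def blocks_to_kb(blocks, block_size=BLOCK_SIZE):
    # invert the provider's calculation: blocks -> bytes -> KB
    return (blocks * block_size) // 1024

print(blocks_to_kb(1648896))     # 6595584 KB, exactly MemTotal / "Used memory"
print(blocks_to_kb(4294967040))  # about 16 TiB, far more than the 8 GB installed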
> > > As you point out below, I am probably not going to be able to get the information I want out of Xen_MemoryPool. > > Thanks for your help and insight. > > > Dayne > > > > > >>> One difference I did notice is that we are trying to use these values >> from Dom0 to determine the amount of available memory for guests to >> use. Perhaps for Dom0 these values just map differently. >> >> >>> My objective is to identify how much memory is available on the >> hypervisor that can be allocated to new guests. Looking more closely, >> I wonder if we should be using Xen_MemoryPool somehow to do this >> instead. What is the relationship between the Capacity and Reserved >> properties? I have not quite been able to make sense out of what these >> values mean. What I have noticed is that a host with no defined guests >> starts with Reserved smaller than Capacity: >>> -PoolID="MemoryPool/0" >>> -Primordial=FALSE >>> -Capacity=8385536 >>> -Reserved=8064748 >>> -ResourceType=4 >>> -OtherResourceType= >>> -ResourceSubType= >>> -AllocationUnits="KiloBytes" >>> >>> As guests are create and start the Reserved count increases and grows >> beyond the capacity. I am not quite sure how to make use of this >> information. Do you have any insights? >> >> The Capacity value is the memory value libvirt reports for the host >> (you'd also get this value if you use: virsh nodeinfo). >> >> The Reserved value is the some of all the memory that is currently >> allocated to the guests on the system (as reported by libvirt). This >> includes guests that aren't running, which is why you are seeing the >> value grow beyond capacity. >> >> We don't represent the host capabilities, but in the case of Xen, you >> can get around that by pulling some things from Dom0. >> >> However, using Dom0's attribute may not give you the full picture >> you're >> looking for. I would suggest taking a look at a provider set that >> represents the host information. Something like the sblim-base >> providers should this info. >> >>> >>> Dayne >>> >>> >>> >>>> -----Original Message----- >>>> From: libvirt-cim-bounces at redhat.com [mailto:libvirt-cim- >>>> bounces at redhat.com] On Behalf Of Kaitlin Rupert >>>> Sent: Wednesday, May 13, 2009 7:10 PM >>>> To: List for discussion and development of libvirt CIM >>>> Subject: Re: [Libvirt-cim] What does NumberOfBlocks and >>>> ConsumableBlocks in theXen_Memory class represent? >>>> >>>> Medlyn, Dayne (VSL - Ft Collins) wrote: >>>>> All, >>>>> >>>>> I am trying to understand the use of NumberOfBlocks and >>>> ConsumableBlocks in the Xen_Memory class, specifically for the Xen >>>> host. >>>>> What I have noticed is that between libvirt-cim-0.4.1 and libvirt- >>>> cim-0.5.2 the values for NumberOfBlock is now different than >>>> ConsumableBlocks and >>>> > much larger than the physical memory installed on the system. >>>>> Is it the case that NumberOfBlocks represents the maximum possible >>>> blocks for the hardware, >>>> > or some such number ConsumableBlocks is the memory that is >>>>> actually installed in the system? On my system, however, >>>> NumberOfBlocks reports 16TB where /proc/meminfo >>>>> reports 32Tb for VmallocTotal. In short, should I be using >>>> ConsumableBlocks to determine the total physical memory on the >> system? >>>> >>>> Hi Dayne, >>>> >>>> It looks like there is a bug here. 
Currently, the providers use the >>>> following representation: >>>> >>>> NumberOfBlocks: max amount of memory that can be allocated to a >> guest >>>> ConsumableBlocks: current memory allocated to the guest >>>> >>>> However, these values should be reversed based on the attribute >>>> definitions. >>>> >>>> Here's an example using one of the guests on my system: >>>> >>>> # virsh dominfo rstest_domainId: - >>>> Name: rstest_domain >>>> UUID: 746de06d-cb45-4efd-bc18-bf91d10bec84 >>>> State: shut off >>>> CPU(s): 1 >>>> Max memory: 131072 kB >>>> Used memory: 130048 kB >>>> Autostart: disable >>>> >>>> We take the max and used memory values libvirt reports and then >> convert >>>> them based on the block size. >>>> >>>> # wbemcli gi >>>> >> 'http://localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memo >> ry",DeviceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerS >>>> ystem",SystemName="rstest_domain"' >>>> -nl >>>> >> localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memory",Devi >> ceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerSystem",S >>>> ystemName="rstest_domain" >>>> >>>> >>>> -TransitioningToState=12 >>>> -SystemCreationClassName="Xen_ComputerSystem" >>>> -SystemName="rstest_domain" >>>> -CreationClassName="Xen_Memory" >>>> -DeviceID="rstest_domain/mem" >>>> >>>> >>>> >>>> -BlockSize=4096 >>>> -NumberOfBlocks=32768 >>>> -ConsumableBlocks=32512 >>>> >>>> >>>> -- >>>> Kaitlin Rupert >>>> IBM Linux Technology Center >>>> kaitlin at linux.vnet.ibm.com >>>> >>>> _______________________________________________ >>>> Libvirt-cim mailing list >>>> Libvirt-cim at redhat.com >>>> https://www.redhat.com/mailman/listinfo/libvirt-cim >>> _______________________________________________ >>> Libvirt-cim mailing list >>> Libvirt-cim at redhat.com >>> https://www.redhat.com/mailman/listinfo/libvirt-cim >> >> -- >> Kaitlin Rupert >> IBM Linux Technology Center >> kaitlin at linux.vnet.ibm.com >> >> _______________________________________________ >> Libvirt-cim mailing list >> Libvirt-cim at redhat.com >> https://www.redhat.com/mailman/listinfo/libvirt-cim > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 15 01:28:53 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 14 May 2009 18:28:53 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Add general functions forpoolverification In-Reply-To: <4A0BF82F.3050506@linux.vnet.ibm.com> References: <4A0BF82F.3050506@linux.vnet.ibm.com> Message-ID: <4A0CC555.4020408@linux.vnet.ibm.com> Deepti B Kalakeri wrote: > +1 > This patch looks good. libvirt.org is down, but I'll check it in tomorrow when it's backup. 
Same goes for RPCS/07 -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 15 01:43:01 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 14 May 2009 18:43:01 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] [TEST] (#2) Add VSSS 03_create_snapshot.py In-Reply-To: References: Message-ID: <0fd8e977083a6fd6c6c4.1242351781@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242082293 25200 # Node ID 0fd8e977083a6fd6c6c4fb2df9103b5943a625b0 # Parent 6654874e72ccd2e71d252ad553a298bd15079c7d [TEST] (#2) Add VSSS 03_create_snapshot.py This test case attempts a guest snapshot and verifies the results. Updates from 1 to 2: -Remove import of CIM_ERR_FAILED -Add test description -Add comment explaining which snapshot type is being used Signed-off-by: Kaitlin Rupert diff -r 6654874e72cc -r 0fd8e977083a suites/libvirt-cim/cimtest/VirtualSystemSnapshotService/03_create_snapshot.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/VirtualSystemSnapshotService/03_create_snapshot.py Mon May 11 15:51:33 2009 -0700 @@ -0,0 +1,146 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Kaitlin Rupert +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# Description: +# This test verfies that calling CreateSnapshot() on a running guest +# is successful and this it returns the proper Job and VSSD instances. +# + +import sys +from pywbem import cim_types +from CimTest.Globals import logger +from CimTest.ReturnCodes import PASS, FAIL +from XenKvmLib.const import do_main +from XenKvmLib.vxml import get_class +from XenKvmLib.classes import get_typed_class, inst_to_mof +from XenKvmLib.enumclass import EnumNames, EnumInstances, GetInstance +from XenKvmLib.vsss import remove_snapshot + +sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] + +#32769 - create a snapshot of the guest and leave the guest in a +# 'suspended' state +SNAPSHOT = cim_types.Uint16(32769) +test_dom = "snapshot_vm" + +def get_cs_ref(virt, ip): + cs_cn = get_typed_class(virt, "ComputerSystem") + + cs_refs = EnumNames(ip, cs_cn) + if cs_refs is None or len(cs_refs) < 1: + logger.error("Exp at least one domain defined on the system") + return FAIL, None + + cs_ref = None + for ref in cs_refs: + if ref['Name'] == test_dom: + cs_ref = ref + break + + if cs_ref is None: + logger.error("Enum of %s didn't return %s", cs_cn, test_dom) + return FAIL, None + + return PASS, cs_ref + +def get_vsssc_inst(virt, ip): + vsssc_cn = get_typed_class(virt, "VirtualSystemSnapshotServiceCapabilities") + + vsssc_insts = EnumInstances(ip, vsssc_cn, ret_cim_inst=True) + if vsssc_insts is None or len(vsssc_insts) < 1: + logger.error("Exp at least one %s", vsssc_cn) + return FAIL, None + + vsssc = vsssc_insts[0] + + #Override the additional instance values. 
We only care about the key + #values (eventhough CreateSnapshot takes a instance) + vsssc['SynchronousMethodsSupported'] = "" + vsssc['SnapshotTypesSupported'] = "" + + vsssc = inst_to_mof(vsssc) + + return PASS, vsssc + + at do_main(sup_types) +def main(): + options = main.options + + cxml = get_class(options.virt)(test_dom) + + try: + ret = cxml.cim_define(options.ip) + if not ret: + raise Exception("Unable to define %s", test_dom) + + status = cxml.cim_start(options.ip) + if status != PASS: + raise Exception("Failed to start the defined domain: %s" % test_dom) + + status, cs_ref = get_cs_ref(options.virt, options.ip) + if status != PASS: + raise Exception("Unable to get reference for %s" % test_dom) + + status, vsssc = get_vsssc_inst(options.virt, options.ip) + if status != PASS: + raise Exception("Unable to get VSSSC instance") + + vsss_cn = get_typed_class(options.virt, "VirtualSystemSnapshotService") + vsss_refs = EnumNames(options.ip, vsss_cn) + if vsss_refs is None or len(vsss_refs) < 1: + raise Exception("Exp at least one %s" % vsss_cn) + + service = vsss_refs[0] + keys = { 'Name' : service['Name'], + 'CreationClassName' : service['CreationClassName'], + 'SystemCreationClassName' : service['SystemCreationClassName'], + 'SystemName' : service['SystemName'] + } + service = GetInstance(options.ip, vsss_cn, keys) + + output = service.CreateSnapshot(AffectedSystem=cs_ref, + SnapshotSettings=vsssc, + SnapshotType=SNAPSHOT) + + ret = output[0] + if ret != 0: + raise Exception("Snapshot of %s failed!" % test_dom) + + if output[1]['Job'] is None: + raise Exception("CreateSnapshot failed to return a CIM job inst") + + if output[1]['ResultingSnapshot'] is None: + raise Exception("CreateSnapshot failed to return ResultingSnapshot") + + except Exception, detail: + logger.error("Exception: %s", detail) + status = FAIL + + cxml.cim_destroy(options.ip) + cxml.undefine(options.ip) + + remove_snapshot(options.ip, test_dom) + + return status + +if __name__ == "__main__": + sys.exit(main()) + From kaitlin at linux.vnet.ibm.com Fri May 15 01:43:00 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 14 May 2009 18:43:00 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] Add vsss.py module In-Reply-To: References: Message-ID: <6654874e72ccd2e71d25.1242351780@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242082293 25200 # Node ID 6654874e72ccd2e71d252ad553a298bd15079c7d # Parent a6630ea580cb00e8d5ef4ab93f7b18bb42414109 Add vsss.py module For keeping functions related to the VirtualSystemSnapshotService Signed-off-by: Kaitlin Rupert diff -r a6630ea580cb -r 6654874e72cc suites/libvirt-cim/lib/XenKvmLib/vsss.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/lib/XenKvmLib/vsss.py Mon May 11 15:51:33 2009 -0700 @@ -0,0 +1,40 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Kaitlin Rupert +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import pywbem +from VirtLib.utils import run_remote +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS + +#Path to snapshot save location +snapshot_save_loc = '/var/lib/libvirt/' + +def remove_snapshot(ip, vm_name): + snapshot = "%s%s" % (snapshot_save_loc, vm_name) + + cmd = "rm %s.save" % snapshot + ret, out = run_remote(ip, cmd) + if ret != 0: + logger.error("Failed to remove snapshot file for %s", vm_name) + return FAIL + + return PASS From kaitlin at linux.vnet.ibm.com Fri May 15 01:42:59 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 14 May 2009 18:42:59 -0700 Subject: [Libvirt-cim] [PATCH 0 of 2] Add test to verifyCreateSnapshot() Message-ID: From yunguol at cn.ibm.com Fri May 15 02:29:57 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Fri, 15 May 2009 10:29:57 +0800 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Add general functions forpoolverification In-Reply-To: <4A0CC555.4020408@linux.vnet.ibm.com> Message-ID: libvirt-cim-bounces at redhat.com wrote on 2009-05-15 09:28:53: > Deepti B Kalakeri wrote: > > +1 > > > > This patch looks good. libvirt.org is down, but I'll check it in > tomorrow when it's backup. Same goes for RPCS/07 Cool. Then I will work out a new patch of RPCS/04 when it's back. > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -------------- next part -------------- An HTML attachment was scrubbed... URL: From dayne.medlyn at hp.com Fri May 15 04:36:05 2009 From: dayne.medlyn at hp.com (Medlyn, Dayne (VSL - Ft Collins)) Date: Fri, 15 May 2009 04:36:05 +0000 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: <4A0CBC2E.7080109@linux.vnet.ibm.com> References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> <4A0CBC2E.7080109@linux.vnet.ibm.com> Message-ID: Hi Kaitlin, > Hi Dayne, > > Let me sort of work backwards here.. > > > Currently, on this system ConsumableBlocks represent MemTotal or the > > current memory allocated to the guest, even though it is not > > completely accurately representing the free memory on Dom0 that is > > available to new DomUs. The issue is that between version 0.4.1 and > > 0.5.2 NumberOfBlocks for a Dom0 changed to be MaxInt. It used to > > match ConsumableBlocks. In your original e-mail you said: "NumberOfBlocks: max amount of memory that can be allocated to a guest ConsumableBlocks: current memory allocated to the guest" This would mean that for a Dom0, according to the following output: $ virsh dominfo Domain ... Max memory: no limit Used memory: 6595584 kB ... Which seems to indicate that: NumberOfBlocks -> no limit (or max int as a reasonable representation) ConsumableBlocks -> 6595584 / 4096 blocks Which are the values I get in 0.5.2. > Correct - that's what the bug is here. A regression was introduced. Arguably, there may be a bug in that maxInt is the "max amount of memory that can be allocated to a guest" for Dom0. However, libvirt does report that there is "no limit" for Dom0. 
The change between versions, if it is a regression, is that 0.5.2 is representing "no limit" by maxInt where 0.4.1 represented it as "current memory allocated to ..." Dom0, or the MemTotal on Dom0. > > In 0.4.1: > -NumberOfBlocks the memory currently assigned to the guest > -ConsumableBlocks the maximum memory allocated to the guest\ Isn't this backwards from your original message? Or did I just misunderstand? Interestingly, what I see for a DomU with maxmem=1024 and memory=512 in 0.4.1 is the following: -ConsumableBlocks=262144 -NumberOfBlocks=131072 -BlockSize=4096 Which is consistent with what you just stated about 0.4.1 and reverse of what you said in your original response. > > In 0.5.2: > -NumberOfBlocks the maximum memory allocated to the guest > -ConsumableBlocks the memory currently assigned to the guest What I see in 0.5.2 for a DomU with maxmem=1024 and memory=512 is: -ConsumableBlocks=131072 -NumberOfBlocks=262144 -BlockSize=4096 I am pretty new at deciphering MOF files, but if I understand the mof, NumberOfBlocks should be the maximum as in the NumberOfBlock x BlockSize = total size of memory (I think this mean maximum memory). I believe the MOF says ConsumableBlocks is the number of blocks available for consumption, or the actual memory assigned to the guest. If I understand this right, it is actually 0.4.1 that is reversed and 0.5.2 contains the correction, yes? In either case I am going to have to figure out how to tell if I am talking to a 0.4.1 libvirt-CIM or 0.5.2 .. and where the change happened .. *sigh* ... so I can handle it appropriately as one of them is not right. > > So this should definitely be fixed since we aren't adhering to the > definitions in the mof... > > > I think there is something else going on here. As far as I can tell > > the numbers are not swapped. Here is what I think is going on. The > > NumberOfBlock x BlockSize equates to about 16TB (not what my system > > Ah, my mistake. When I read your previous message, I thought you were > talking about the issue above. I thought I was talking about the same issue :-). This is quite confusing. > > The values do seem strange. We use the following calculation: > > NumberOfBlocks = (max_mem * 1024) / BlockSize > ConsumableBlocks = (used_mem * 1024) / BlockSize For DomU this seems to be what I am seeing. > > Agreed - NumberOfBlocks is puzzling: > (4294967040 / 1024) * 4096 = 17179865088 KB > For Dom0 this seems to be a new behavior somewhere between 0.4.1 and 0.5.2. I am fine with this as there really is not maximum and it appears the max is possibly represented by maxInt (seeing how virsh show it as "no limit"). > ConsumableBlocks is correct though: (6595584 * 1024) / 4096 = 1648896 > > What does "xm list -l Domain-0" return for memory and maxmem? I don't > have a Xen system with that much mem to test on. On the host with libvirt-CIM 0.5.2 / libvirt 0.4.6 $ xm list -l Domain-0 | grep -i mem (maxmem 16777215) (memory 6441) (shadow_memory 0) xm list -l sles11-HVM | grep -i mem (maxmem 1024) (memory 512) (shadow_memory 0) On the host with libvirt-CIM 0.4.1 / libvirt $ xm list -l Domain-0 | grep -i mem (memory 3621) (shadow_memory 0) (maxmem 3621) $ xm list -l target | grep mem (memory 512) (shadow_memory 9) (maxmem 1024) It looks like you are just reporting what libvirt is telling you ... very interesting. BTW: I don't actually have 16Tb of memory either, I only have 8Gb. It looks like we are honing in on the problem. 
Based on everything I said above, I believe libvirt-CIM 0.4.1 to be flawed and libvirt-CIM 0.5.2 to be correct. Does this sound about right? Do you have any idea where it may have changed? Thanks for your patience. Dayne > > > has - my system only has 8GB). If you look at virsh dominfo Domain- > 0 > > on my box I get: > > > > Max memory: no limit > > Used memory: 6595584 kB > > > > If NumberOfBlocks is MaxInt (of some sort) than this would make some > sort of sense and ConsumableBlocks contains the correct value. It is a > little bit misleading since processes in Dom0 take memory away from > what > can be allocated to a DomU. This is evident from this /proc/meminfo > snippet: > > > > MemTotal: 6595584 kB > > MemFree: 5940684 kB > > Buffers: 16112 kB > > Cached: 264716 kB > > SwapCached: 0 kB > > Active: 138332 kB > > Inactive: 216032 kB > > SwapTotal: 2626544 kB > > SwapFree: 2626544 kB > > Dirty: 436 kB > > ... > > VmallocTotal: 34359738367 kB > > VmallocUsed: 267184 kB > > VmallocChunk: 34359470859 kB > > DirectMap4k: 8066284 kB > > DirectMap2M: 0 kB > > > > > > The current WBEM values: > > > > -ConsumableBlocks=1648896 > > -NumberOfBlocks=4294967040 > > -BlockSize=4096 > > > Medlyn, Dayne (VSL - Ft Collins) wrote: > >> Medlyn, Dayne (VSL - Ft Collins) wrote: > >>> Kaitlin, > >>> > >>> Thanks for the correction. It seems were trying to use these > >> properties correctly and there is something just not right. Using > the > >> same wbemcli command I get: > >>> -SystemCreationClassName="Xen_ComputerSystem" > >>> -SystemName="Domain-0" > >>> -CreationClassName="Xen_Memory" > >>> -DeviceID="Domain-0/mem" > >>> > >>> -ConsumableBlocks=1717760 > >>> -NumberOfBlocks=4294967040 > >>> -BlockSize=4096 > >> Yes, that's definitely a bug. The values for ConsumableBlocks and > >> NumberOfBlocks should be swapped. I'd hoped to have a bugfix out > >> today, > >> but it looks like it'll be tomorrow. > > > > > > I think there is something else going on here. As far as I can tell > the numbers are not swapped. Here is what I think is going on. The > NumberOfBlock x BlockSize equates to about 16TB (not what my system has > - my system only has 8GB). If you look at virsh dominfo Domain-0 on my > box I get: > > > > Max memory: no limit > > Used memory: 6595584 kB > > > > If NumberOfBlocks is MaxInt (of some sort) than this would make some > sort of sense and ConsumableBlocks contains the correct value. It is a > little bit misleading since processes in Dom0 take memory away from > what can be allocated to a DomU. This is evident from this > /proc/meminfo snippet: > > > > MemTotal: 6595584 kB > > MemFree: 5940684 kB > > Buffers: 16112 kB > > Cached: 264716 kB > > SwapCached: 0 kB > > Active: 138332 kB > > Inactive: 216032 kB > > SwapTotal: 2626544 kB > > SwapFree: 2626544 kB > > Dirty: 436 kB > > ... > > VmallocTotal: 34359738367 kB > > VmallocUsed: 267184 kB > > VmallocChunk: 34359470859 kB > > DirectMap4k: 8066284 kB > > DirectMap2M: 0 kB > > > > > > The current WBEM values: > > > > -ConsumableBlocks=1648896 > > -NumberOfBlocks=4294967040 > > -BlockSize=4096 > > > > Currently, on this system ConsumableBlocks represent MemTotal or the > current memory allocated to the guest, even though it is not completely > accurately representing the free memory on Dom0 that is available to > new DomUs. The issue is that between version 0.4.1 and 0.5.2 > NumberOfBlocks for a Dom0 changed to be MaxInt. It used to match > ConsumableBlocks. I don't think there is an issue and I can work with > this now that I understand it. 
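One way to read the puzzling NumberOfBlocks figure, assuming the "(maxmem 16777215)" value in the xm output above is in MB (an assumption; the unit isn't stated in the thread): 16777215 is 2**24 - 1, effectively Xen's "no limit" sentinel, and pushing it through the NumberOfBlocks formula quoted earlier yields exactly the value being reported:

BLOCK_SIZE = 4096

maxmem_mb = 16777215           # 2**24 - 1, from "xm list -l Domain-0"
maxmem_kb = maxmem_mb * 1024   # the KB figure libvirt would hand back

print((maxmem_kb * 1024) // BLOCK_SIZE)   # 4294967040 -- the reported NumberOfBlocks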
> > > > > > As you point out below, I am probably not going to be able to get the > information I want out of Xen_MemoryPool. > > > > Thanks for your help and insight. > > > > > > Dayne > > > > > > > > > > > >>> One difference I did notice is that we are trying to use these > values > >> from Dom0 to determine the amount of available memory for guests to > >> use. Perhaps for Dom0 these values just map differently. > >> > >> > >>> My objective is to identify how much memory is available on the > >> hypervisor that can be allocated to new guests. Looking more > closely, > >> I wonder if we should be using Xen_MemoryPool somehow to do this > >> instead. What is the relationship between the Capacity and Reserved > >> properties? I have not quite been able to make sense out of what > these > >> values mean. What I have noticed is that a host with no defined > guests > >> starts with Reserved smaller than Capacity: > >>> -PoolID="MemoryPool/0" > >>> -Primordial=FALSE > >>> -Capacity=8385536 > >>> -Reserved=8064748 > >>> -ResourceType=4 > >>> -OtherResourceType= > >>> -ResourceSubType= > >>> -AllocationUnits="KiloBytes" > >>> > >>> As guests are create and start the Reserved count increases and > grows > >> beyond the capacity. I am not quite sure how to make use of this > >> information. Do you have any insights? > >> > >> The Capacity value is the memory value libvirt reports for the host > >> (you'd also get this value if you use: virsh nodeinfo). > >> > >> The Reserved value is the some of all the memory that is currently > >> allocated to the guests on the system (as reported by libvirt). > This > >> includes guests that aren't running, which is why you are seeing the > >> value grow beyond capacity. > >> > >> We don't represent the host capabilities, but in the case of Xen, > you > >> can get around that by pulling some things from Dom0. > >> > >> However, using Dom0's attribute may not give you the full picture > >> you're > >> looking for. I would suggest taking a look at a provider set that > >> represents the host information. Something like the sblim-base > >> providers should this info. > >> > >>> > >>> Dayne > >>> > >>> > >>> > >>>> -----Original Message----- > >>>> From: libvirt-cim-bounces at redhat.com [mailto:libvirt-cim- > >>>> bounces at redhat.com] On Behalf Of Kaitlin Rupert > >>>> Sent: Wednesday, May 13, 2009 7:10 PM > >>>> To: List for discussion and development of libvirt CIM > >>>> Subject: Re: [Libvirt-cim] What does NumberOfBlocks and > >>>> ConsumableBlocks in theXen_Memory class represent? > >>>> > >>>> Medlyn, Dayne (VSL - Ft Collins) wrote: > >>>>> All, > >>>>> > >>>>> I am trying to understand the use of NumberOfBlocks and > >>>> ConsumableBlocks in the Xen_Memory class, specifically for the Xen > >>>> host. > >>>>> What I have noticed is that between libvirt-cim-0.4.1 and > libvirt- > >>>> cim-0.5.2 the values for NumberOfBlock is now different than > >>>> ConsumableBlocks and > >>>> > much larger than the physical memory installed on the system. > >>>>> Is it the case that NumberOfBlocks represents the maximum > possible > >>>> blocks for the hardware, > >>>> > or some such number ConsumableBlocks is the memory that is > >>>>> actually installed in the system? On my system, however, > >>>> NumberOfBlocks reports 16TB where /proc/meminfo > >>>>> reports 32Tb for VmallocTotal. In short, should I be using > >>>> ConsumableBlocks to determine the total physical memory on the > >> system? > >>>> > >>>> Hi Dayne, > >>>> > >>>> It looks like there is a bug here. 
Currently, the providers use > the > >>>> following representation: > >>>> > >>>> NumberOfBlocks: max amount of memory that can be allocated to a > >> guest > >>>> ConsumableBlocks: current memory allocated to the guest > >>>> > >>>> However, these values should be reversed based on the attribute > >>>> definitions. > >>>> > >>>> Here's an example using one of the guests on my system: > >>>> > >>>> # virsh dominfo rstest_domainId: - > >>>> Name: rstest_domain > >>>> UUID: 746de06d-cb45-4efd-bc18-bf91d10bec84 > >>>> State: shut off > >>>> CPU(s): 1 > >>>> Max memory: 131072 kB > >>>> Used memory: 130048 kB > >>>> Autostart: disable > >>>> > >>>> We take the max and used memory values libvirt reports and then > >> convert > >>>> them based on the block size. > >>>> > >>>> # wbemcli gi > >>>> > >> > 'http://localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memo > >> > ry",DeviceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerS > >>>> ystem",SystemName="rstest_domain"' > >>>> -nl > >>>> > >> > localhost:5988/root/virt:Xen_Memory.CreationClassName="Xen_Memory",Devi > >> > ceID="rstest_domain/mem",SystemCreationClassName="Xen_ComputerSystem",S > >>>> ystemName="rstest_domain" > >>>> > >>>> > >>>> -TransitioningToState=12 > >>>> -SystemCreationClassName="Xen_ComputerSystem" > >>>> -SystemName="rstest_domain" > >>>> -CreationClassName="Xen_Memory" > >>>> -DeviceID="rstest_domain/mem" > >>>> > >>>> > >>>> > >>>> -BlockSize=4096 > >>>> -NumberOfBlocks=32768 > >>>> -ConsumableBlocks=32512 > >>>> > >>>> > >>>> -- > >>>> Kaitlin Rupert > >>>> IBM Linux Technology Center > >>>> kaitlin at linux.vnet.ibm.com > >>>> > >>>> _______________________________________________ > >>>> Libvirt-cim mailing list > >>>> Libvirt-cim at redhat.com > >>>> https://www.redhat.com/mailman/listinfo/libvirt-cim > >>> _______________________________________________ > >>> Libvirt-cim mailing list > >>> Libvirt-cim at redhat.com > >>> https://www.redhat.com/mailman/listinfo/libvirt-cim > >> > >> -- > >> Kaitlin Rupert > >> IBM Linux Technology Center > >> kaitlin at linux.vnet.ibm.com > >> > >> _______________________________________________ > >> Libvirt-cim mailing list > >> Libvirt-cim at redhat.com > >> https://www.redhat.com/mailman/listinfo/libvirt-cim > > > > _______________________________________________ > > Libvirt-cim mailing list > > Libvirt-cim at redhat.com > > https://www.redhat.com/mailman/listinfo/libvirt-cim > > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim From kaitlin at linux.vnet.ibm.com Fri May 15 15:35:59 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 15 May 2009 08:35:59 -0700 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> <4A0CBC2E.7080109@linux.vnet.ibm.com> Message-ID: <4A0D8BDF.9050202@linux.vnet.ibm.com> Medlyn, Dayne (VSL - Ft Collins) wrote: > Hi Kaitlin, > > >> Hi Dayne, >> >> Let me sort of work backwards here.. >> >> > Currently, on this system ConsumableBlocks represent MemTotal or the >> > current memory allocated to the guest, even though it is not >> > completely accurately representing the free memory on Dom0 that is >> > available to new DomUs. 
The issue is that between version 0.4.1 and >> > 0.5.2 NumberOfBlocks for a Dom0 changed to be MaxInt. It used to >> > match ConsumableBlocks. > > In your original e-mail you said: > > "NumberOfBlocks: max amount of memory that can be allocated to a guest > ConsumableBlocks: current memory allocated to the guest" > > This would mean that for a Dom0, according to the following output: > > $ virsh dominfo Domain > ... > Max memory: no limit > Used memory: 6595584 kB > ... > > Which seems to indicate that: > > NumberOfBlocks -> no limit (or max int as a reasonable representation) > ConsumableBlocks -> 6595584 / 4096 blocks > > Which are the values I get in 0.5.2. > > >> Correct - that's what the bug is here. A regression was introduced. > > Arguably, there may be a bug in that maxInt is the "max amount of memory that can be allocated to a guest" for Dom0. However, libvirt does report that there is "no limit" for Dom0. > The change between versions, if it is a regression, is that 0.5.2 is representing "no limit" by maxInt where 0.4.1 represented it as "current memory allocated to ..." Dom0, or the MemTotal on Dom0. > > >> In 0.4.1: >> -NumberOfBlocks the memory currently assigned to the guest >> -ConsumableBlocks the maximum memory allocated to the guest\ > > Isn't this backwards from your original message? Or did I just misunderstand? Interestingly, what I see for a DomU with maxmem=1024 and memory=512 in 0.4.1 is the following: In my original message, I was talking about the current implementation - which would be 0.5.1 and newer. > > -ConsumableBlocks=262144 > -NumberOfBlocks=131072 > -BlockSize=4096 > > Which is consistent with what you just stated about 0.4.1 and reverse of what you said in your original response. Correct - sorry for the confusion. My original response was indicating the way the providers behave currently. > > >> In 0.5.2: >> -NumberOfBlocks the maximum memory allocated to the guest >> -ConsumableBlocks the memory currently assigned to the guest > > What I see in 0.5.2 for a DomU with maxmem=1024 and memory=512 is: > > -ConsumableBlocks=131072 > -NumberOfBlocks=262144 > -BlockSize=4096 > It looks like the behavior you're seeing is consistent with the last email I sent - so we're on the same page, I think. =) > > I am pretty new at deciphering MOF files, but if I understand the mof, NumberOfBlocks should be the maximum as in the NumberOfBlock x BlockSize = total size of > memory (I think this mean maximum memory). I believe the MOF says > ConsumableBlocks is the number of blocks available for consumption, or the actual memory assigned to the guest. If I understand this right, it is actually 0.4.1 > that is reversed and 0.5.2 contains the correction, yes? I reread the mof, and you are correct. So this doesn't appear to be a regression at all. > > In either case I am going to have to figure out how to tell if I am talking to a 0.4.1 libvirt-CIM or 0.5.2 .. and where the change happened .. *sigh* ... so I can handle it appropriately as one of them is not right. Yes, unfortunately. You can get the version from the VirtualSystemManagementService - > > >> So this should definitely be fixed since we aren't adhering to the >> definitions in the mof... >> >> > I think there is something else going on here. As far as I can tell >> > the numbers are not swapped. Here is what I think is going on. The >> > NumberOfBlock x BlockSize equates to about 16TB (not what my system >> >> Ah, my mistake. When I read your previous message, I thought you were >> talking about the issue above. 
> > I thought I was talking about the same issue :-). This is quite confusing. > >> The values do seem strange. We use the following calculation: >> >> NumberOfBlocks = (max_mem * 1024) / BlockSize >> ConsumableBlocks = (used_mem * 1024) / BlockSize > > For DomU this seems to be what I am seeing. > > >> Agreed - NumberOfBlocks is puzzling: >> (4294967040 / 1024) * 4096 = 17179865088 KB >> > > For Dom0 this seems to be a new behavior somewhere between 0.4.1 and 0.5.2. I am fine with this as there really is not maximum and it appears the max is possibly represented by maxInt (seeing how virsh show it as "no limit"). > > >> ConsumableBlocks is correct though: (6595584 * 1024) / 4096 = 1648896 >> >> What does "xm list -l Domain-0" return for memory and maxmem? I don't >> have a Xen system with that much mem to test on. > > On the host with libvirt-CIM 0.5.2 / libvirt 0.4.6 > $ xm list -l Domain-0 | grep -i mem > (maxmem 16777215) > (memory 6441) > (shadow_memory 0) > > xm list -l sles11-HVM | grep -i mem > (maxmem 1024) > (memory 512) > (shadow_memory 0) > > On the host with libvirt-CIM 0.4.1 / libvirt > $ xm list -l Domain-0 | grep -i mem > (memory 3621) > (shadow_memory 0) > (maxmem 3621) > > $ xm list -l target | grep mem > (memory 512) > (shadow_memory 9) > (maxmem 1024) > > It looks like you are just reporting what libvirt is telling you ... very interesting. BTW: I don't actually have 16Tb of memory either, I only have 8Gb. Yes, we pull the value from libvirt and then convert that value in to blocks. In this case, libvirt is pulling from Xen. My guess is that the Xen or libvirt behavior has changed, which is why you may see the value reported change across different versions of Xen / libvirt. > > > It looks like we are honing in on the problem. Based on everything I said above, I believe libvirt-CIM 0.4.1 to be flawed and libvirt-CIM 0.5.2 to be correct. Does this sound about right? Do you have any idea where it may have changed? Thanks for your patience. Yes, agreed. Here's the change: http://libvirt.org/hg/libvirt-cim/rev/2796fd3e2eaa We parse the guest XML (essentially virsh dumpxml) to get the various attributes of a guest. The problem in 0.4.1 is that we were storing the memory values in the wrong structures. We later use these struct values in src/Virt_Device.c (starting at line 143). My concern in my original email was that the values were reversed here as well, but as you pointed out, the values are set correctly. From veillard at redhat.com Fri May 15 15:40:40 2009 From: veillard at redhat.com (Daniel Veillard) Date: Fri, 15 May 2009 17:40:40 +0200 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Add general functions forpoolverification In-Reply-To: <4A0CC555.4020408@linux.vnet.ibm.com> References: <4A0BF82F.3050506@linux.vnet.ibm.com> <4A0CC555.4020408@linux.vnet.ibm.com> Message-ID: <20090515154040.GP1589@redhat.com> On Thu, May 14, 2009 at 06:28:53PM -0700, Kaitlin Rupert wrote: > Deepti B Kalakeri wrote: >> +1 >> > > This patch looks good. libvirt.org is down, but I'll check it in > tomorrow when it's backup. 
Same goes for RPCS/07 Nahh, it's working for me [root at xmlsoft ~]# w 17:39:53 up 39 days, 3:23, 1 user, load average: 0.01, 0.15, 0.21 I just had to restart apache for some reason Daniel -- Daniel Veillard | libxml Gnome XML XSLT toolkit http://xmlsoft.org/ daniel at veillard.com | Rpmfind RPM search engine http://rpmfind.net/ http://veillard.com/ | virtualization library http://libvirt.org/ From kaitlin at linux.vnet.ibm.com Fri May 15 15:43:02 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 15 May 2009 08:43:02 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Add general functions forpoolverification In-Reply-To: <20090515154040.GP1589@redhat.com> References: <4A0BF82F.3050506@linux.vnet.ibm.com> <4A0CC555.4020408@linux.vnet.ibm.com> <20090515154040.GP1589@redhat.com> Message-ID: <4A0D8D86.5070805@linux.vnet.ibm.com> Daniel Veillard wrote: > On Thu, May 14, 2009 at 06:28:53PM -0700, Kaitlin Rupert wrote: >> Deepti B Kalakeri wrote: >>> +1 >>> >> This patch looks good. libvirt.org is down, but I'll check it in >> tomorrow when it's backup. Same goes for RPCS/07 > > Nahh, it's working for me > > [root at xmlsoft ~]# w > 17:39:53 up 39 days, 3:23, 1 user, load average: 0.01, 0.15, 0.21 > > I just had to restart apache for some reason > Ah, that would explain why I couldn't pull from the repo ;) No worries, though. Thanks for the apache restart! -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Fri May 15 17:43:41 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Fri, 15 May 2009 23:13:41 +0530 Subject: [Libvirt-cim] [PATCH 2 of 2] [TEST] (#2) Add VSSS 03_create_snapshot.py In-Reply-To: <0fd8e977083a6fd6c6c4.1242351781@localhost.localdomain> References: <0fd8e977083a6fd6c6c4.1242351781@localhost.localdomain> Message-ID: <4A0DA9CD.2020507@linux.vnet.ibm.com> This tc fails with the latest src revision 874 on F10 with KVM with the following error: VirtualSystemSnapshotService - 03_create_snapshot.py: FAIL ERROR - Exception: 'ResultingSnapshot' Seems like we have a missing ResultingSnapshot field in the o/p from CreateSnapshot() function. See below: ERROR - DEBUG output is (0L, {u'Job': CIMInstanceName(classname=u'CIM_ConcreteJob', keybindings=NocaseDict({u'InstanceID': u'eaa405ad-459f-4d08-b76a-6ac3529cdbf2'}))}) -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 15 17:55:58 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 15 May 2009 10:55:58 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] [TEST] (#2) Add VSSS 03_create_snapshot.py In-Reply-To: <4A0DA9CD.2020507@linux.vnet.ibm.com> References: <0fd8e977083a6fd6c6c4.1242351781@localhost.localdomain> <4A0DA9CD.2020507@linux.vnet.ibm.com> Message-ID: <4A0DACAE.5030609@linux.vnet.ibm.com> Deepti B Kalakeri wrote: > > This tc fails with the latest src revision 874 on F10 with KVM with the > following error: > > VirtualSystemSnapshotService - 03_create_snapshot.py: FAIL > ERROR - Exception: 'ResultingSnapshot' > > Seems like we have a missing ResultingSnapshot field in the o/p from > CreateSnapshot() function. > See below: > ERROR - DEBUG output is (0L, {u'Job': > CIMInstanceName(classname=u'CIM_ConcreteJob', > keybindings=NocaseDict({u'InstanceID': > u'eaa405ad-459f-4d08-b76a-6ac3529cdbf2'}))}) > Sorry Deepti - I submitted this the same time I submitted the "Return VSSD reference from CreateSnapshot" patch, which isn't in the tree yet. 
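Until that patch lands, a defensive variant of the result check in 03_create_snapshot.py might look like the following. This is a hypothetical tweak rather than the committed fix; it reuses the names from the test above (service, cs_ref, vsssc, SNAPSHOT, test_dom) and assumes the returned output-parameter dictionary supports dict-style .get():

# hypothetical guard around the CreateSnapshot() return value
output = service.CreateSnapshot(AffectedSystem=cs_ref,
                                SnapshotSettings=vsssc,
                                SnapshotType=SNAPSHOT)

ret, props = output[0], output[1]
if ret != 0:
    raise Exception("Snapshot of %s failed!" % test_dom)

if props.get('Job') is None:
    raise Exception("CreateSnapshot failed to return a CIM job inst")

# .get() avoids the bare KeyError ('ResultingSnapshot') seen in the
# failure above when the provider omits the output parameter entirely
if props.get('ResultingSnapshot') is None:
    raise Exception("CreateSnapshot failed to return ResultingSnapshot")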
You can wait on reviewing until this is in the tree. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 15 18:17:35 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 15 May 2009 11:17:35 -0700 Subject: [Libvirt-cim] [PATCH] Work around MOF typo in VSSnapshotServiceCapabilities Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1242411431 25200 # Node ID d5155c4b965ddc209e37a59ddfa8de6f649a51b0 # Parent 4ac0b029df21346d5f958d1b5a29c7360b7850ae Work around MOF typo in VSSnapshotServiceCapabilities Also set array to NULL before reallocating. Signed-off-by: Kaitlin Rupert diff -r 4ac0b029df21 -r d5155c4b965d src/Virt_VirtualSystemSnapshotServiceCapabilities.c --- a/src/Virt_VirtualSystemSnapshotServiceCapabilities.c Mon May 11 16:47:15 2009 -0300 +++ b/src/Virt_VirtualSystemSnapshotServiceCapabilities.c Fri May 15 11:17:11 2009 -0700 @@ -62,9 +62,13 @@ element = (uint16_t)APPLY_SNAPSHOT; CMSetArrayElementAt(array, 1, &element, CMPI_uint16); - CMSetProperty(inst, "AsynchronousMethodsSupported", + /* There is a typo in the mof - the attribute name in the mof is: + AynchronousMethodsSupported, not AsynchronousMethodsSupported. + Making a note incase this changes later. */ + CMSetProperty(inst, "AynchronousMethodsSupported", (CMPIValue *)&array, CMPI_uint16A); - + + array = NULL; array = CMNewArray(broker, 1, CMPI_uint16, &s); if ((s.rc != CMPI_RC_OK) || (array == NULL)) goto out; From dayne.medlyn at hp.com Fri May 15 19:55:14 2009 From: dayne.medlyn at hp.com (Medlyn, Dayne (VSL - Ft Collins)) Date: Fri, 15 May 2009 19:55:14 +0000 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: <4A0D8BDF.9050202@linux.vnet.ibm.com> References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> <4A0CBC2E.7080109@linux.vnet.ibm.com> <4A0D8BDF.9050202@linux.vnet.ibm.com> Message-ID: Kaitlin, Thanks for working through this with me. One last question: > > Isn't this backwards from your original message? Or did I just > misunderstand? Interestingly, what I see for a DomU with maxmem=1024 > and memory=512 in 0.4.1 is the following: > > In my original message, I was talking about the current implementation > - > which would be 0.5.1 and newer. Just for confirmation, does this mean that for anything 0.5.1 and newer I should expect: -NumberOfBlocks the maximum memory allocated to the guest -ConsumableBlocks the memory currently assigned to the guest And for anything older than 0.5.1 I should expect: -NumberOfBlocks the memory currently assigned to the guest -ConsumableBlocks the maximum memory allocated to the guest I will need to put this in my code so I can handle both (which I can tell by looking in VirtualSystemManagementService). > > > > -ConsumableBlocks=262144 > > -NumberOfBlocks=131072 > > -BlockSize=4096 > > > > Which is consistent with what you just stated about 0.4.1 and reverse > of what you said in your original response. > > Correct - sorry for the confusion. My original response was indicating > the way the providers behave currently. No worries. Thanks for the clarification. 
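A rough sketch of the version-dependent handling described above, using pywbem as the test suite does. The exact property on the Xen_VirtualSystemManagementService instance that carries the libvirt-cim release isn't named in the thread, so this simply dumps the instance properties and leaves the version parsing as a stub:

import pywbem

# connection details mirror the wbemcli URL used earlier in the thread
conn = pywbem.WBEMConnection('http://localhost:5988', creds=None,
                             default_namespace='root/virt')

vsms = conn.EnumerateInstances('Xen_VirtualSystemManagementService')[0]
for name, value in vsms.items():
    print("%s = %s" % (name, value))   # locate the release/changeset info here

def normalized_mem_blocks(number_of_blocks, consumable_blocks, release):
    # return (max_blocks, current_blocks) regardless of provider version;
    # 'release' is a (major, minor, micro) tuple parsed from the instance
    if release < (0, 5, 1):   # pre-0.5.1 providers report the two values swapped
        return consumable_blocks, number_of_blocks
    return number_of_blocks, consumable_blocks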
> > > >> In 0.5.2: > >> -NumberOfBlocks the maximum memory allocated to the guest > >> -ConsumableBlocks the memory currently assigned to the guest > > > > What I see in 0.5.2 for a DomU with maxmem=1024 and memory=512 is: > > > > -ConsumableBlocks=131072 > > -NumberOfBlocks=262144 > > -BlockSize=4096 > > > > It looks like the behavior you're seeing is consistent with the last > email I sent - so we're on the same page, I think. =) I think so too. Hopefully we are not both wrong :-). > > > > I am pretty new at deciphering MOF files, but if I understand the > mof, NumberOfBlocks should be the maximum as in the NumberOfBlock x > BlockSize = total size of > > memory (I think this mean maximum memory). I believe the MOF says > > ConsumableBlocks is the number of blocks available for consumption, > or the actual memory assigned to the guest. If I understand this > right, > it is actually 0.4.1 > > that is reversed and 0.5.2 contains the correction, yes? > > I reread the mof, and you are correct. So this doesn't appear to be a > regression at all. Again, thanks for the confirmation. > > > > In either case I am going to have to figure out how to tell if I am > talking to a 0.4.1 libvirt-CIM or 0.5.2 .. and where the change > happened .. *sigh* ... so I can handle it appropriately as one of them > is not right. > > Yes, unfortunately. You can get the version from the > VirtualSystemManagementService - Just what I needed. > > It looks like you are just reporting what libvirt is telling you ... > very interesting. BTW: I don't actually have 16Tb of memory either, I > only have 8Gb. > > Yes, we pull the value from libvirt and then convert that value in to > blocks. > > In this case, libvirt is pulling from Xen. My guess is that the Xen or > libvirt behavior has changed, which is why you may see the value > reported change across different versions of Xen / libvirt. Good to know. > > > > It looks like we are honing in on the problem. Based on everything I > said above, I believe libvirt-CIM 0.4.1 to be flawed and libvirt-CIM > 0.5.2 to be correct. Does this sound about right? Do you have any > idea where it may have changed? Thanks for your patience. > > Yes, agreed. Here's the change: > http://libvirt.org/hg/libvirt-cim/rev/2796fd3e2eaa > > We parse the guest XML (essentially virsh dumpxml) to get the various > attributes of a guest. The problem in 0.4.1 is that we were storing the > memory values in the wrong structures. > > We later use these struct values in src/Virt_Device.c (starting at line > 143). My concern in my original email was that the values were > reversed > here as well, but as you pointed out, the values are set correctly. Thanks again for all the clarification. Dayne From kaitlin at linux.vnet.ibm.com Fri May 15 20:37:27 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 15 May 2009 13:37:27 -0700 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> <4A0CBC2E.7080109@linux.vnet.ibm.com> <4A0D8BDF.9050202@linux.vnet.ibm.com> Message-ID: <4A0DD287.9030709@linux.vnet.ibm.com> Medlyn, Dayne (VSL - Ft Collins) wrote: > Kaitlin, > > Thanks for working through this with me. One last question: > >>> Isn't this backwards from your original message? Or did I just >> misunderstand? 
Interestingly, what I see for a DomU with maxmem=1024 >> and memory=512 in 0.4.1 is the following: >> >> In my original message, I was talking about the current implementation >> - >> which would be 0.5.1 and newer. > > Just for confirmation, does this mean that for anything 0.5.1 and newer I should expect: > > -NumberOfBlocks the maximum memory allocated to the guest > -ConsumableBlocks the memory currently assigned to the guest > > And for anything older than 0.5.1 I should expect: > > -NumberOfBlocks the memory currently assigned to the guest > -ConsumableBlocks the maximum memory allocated to the guest Yes, that's correct. We're one the same page here. Sorry for the confusing detour there. ;) -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Fri May 15 22:04:30 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Fri, 15 May 2009 19:04:30 -0300 Subject: [Libvirt-cim] [PATCH] Set the InitPath for LXC guests In-Reply-To: <1a625ee4280f4f55fbed.1242280156@localhost.localdomain> References: <1a625ee4280f4f55fbed.1242280156@localhost.localdomain> Message-ID: <4A0DE6EE.9020706@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1242279945 25200 > # Node ID 1a625ee4280f4f55fbed04c2eeb973e3af3e5816 > # Parent 032d34765a83b2f6784fd33e488b8829f3391603 > Set the InitPath for LXC guests > > This was missed when InitPath support for LXC guests was added. > > Signed-off-by: Kaitlin Rupert > > diff -r 032d34765a83 -r 1a625ee4280f src/Virt_VSSD.c > --- a/src/Virt_VSSD.c Wed May 13 07:40:47 2009 -0700 > +++ b/src/Virt_VSSD.c Wed May 13 22:45:45 2009 -0700 > @@ -88,6 +88,16 @@ > CMPI_chars); > } > > +static void _set_lxc_prop(struct domain *dominfo, > + CMPIInstance *inst) > +{ > + if (dominfo->os_info.lxc.init != NULL) > + CMSetProperty(inst, > + "InitPath", > + (CMPIValue *)dominfo->os_info.lxc.init, > + CMPI_chars); > +} > + > static int instance_from_dom(virDomainPtr dom, > CMPIInstance *inst) > { > @@ -151,6 +161,8 @@ > _set_fv_prop(dominfo, inst); > else if (dominfo->type == DOMAIN_XENPV) > _set_pv_prop(dominfo, inst); > + else if (dominfo->type == DOMAIN_LXC) > + _set_lxc_prop(dominfo, inst); > else > CU_DEBUG("Unknown domain type %i for creating VSSD", > dominfo->type); > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From yunguol at cn.ibm.com Mon May 18 01:38:49 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Mon, 18 May 2009 09:38:49 +0800 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Add general functions forpoolverification In-Reply-To: <4A0D8D86.5070805@linux.vnet.ibm.com> Message-ID: libvirt-cim-bounces at redhat.com wrote on 2009-05-15 23:43:02: > Daniel Veillard wrote: > > On Thu, May 14, 2009 at 06:28:53PM -0700, Kaitlin Rupert wrote: > >> Deepti B Kalakeri wrote: > >>> +1 > >>> > >> This patch looks good. libvirt.org is down, but I'll check it in > >> tomorrow when it's backup. Same goes for RPCS/07 > > > > Nahh, it's working for me > > > > [root at xmlsoft ~]# w > > 17:39:53 up 39 days, 3:23, 1 user, load average: 0.01, 0.15, 0.21 > > > > I just had to restart apache for some reason > > > > Ah, that would explain why I couldn't pull from the repo ;) No worries, > though. Thanks for the apache restart! 
> I can't pull updates from the repo, and it reports error as follows: [root at localhost cimtest]# hg pull -u abort: error: Temporary failure in name resolution Thoughts? > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -------------- next part -------------- An HTML attachment was scrubbed... URL: From deeptik at linux.vnet.ibm.com Mon May 18 06:11:43 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Mon, 18 May 2009 11:41:43 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Add general functions forpoolverification In-Reply-To: References: Message-ID: <4A10FC1F.1060108@linux.vnet.ibm.com> Guo Lian Yun wrote: > > libvirt-cim-bounces at redhat.com wrote on 2009-05-15 23:43:02: > > > Daniel Veillard wrote: > > > On Thu, May 14, 2009 at 06:28:53PM -0700, Kaitlin Rupert wrote: > > >> Deepti B Kalakeri wrote: > > >>> +1 > > >>> > > >> This patch looks good. libvirt.org is down, but I'll check it in > > >> tomorrow when it's backup. Same goes for RPCS/07 > > > > > > Nahh, it's working for me > > > > > > [root at xmlsoft ~]# w > > > 17:39:53 up 39 days, 3:23, 1 user, load average: 0.01, 0.15, 0.21 > > > > > > I just had to restart apache for some reason > > > > > > > Ah, that would explain why I couldn't pull from the repo ;) No > worries, > > though. Thanks for the apache restart! > > > I can't pull updates from the repo, and it reports error as follows: > [root at localhost cimtest]# hg pull -u > abort: error: Temporary failure in name resolution > > Thoughts? I was able to do a hg clone and hg pull this morning. May be the problem was temporary. Please try again. > > > -- > > Kaitlin Rupert > > IBM Linux Technology Center > > kaitlin at linux.vnet.ibm.com > > > > _______________________________________________ > > Libvirt-cim mailing list > > Libvirt-cim at redhat.com > > https://www.redhat.com/mailman/listinfo/libvirt-cim > ------------------------------------------------------------------------ > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Mon May 18 06:36:29 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Sun, 17 May 2009 23:36:29 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Fixing SAE/01_forward.py SAE tc Message-ID: <6dc2d815e480237c9111.1242628589@localhost.localdomain> # HG changeset patch # User Deepti B. Kalakeri # Date 1242628498 25200 # Node ID 6dc2d815e480237c91115cd0d86f6325503e33f7 # Parent 43fb40db432952d38509a76e92e61d7d3d3702f7 [TEST] Fixing SAE/01_forward.py SAE tc. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r 43fb40db4329 -r 6dc2d815e480 suites/libvirt-cim/cimtest/ServiceAffectsElement/01_forward.py --- a/suites/libvirt-cim/cimtest/ServiceAffectsElement/01_forward.py Fri May 15 14:03:39 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ServiceAffectsElement/01_forward.py Sun May 17 23:34:58 2009 -0700 @@ -115,7 +115,7 @@ assoc_insts = {} try: assoc_insts, status = get_dom_records(an, assoc_info, assoc_insts) - if status != PASS or len(assoc_insts) != 3: + if status != PASS or len(assoc_insts) < 1 : raise Exception("Failed to get insts for domain %s" % test_dom) in_list, status = init_list_for_compare(server, virt) @@ -124,7 +124,7 @@ in_list_keys = Set(in_list.keys()) assoc_list_keys = Set(assoc_insts.keys()) - if len(in_list_keys & assoc_list_keys) != 3: + if len(in_list_keys & assoc_list_keys) < 1 : raise Exception("Mistmatching Class Names, expected %s, got %s" \ % (in_list_keys, assoc_list_keys)) From yunguol at cn.ibm.com Mon May 18 09:28:43 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Mon, 18 May 2009 02:28:43 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Update RPCS/04 with the latest updatesof pool verification Message-ID: # HG changeset patch # User Yun Guo Lian # Date 1242638914 25200 # Node ID eb0bbc2200a1b3c1649dcbe921f7e7103c2345a0 # Parent 43fb40db432952d38509a76e92e61d7d3d3702f7 [TEST] #2 Update RPCS/04 with the latest updatesof pool verification Tested for KVM with current sources Signed-off-by: Guolian Yun Message-ID: This tc fails for routed and isolated mode because that the forward mode is "nat" always when we dumpxml in verify_pool() function. Although the networkpool is created with routed/isolated ForwardMode, the forward mode of dump netxml is "nat". Is there any error in mode setting on cimtest? Thanks! libvirt-cim-bounces at redhat.com wrote on 2009-05-18 17:28:43: > # HG changeset patch > # User Yun Guo Lian > # Date 1242638914 25200 > # Node ID eb0bbc2200a1b3c1649dcbe921f7e7103c2345a0 > # Parent 43fb40db432952d38509a76e92e61d7d3d3702f7 > [TEST] #2 Update RPCS/04 with the latest updatesof pool verification > > > Tested for KVM with current sources > Signed-off-by: Guolian Yun > diff -r 43fb40db4329 -r eb0bbc2200a1 suites/libvirt- > cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py > --- a/suites/libvirt- > cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool. > py Fri May 15 14:03:39 2009 -0700 > +++ b/suites/libvirt- > cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool. > py Mon May 18 02:28:34 2009 -0700 > @@ -39,45 +39,73 @@ > # OUT -- Error -- String -- Encoded error instance if the operation > # failed and did not return a job > # > -# REVISIT : > -# -------- > -# As of now the CreateChildResourcePool() simply throws an Exception. > -# We must improve this tc once the service is implemented. 
> -# > -# > -Date: 20.02.2008 > - > +# Exception details before Revision 837 > +# ----- > +# Error code: CIM_ERR_NOT_SUPPORTED > +# > +# After revision 837, the service is implemented > +# > +# -Date: 20.02.2008 > > import sys > -import pywbem > -from XenKvmLib import rpcs_service > +import random > from CimTest.Globals import logger > from CimTest.ReturnCodes import FAIL, PASS > from XenKvmLib.const import do_main, platform_sup > from XenKvmLib.classes import get_typed_class > +from XenKvmLib.common_util import destroy_netpool > +from XenKvmLib.pool import create_netpool, verify_pool, undefine_netpool > > -cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > -cim_mname = "CreateChildResourcePool" > +test_pool = "testpool" > +test_mode = ["None", "nat", "route eth1"] > > @do_main(platform_sup) > def main(): > + status = PASS > options = main.options > - rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \ > - "ResourcePoolConfigurationService"))(options.ip) > - try: > - rpcs_conn.CreateChildResourcePool() > - except pywbem.CIMError, (err_no, desc): > - if err_no == cim_errno : > - logger.info("Got expected exception for '%s' service", cim_mname) > - logger.info("Errno is '%s' ", err_no) > - logger.info("Error string is '%s'", desc) > - return PASS > - else: > - logger.error("Unexpected rc code %s and description %s\n", > - err_no, desc) > + > + np = get_typed_class(options.virt, 'NetworkPool') > + np_id = "NetworkPool/%s" % test_pool > + > + subnet = '192.168.0.' > + ip_base = random.randint(1, 100) > + addr = subnet+'%d' % ip_base > + range_addr_start = subnet+'%d' % (ip_base + 1) > + range_addr_end = subnet+'%d' %(ip_base + 10) > + pool_attr = { > + "Address" : addr, > + "Netmask" : "255.255.255.0", > + "IPRangeStart" : range_addr_start, > + "IPRangeEnd" : range_addr_end > + } > + for i in range(0, len(test_mode)): > + pool_attr["ForwardMode"] = test_mode[i] > + > + status = create_netpool(options.ip, options.virt, > + test_pool, pool_attr) > + if status != PASS: > + logger.error("Error in networkpool creation") > return FAIL > - > - logger.error("The execution should not have reached here!!") > - return FAIL > + > + status = verify_pool(options.ip, options.virt, np, > + test_pool, pool_attr) > + if status != PASS: > + logger.error("Error in networkpool verification") > + destroy_netpool(options.ip, options.virt, test_pool) > + undefine_netpool(options.ip, options.virt, test_pool) > + return FAIL > + > + status = destroy_netpool(options.ip, options.virt, test_pool) > + if status != PASS: > + logger.error("Unable to destroy networkpool %s", test_pool) > + return FAIL > + > + status = undefine_netpool(options.ip, options.virt, test_pool) > + if status != PASS: > + logger.error("Unable to undefine networkpool %s", test_pool) > + return FAIL > + > + return status > + > if __name__ == "__main__": > sys.exit(main()) > - > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From yunguol at cn.ibm.com Mon May 18 10:05:47 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Mon, 18 May 2009 18:05:47 +0800 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Update RPCS/04 with the latest updatesof pool verification In-Reply-To: Message-ID: libvirt-cim-bounces at redhat.com wrote on 2009-05-18 17:27:04: > > This tc fails for routed and isolated mode because that the forward > mode is "nat" always when we dumpxml in verify_pool() function. > Although the networkpool is created with routed/isolated > ForwardMode, the forward mode of dump netxml is "nat". > > Is there any error in mode setting on cimtest? > Thanks! > I'm not sure if this failure because of the test_mode = ["None", "nat", "route eth1"] setting in tc. Below are the part code of libvirt-cim provider. I tried to define the test_mode as [0, 1, 2], but it fails yet. How to set the test_mode in cimtest for different types? if (cu_get_u16_prop(inst, "ForwardMode", &type) != CMPI_RC_OK) { pool->pool_info.net.forward_mode = strdup("nat"); } else { free(pool->pool_info.net.forward_mode); switch (type) { case NETPOOL_FORWARD_NONE: pool->pool_info.net.forward_mode = NULL; break; case NETPOOL_FORWARD_NAT: pool->pool_info.net.forward_mode = strdup("nat"); break; case NETPOOL_FORWARD_ROUTED: pool->pool_info.net.forward_mode = strdup("route"); break; default: return "Storage pool type not supported"; > > libvirt-cim-bounces at redhat.com wrote on 2009-05-18 17:28:43: > > > # HG changeset patch > > # User Yun Guo Lian > > # Date 1242638914 25200 > > # Node ID eb0bbc2200a1b3c1649dcbe921f7e7103c2345a0 > > # Parent 43fb40db432952d38509a76e92e61d7d3d3702f7 > > [TEST] #2 Update RPCS/04 with the latest updatesof pool verification > > > > > > Tested for KVM with current sources > > Signed-off-by: Guolian Yun > > > diff -r 43fb40db4329 -r eb0bbc2200a1 suites/libvirt- > > cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py > > --- a/suites/libvirt- > > cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool. > > py Fri May 15 14:03:39 2009 -0700 > > +++ b/suites/libvirt- > > cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool. > > py Mon May 18 02:28:34 2009 -0700 > > @@ -39,45 +39,73 @@ > > # OUT -- Error -- String -- Encoded error instance if the operation > > # failed and did not return a job > > # > > -# REVISIT : > > -# -------- > > -# As of now the CreateChildResourcePool() simply throws an Exception. > > -# We must improve this tc once the service is implemented. > > -# > > -# > > -Date: 20.02.2008 > > - > > +# Exception details before Revision 837 > > +# ----- > > +# Error code: CIM_ERR_NOT_SUPPORTED > > +# > > +# After revision 837, the service is implemented > > +# > > +# -Date: 20.02.2008 > > > > import sys > > -import pywbem > > -from XenKvmLib import rpcs_service > > +import random > > from CimTest.Globals import logger > > from CimTest.ReturnCodes import FAIL, PASS > > from XenKvmLib.const import do_main, platform_sup > > from XenKvmLib.classes import get_typed_class > > +from XenKvmLib.common_util import destroy_netpool > > +from XenKvmLib.pool import create_netpool, verify_pool, undefine_netpool > > > > -cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > > -cim_mname = "CreateChildResourcePool" > > +test_pool = "testpool" > > +test_mode = ["None", "nat", "route eth1"] > > > > @do_main(platform_sup) > > def main(): > > + status = PASS > > options = main.options > > - rpcs_conn = eval("rpcs_service." 
+ get_typed_class(options.virt, \ > > - "ResourcePoolConfigurationService"))(options.ip) > > - try: > > - rpcs_conn.CreateChildResourcePool() > > - except pywbem.CIMError, (err_no, desc): > > - if err_no == cim_errno : > > - logger.info("Got expected exception for '%s' > service", cim_mname) > > - logger.info("Errno is '%s' ", err_no) > > - logger.info("Error string is '%s'", desc) > > - return PASS > > - else: > > - logger.error("Unexpected rc code %s and description %s\n", > > - err_no, desc) > > + > > + np = get_typed_class(options.virt, 'NetworkPool') > > + np_id = "NetworkPool/%s" % test_pool > > + > > + subnet = '192.168.0.' > > + ip_base = random.randint(1, 100) > > + addr = subnet+'%d' % ip_base > > + range_addr_start = subnet+'%d' % (ip_base + 1) > > + range_addr_end = subnet+'%d' %(ip_base + 10) > > + pool_attr = { > > + "Address" : addr, > > + "Netmask" : "255.255.255.0", > > + "IPRangeStart" : range_addr_start, > > + "IPRangeEnd" : range_addr_end > > + } > > + for i in range(0, len(test_mode)): > > + pool_attr["ForwardMode"] = test_mode[i] > > + > > + status = create_netpool(options.ip, options.virt, > > + test_pool, pool_attr) > > + if status != PASS: > > + logger.error("Error in networkpool creation") > > return FAIL > > - > > - logger.error("The execution should not have reached here!!") > > - return FAIL > > + > > + status = verify_pool(options.ip, options.virt, np, > > + test_pool, pool_attr) > > + if status != PASS: > > + logger.error("Error in networkpool verification") > > + destroy_netpool(options.ip, options.virt, test_pool) > > + undefine_netpool(options.ip, options.virt, test_pool) > > + return FAIL > > + > > + status = destroy_netpool(options.ip, options.virt, test_pool) > > + if status != PASS: > > + logger.error("Unable to destroy networkpool %s", test_pool) > > + return FAIL > > + > > + status = undefine_netpool(options.ip, options.virt, test_pool) > > + if status != PASS: > > + logger.error("Unable to undefine networkpool %s", test_pool) > > + return FAIL > > + > > + return status > > + > > if __name__ == "__main__": > > sys.exit(main()) > > - > > > > _______________________________________________ > > Libvirt-cim mailing list > > Libvirt-cim at redhat.com > > https://www.redhat.com/mailman/listinfo/libvirt-cim > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -------------- next part -------------- An HTML attachment was scrubbed... URL: From deeptik at linux.vnet.ibm.com Mon May 18 11:35:51 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Mon, 18 May 2009 17:05:51 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Update RPCS/04 with the latest updatesof pool verification In-Reply-To: References: Message-ID: <4A114817.8020306@linux.vnet.ibm.com> Guo Lian Yun wrote: > > libvirt-cim-bounces at redhat.com wrote on 2009-05-18 17:27:04: > > > > > This tc fails for routed and isolated mode because that the forward > > mode is "nat" always when we dumpxml in verify_pool() function. > > Although the networkpool is created with routed/isolated > > ForwardMode, the forward mode of dump netxml is "nat". > > > > Is there any error in mode setting on cimtest? > > Thanks! > > > I'm not sure if this failure because of the test_mode = ["None", > "nat", "route eth1"] setting in tc. > Below are the part code of libvirt-cim provider. I tried to define > the test_mode as [0, 1, 2], but it > fails yet. How to set the test_mode in cimtest for different types? 
> > > if (cu_get_u16_prop(inst, "ForwardMode", &type) != CMPI_RC_OK) { > pool->pool_info.net.forward_mode = strdup("nat"); > } else { > free(pool->pool_info.net.forward_mode); > > switch (type) { > case NETPOOL_FORWARD_NONE: > pool->pool_info.net.forward_mode = NULL; > break; > case NETPOOL_FORWARD_NAT: > pool->pool_info.net.forward_mode = strdup("nat"); > break; > case NETPOOL_FORWARD_ROUTED: > pool->pool_info.net.forward_mode = > strdup("route"); > break; > default: > return "Storage pool type not supported"; > I checked this too in Virt_RPCS.c file.. Here is the XML that is generated for the route type: xmlgen.c(981): Created pool XML: testpool Seems like only the if condition is getting executed for some reason. Daisy, In your test case I think we need to specify pool_attr["ForwardMode"] = "route" and pool_attr["ForwardDevice"] = "eth1" for route type. > > > > libvirt-cim-bounces at redhat.com wrote on 2009-05-18 17:28:43: > > > > > # HG changeset patch > > > # User Yun Guo Lian > > > # Date 1242638914 25200 > > > # Node ID eb0bbc2200a1b3c1649dcbe921f7e7103c2345a0 > > > # Parent 43fb40db432952d38509a76e92e61d7d3d3702f7 > > > [TEST] #2 Update RPCS/04 with the latest updatesof pool verification > > > > > > > > > Tested for KVM with current sources > > > Signed-off-by: Guolian Yun > > > > > diff -r 43fb40db4329 -r eb0bbc2200a1 suites/libvirt- > > > > cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py > > > --- a/suites/libvirt- > > > > cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool. > > > py Fri May 15 14:03:39 2009 -0700 > > > +++ b/suites/libvirt- > > > > cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool. > > > py Mon May 18 02:28:34 2009 -0700 > > > @@ -39,45 +39,73 @@ > > > # OUT -- Error -- String -- Encoded error instance if the operation > > > # failed and did not return a job > > > # > > > -# REVISIT : > > > -# -------- > > > -# As of now the CreateChildResourcePool() simply throws an Exception. > > > -# We must improve this tc once the service is implemented. > > > -# > > > -# > > > -Date: 20.02.2008 > > > - > > > +# Exception details before Revision 837 > > > +# ----- > > > +# Error code: CIM_ERR_NOT_SUPPORTED > > > +# > > > +# After revision 837, the service is implemented > > > +# > > > +# -Date: 20.02.2008 > > > > > > import sys > > > -import pywbem > > > -from XenKvmLib import rpcs_service > > > +import random > > > from CimTest.Globals import logger > > > from CimTest.ReturnCodes import FAIL, PASS > > > from XenKvmLib.const import do_main, platform_sup > > > from XenKvmLib.classes import get_typed_class > > > +from XenKvmLib.common_util import destroy_netpool > > > +from XenKvmLib.pool import create_netpool, verify_pool, > undefine_netpool > > > > > > -cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > > > -cim_mname = "CreateChildResourcePool" > > > +test_pool = "testpool" > > > +test_mode = ["None", "nat", "route eth1"] > > > > > > @do_main(platform_sup) > > > def main(): > > > + status = PASS > > > options = main.options > > > - rpcs_conn = eval("rpcs_service." 
+ > get_typed_class(options.virt, \ > > > - > "ResourcePoolConfigurationService"))(options.ip) > > > - try: > > > - rpcs_conn.CreateChildResourcePool() > > > - except pywbem.CIMError, (err_no, desc): > > > - if err_no == cim_errno : > > > - logger.info("Got expected exception for '%s' > > service", cim_mname) > > > - logger.info("Errno is '%s' ", err_no) > > > - logger.info("Error string is '%s'", desc) > > > - return PASS > > > - else: > > > - logger.error("Unexpected rc code %s and description > %s\n", > > > - err_no, desc) > > > + > > > + np = get_typed_class(options.virt, 'NetworkPool') > > > + np_id = "NetworkPool/%s" % test_pool > > > + > > > + subnet = '192.168.0.' > > > + ip_base = random.randint(1, 100) > > > + addr = subnet+'%d' % ip_base > > > + range_addr_start = subnet+'%d' % (ip_base + 1) > > > + range_addr_end = subnet+'%d' %(ip_base + 10) > > > + pool_attr = { > > > + "Address" : addr, > > > + "Netmask" : "255.255.255.0", > > > + "IPRangeStart" : range_addr_start, > > > + "IPRangeEnd" : range_addr_end > > > + } > > > + for i in range(0, len(test_mode)): > > > + pool_attr["ForwardMode"] = test_mode[i] > > > + > > > + status = create_netpool(options.ip, options.virt, > > > + test_pool, pool_attr) > > > + if status != PASS: > > > + logger.error("Error in networkpool creation") > > > return FAIL > > > - > > > - logger.error("The execution should not have reached here!!") > > > - return FAIL > > > + > > > + status = verify_pool(options.ip, options.virt, np, > > > + test_pool, pool_attr) > > > + if status != PASS: > > > + logger.error("Error in networkpool verification") > > > + destroy_netpool(options.ip, options.virt, test_pool) > > > + undefine_netpool(options.ip, options.virt, test_pool) > > > + return FAIL > > > + > > > + status = destroy_netpool(options.ip, options.virt, test_pool) > > > + if status != PASS: > > > + logger.error("Unable to destroy networkpool %s", > test_pool) > > > + return FAIL > > > + > > > + status = undefine_netpool(options.ip, options.virt, > test_pool) > > > + if status != PASS: > > > + logger.error("Unable to undefine networkpool %s", > test_pool) > > > + return FAIL > > > + > > > + return status > > > + > > > if __name__ == "__main__": > > > sys.exit(main()) > > > - > > > > > > _______________________________________________ > > > Libvirt-cim mailing list > > > Libvirt-cim at redhat.com > > > https://www.redhat.com/mailman/listinfo/libvirt-cim > > _______________________________________________ > > Libvirt-cim mailing list > > Libvirt-cim at redhat.com > > https://www.redhat.com/mailman/listinfo/libvirt-cim > ------------------------------------------------------------------------ > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Mon May 18 17:18:55 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 18 May 2009 10:18:55 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Update RPCS/04 with the latest updatesof pool verification In-Reply-To: <4A114817.8020306@linux.vnet.ibm.com> References: <4A114817.8020306@linux.vnet.ibm.com> Message-ID: <4A11987F.6080101@linux.vnet.ibm.com> >> Below are the part code of libvirt-cim provider. I tried to define >> the test_mode as [0, 1, 2], but it What failure are you seeing? This works for me.. 
here's my pywbem script: from pywbem import WBEMConnection, CIMInstanceName, CIMInstance, cim_types c = WBEMConnection("http://localhost", ("root", "pass"), "root/virt") c.debug = True iname = CIMInstanceName('KVM_NetPoolResourceAllocationSettingData', namespace='root/virt',keybindings = {'InstanceID':'DiskPool/meep-net'}) rasd = CIMInstance('KVM_NetPoolResourceAllocationSettingData', path=iname, properties={"Address":"192.168.0.4", "Netmask":"255.255.254.0", "IPRangeStart":"192.168.0.5", "IPRangeEnd":"192.168.0.27", "ForwardMode":cim_types.Uint16(2), "ForwardDevice":"eth1"}) rasds = [rasd.tomof()] print rasds res = c.InvokeMethod("CreateChildResourcePool", "KVM_ResourcePoolConfigurationService", Settings=rasds, ElementName="meep-net") print c.last_request >> fails yet. How to set the test_mode in cimtest for different types? >> >> >> if (cu_get_u16_prop(inst, "ForwardMode", &type) != CMPI_RC_OK) { >> pool->pool_info.net.forward_mode = strdup("nat"); >> } else { >> free(pool->pool_info.net.forward_mode); >> >> switch (type) { >> case NETPOOL_FORWARD_NONE: >> pool->pool_info.net.forward_mode = NULL; >> break; >> case NETPOOL_FORWARD_NAT: >> pool->pool_info.net.forward_mode = strdup("nat"); >> break; >> case NETPOOL_FORWARD_ROUTED: >> pool->pool_info.net.forward_mode = >> strdup("route"); >> break; >> default: >> return "Storage pool type not supported"; >> > I checked this too in Virt_RPCS.c file.. > Here is the XML that is generated for the route type: > xmlgen.c(981): Created pool XML: > > testpool > > > > > > > > > > Seems like only the if condition is getting executed for some reason. > > Daisy, > > In your test case I think we need to specify pool_attr["ForwardMode"] = > "route" and pool_attr["ForwardDevice"] = "eth1" for route type. > The ForwardDevice is a uint16 - it needs to take a number. If you look at the schema: [Description ("Network pool forwarding mode"), ValueMap {"0", "1", "2"}, Values {"None", "NAT", "Routed"}] uint16 ForwardMode; In Daisy's test, she is using strings, but it should be the following: pool_attr["ForwardMode"] = 2 #This will give you a route pool type. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Mon May 18 17:24:35 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 18 May 2009 10:24:35 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Update RPCS/04 with the latest updatesof pool verification In-Reply-To: References: Message-ID: <4A1199D3.3060708@linux.vnet.ibm.com> > +test_mode = ["None", "nat", "route eth1"] If you want to set the forward device as eth1, you'll need to use the ForwardDevice attribute. See the schema (or the template NetPoolRASDs) for more info. > + for i in range(0, len(test_mode)): > + pool_attr["ForwardMode"] = test_mode[i] ForwardMode needs to be an int, not a string. Really, you should be using the template NetPoolRASD for this. These have the values set appropriately. You can call get_pool_rasds(), and then pull the RASD you want from the list that is returned. Then you won't have to worry about setting the properties appropriately. 
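(A rough sketch of that template-based approach, in the style of a cimtest script. get_pool_rasds() and the ForwardMode ValueMap come from this thread, but the helper's argument list, its return shape, and the dict-style property access below are assumptions for illustration only, not copied from XenKvmLib.pool:)

# Illustrative only -- the get_pool_rasds() signature and return format
# shown here are assumed, not copied from the real XenKvmLib.pool helper.
from XenKvmLib.pool import get_pool_rasds

server = "localhost"   # placeholder system running the CIMOM
virt = "KVM"           # placeholder virtualization type

# ForwardMode ValueMap from the schema: 0 = None, 1 = NAT, 2 = Routed
FORWARD_ROUTED = 2

status, net_rasds = get_pool_rasds(server, virt, pool_type="NetworkPool")
for rasd in net_rasds:
    # Assuming dict-style access to the template's properties.
    if rasd["ForwardMode"] == FORWARD_ROUTED:
        # The template already carries a consistent ForwardMode /
        # ForwardDevice pair, so it can be passed to
        # CreateChildResourcePool() as-is instead of hand-building
        # the property dictionary.
        routed_template = rasd
        break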
> + > + status = create_netpool(options.ip, options.virt, > + test_pool, pool_attr) > + if status != PASS: > + logger.error("Error in networkpool creation") > return FAIL > - > - logger.error("The execution should not have reached here!!") > - return FAIL > + > + status = verify_pool(options.ip, options.virt, np, > + test_pool, pool_attr) > + if status != PASS: > + logger.error("Error in networkpool verification") > + destroy_netpool(options.ip, options.virt, test_pool) > + undefine_netpool(options.ip, options.virt, test_pool) > + return FAIL > + > + status = destroy_netpool(options.ip, options.virt, test_pool) > + if status != PASS: > + logger.error("Unable to destroy networkpool %s", test_pool) > + return FAIL > + > + status = undefine_netpool(options.ip, options.virt, test_pool) > + if status != PASS: > + logger.error("Unable to undefine networkpool %s", test_pool) > + return FAIL Instead of setting PASS at the top of the test case, set status = PASS here. This helps prevent returning a false positive. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Mon May 18 23:39:46 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 18 May 2009 16:39:46 -0700 Subject: [Libvirt-cim] [PATCH 3 of 5] Remove migration related super classes In-Reply-To: References: Message-ID: <278f70bf80e0b192f79d.1242689986@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242225647 25200 # Node ID 278f70bf80e0b192f79d9af225b161ea795e852a # Parent 1492b21f3650f7febcfcade71c75b58a6c25982f Remove migration related super classes These classes are now a part of the 2.21 schema. Also remove the TransportType attribute - this is part of the VirtualSystemMigrationSettingData class. The valuemap has changed, so the UNIX transport type needs to have a value of 32768 instead of 6. Add new parameters to migration methods - make these optional since we don't currently use them. Signed-off-by: Kaitlin Rupert diff -r 1492b21f3650 -r 278f70bf80e0 schema/VSMigrationCapabilities.mof --- a/schema/VSMigrationCapabilities.mof Wed May 13 07:40:47 2009 -0700 +++ b/schema/VSMigrationCapabilities.mof Wed May 13 07:40:47 2009 -0700 @@ -1,15 +1,5 @@ // Copyright IBM Corp. 2007 -class CIM_VirtualSystemMigrationCapabilities : CIM_Capabilities { - - uint16 DestinationHostFormatsSupported[]; - - uint16 SynchronousMethodsSupported[]; - - uint16 AsynchronousMethodsSupported[]; - -}; - [Provider("cmpi::Virt_VSMigrationCapabilities")] class Xen_VirtualSystemMigrationCapabilities : CIM_VirtualSystemMigrationCapabilities { }; diff -r 1492b21f3650 -r 278f70bf80e0 schema/VSMigrationService.mof --- a/schema/VSMigrationService.mof Wed May 13 07:40:47 2009 -0700 +++ b/schema/VSMigrationService.mof Wed May 13 07:40:47 2009 -0700 @@ -1,70 +1,5 @@ // Copyright IBM Corp. 
2007 -// Placeholder definition until schema is available upstream - -class CIM_VirtualSystemMigrationService : CIM_Service { - uint32 CheckVirtualSystemIsMigratableToHost( - [In] - CIM_ComputerSystem REF ComputerSystem, - [In] - string DestinationHost, - [In, EmbeddedInstance("CIM_SettingData")] - string MigrationSettingData, - [In, EmbeddedInstance("CIM_VirtualSystemSettingData")] - string NewSystemSettingData, - [In, EmbeddedInstance("CIM_ResourceAllocationSettingData")] - string NewResourceSettingData[], - [Out] - boolean IsMigratable - ); - - uint32 CheckVirtualSystemIsMigratableToSystem( - [In] - CIM_ComputerSystem REF ComputerSystem, - [In] - CIM_System REF DestinationSystem, - [In, EmbeddedInstance("CIM_SettingData")] - string MigrationSettingData, - [In, EmbeddedInstance("CIM_VirtualSystemSettingData")] - string NewSystemSettingData, - [In, EmbeddedInstance("CIM_ResourceAllocationSettingData")] - string NewResourceSettingData[], - [Out] - boolean IsMigratable - ); - - - uint32 MigrateVirtualSystemToHost( - [In] - CIM_ComputerSystem REF ComputerSystem, - [In] - string DestinationHost, - [In, EmbeddedInstance("CIM_SettingData")] - string MigrationSettingData, - [In, EmbeddedInstance("CIM_VirtualSystemSettingData")] - string NewSystemSettingData, - [In, EmbeddedInstance("CIM_ResourceAllocationSettingData")] - string NewResourceSettingData[], - [Out] - CIM_ConcreteJob REF Job - ); - - uint32 MigrateVirtualSystemToSystem( - [In] - CIM_ComputerSystem REF ComputerSystem, - [In] - CIM_System REF DestinationSystem, - [In, EmbeddedInstance("CIM_SettingData")] - string MigrationSettingData, - [In, EmbeddedInstance("CIM_VirtualSystemSettingData")] - string NewSystemSettingData, - [In, EmbeddedInstance("CIM_ResourceAllocationSettingData")] - string NewResourceSettingData[], - [Out] - CIM_ConcreteJob REF Job - ); -}; - class Xen_MigrationJob : CIM_ConcreteJob { }; diff -r 1492b21f3650 -r 278f70bf80e0 schema/VSMigrationSettingData.mof --- a/schema/VSMigrationSettingData.mof Wed May 13 07:40:47 2009 -0700 +++ b/schema/VSMigrationSettingData.mof Wed May 13 07:40:47 2009 -0700 @@ -1,45 +1,16 @@ // Copyright IBM Corp. 
2007 -class CIM_VirtualSystemMigrationSettingData : CIM_SettingData { - [ Description( - "MigrationType describes a type of migration operation " - "to be performed.\n" - "A value of 2 - Virtual System is to be migrated in a 'live' " - "manner such that the running of the Virtual System is " - "minimally impacted during the move.\n" - "A value of 3 - Virtual System will be temporarily paused " - "prior to migration and then resume running after it is " - "moved.\n" - "A value of 4 - The Virtual System will be quiesced to a " - "stopped state prior to migration and then restarted after " - "it is moved."), - ValueMap {"0","1","2","3","4"}, - Values { "Unknown", "Other", "Live", "Resume", "Restart" }] - uint16 MigrationType; - - uint16 Priority; -}; - [Provider("cmpi::Virt_VSMigrationSettingData")] class Xen_VirtualSystemMigrationSettingData : CIM_VirtualSystemMigrationSettingData { - [ ValueMap {"0","1","2","3","4","5","6"}, - Values { "Unknown", "Other", "SSH", "TLS", "TLS Strict", "TCP", "UNIX" }] - uint16 TransportType; string CheckParameters[]; }; [Provider("cmpi::Virt_VSMigrationSettingData")] class KVM_VirtualSystemMigrationSettingData : CIM_VirtualSystemMigrationSettingData { - [ ValueMap {"0","1","2","3","4","5","6"}, - Values { "Unknown", "Other", "SSH", "TLS", "TLS Strict", "TCP", "UNIX" }] - uint16 TransportType; string CheckParameters[]; }; [Provider("cmpi::Virt_VSMigrationSettingData")] class LXC_VirtualSystemMigrationSettingData : CIM_VirtualSystemMigrationSettingData { - [ ValueMap {"0","1","2","3","4","5","6"}, - Values { "Unknown", "Other", "SSH", "TLS", "TLS Strict", "TCP", "UNIX" }] - uint16 TransportType; string CheckParameters[]; }; diff -r 1492b21f3650 -r 278f70bf80e0 src/Virt_VSMigrationService.c --- a/src/Virt_VSMigrationService.c Wed May 13 07:40:47 2009 -0700 +++ b/src/Virt_VSMigrationService.c Wed May 13 07:40:47 2009 -0700 @@ -1510,6 +1510,8 @@ .args = {{"ComputerSystem", CMPI_ref, false}, {"DestinationHost", CMPI_string, false}, {"MigrationSettingData", CMPI_instance, true}, + {"NewSystemSettingData", CMPI_instance, true}, + {"NewResourceSettingData", CMPI_instanceA, true}, ARG_END } }; @@ -1520,6 +1522,8 @@ .args = {{"ComputerSystem", CMPI_ref, false}, {"DestinationSystem", CMPI_ref, false}, {"MigrationSettingData", CMPI_instance, true}, + {"NewSystemSettingData", CMPI_instance, true}, + {"NewResourceSettingData", CMPI_instanceA, true}, ARG_END } }; @@ -1530,6 +1534,8 @@ .args = {{"ComputerSystem", CMPI_ref, false}, {"DestinationHost", CMPI_string, false}, {"MigrationSettingData", CMPI_instance, true}, + {"NewSystemSettingData", CMPI_instance, true}, + {"NewResourceSettingData", CMPI_instanceA, true}, ARG_END } }; @@ -1540,6 +1546,8 @@ .args = {{"ComputerSystem", CMPI_ref, false}, {"DestinationSystem", CMPI_ref, false}, {"MigrationSettingData", CMPI_instance, true}, + {"NewSystemSettingData", CMPI_instance, true}, + {"NewResourceSettingData", CMPI_instanceA, true}, ARG_END } }; diff -r 1492b21f3650 -r 278f70bf80e0 src/Virt_VSMigrationSettingData.h --- a/src/Virt_VSMigrationSettingData.h Wed May 13 07:40:47 2009 -0700 +++ b/src/Virt_VSMigrationSettingData.h Wed May 13 07:40:47 2009 -0700 @@ -30,7 +30,7 @@ CIM_MIGRATE_URI_TLS = 3, CIM_MIGRATE_URI_TLS_STRICT = 4, CIM_MIGRATE_URI_TCP = 5, - CIM_MIGRATE_URI_UNIX = 6, + CIM_MIGRATE_URI_UNIX = 32768, } transport_type; CMPIStatus get_migration_sd(const CMPIObjectPath *ref, From kaitlin at linux.vnet.ibm.com Mon May 18 23:39:45 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 18 May 2009 16:39:45 
-0700 Subject: [Libvirt-cim] [PATCH 2 of 5] Change the names of the cimv2 and interop regiration files In-Reply-To: References: Message-ID: <1492b21f3650f7febcfc.1242689985@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242225647 25200 # Node ID 1492b21f3650f7febcfcade71c75b58a6c25982f # Parent 9e56460627520ce02a57b854fb440eb26ba6aaff Change the names of the cimv2 and interop regiration files This is so the names more closely align with the name of the 2.21 zip file provided by the DMTF. Signed-off-by: Kaitlin Rupert diff -r 9e5646062752 -r 1492b21f3650 base_schema/cimv2.21.0-cimv2_mof --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/base_schema/cimv2.21.0-cimv2_mof Wed May 13 07:40:47 2009 -0700 @@ -0,0 +1,11 @@ +Qualifier IsPUnit : boolean = false, + Scope(property, method, parameter); + +Qualifier Experimental : boolean = false, + Scope(any), + Flavor(EnableOverride, Restricted); + +#pragma include ("Core/CIM_ResourcePool.mof") +#pragma include ("Core/CIM_HostedResourcePool.mof") +#pragma include ("Core/CIM_ElementCapabilities.mof") +#pragma include ("Core/CIM_HostedService.mof") diff -r 9e5646062752 -r 1492b21f3650 base_schema/cimv2.21.0-interop_mof --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/base_schema/cimv2.21.0-interop_mof Wed May 13 07:40:47 2009 -0700 @@ -0,0 +1,28 @@ +#pragma locale ("en_US") +#pragma include ("qualifiers.mof") +#pragma include ("qualifiers_optional.mof") +#pragma include ("Core/CIM_ManagedElement.mof") +#pragma include ("Core/CIM_ManagedSystemElement.mof") +#pragma include ("Core/CIM_LogicalElement.mof") +#pragma include ("Core/CIM_EnabledLogicalElement.mof") +#pragma include ("Core/CIM_System.mof") +#pragma include ("System/CIM_ComputerSystem.mof") +#pragma include ("Interop/CIM_RegisteredSpecification.mof") +#pragma include ("Interop/CIM_RegisteredProfile.mof") +#pragma include ("Interop/CIM_RegisteredSubProfile.mof") +#pragma include ("Core/CIM_Dependency.mof") +#pragma include ("Interop/CIM_ElementConformsToProfile.mof") +#pragma include ("Interop/CIM_ReferencedSpecification.mof") +#pragma include ("Interop/CIM_ReferencedProfile.mof") +#pragma include ("Interop/CIM_SubProfileRequiresProfile.mof") +#pragma include ("Core/CIM_Service.mof") +#pragma include ("Core/CIM_SettingData.mof") +#pragma include ("Core/CIM_VirtualSystemSettingData.mof") +#pragma include ("Core/CIM_LogicalDevice.mof") +#pragma include ("Core/CIM_ResourceAllocationSettingData.mof") +#pragma include ("Interop/CIM_Error.mof") +#pragma include ("Core/CIM_Job.mof") +#pragma include ("Core/CIM_ConcreteJob.mof") +#pragma include ("Core/CIM_ResourcePool.mof") +#pragma include ("Core/CIM_Capabilities.mof") +#pragma include ("Core/CIM_AllocationCapabilities.mof") diff -r 9e5646062752 -r 1492b21f3650 base_schema/cimv216-cimv2_mof --- a/base_schema/cimv216-cimv2_mof Wed May 13 07:40:47 2009 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,11 +0,0 @@ -Qualifier IsPUnit : boolean = false, - Scope(property, method, parameter); - -Qualifier Experimental : boolean = false, - Scope(any), - Flavor(EnableOverride, Restricted); - -#pragma include ("Core/CIM_ResourcePool.mof") -#pragma include ("Core/CIM_HostedResourcePool.mof") -#pragma include ("Core/CIM_ElementCapabilities.mof") -#pragma include ("Core/CIM_HostedService.mof") diff -r 9e5646062752 -r 1492b21f3650 base_schema/cimv216-interop_mof --- a/base_schema/cimv216-interop_mof Wed May 13 07:40:47 2009 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,26 +0,0 @@ -#pragma locale ("en_US") -#pragma 
include ("qualifiers.mof") -#pragma include ("qualifiers_optional.mof") -#pragma include ("Core/CIM_ManagedElement.mof") -#pragma include ("Core/CIM_ManagedSystemElement.mof") -#pragma include ("Core/CIM_LogicalElement.mof") -#pragma include ("Core/CIM_EnabledLogicalElement.mof") -#pragma include ("Core/CIM_System.mof") -#pragma include ("System/CIM_ComputerSystem.mof") -#pragma include ("Interop/CIM_RegisteredProfile.mof") -#pragma include ("Interop/CIM_RegisteredSubProfile.mof") -#pragma include ("Core/CIM_Dependency.mof") -#pragma include ("Interop/CIM_ElementConformsToProfile.mof") -#pragma include ("Interop/CIM_ReferencedProfile.mof") -#pragma include ("Interop/CIM_SubProfileRequiresProfile.mof") -#pragma include ("Core/CIM_Service.mof") -#pragma include ("Core/CIM_SettingData.mof") -#pragma include ("Core/CIM_VirtualSystemSettingData.mof") -#pragma include ("Core/CIM_LogicalDevice.mof") -#pragma include ("Core/CIM_ResourceAllocationSettingData.mof") -#pragma include ("Interop/CIM_Error.mof") -#pragma include ("Core/CIM_Job.mof") -#pragma include ("Core/CIM_ConcreteJob.mof") -#pragma include ("Core/CIM_ResourcePool.mof") -#pragma include ("Core/CIM_Capabilities.mof") -#pragma include ("Core/CIM_AllocationCapabilities.mof") From kaitlin at linux.vnet.ibm.com Mon May 18 23:39:47 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 18 May 2009 16:39:47 -0700 Subject: [Libvirt-cim] [PATCH 4 of 5] CRS attribute MaxConcurrentEnabledSAPs has changed to MaxCurrentEnabledSAPs In-Reply-To: References: Message-ID: <63db2ab071a4a4505393.1242689987@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242225647 25200 # Node ID 63db2ab071a4a4505393456de90217aaf8dc3a75 # Parent 278f70bf80e0b192f79d9af225b161ea795e852a CRS attribute MaxConcurrentEnabledSAPs has changed to MaxCurrentEnabledSAPs Fixing name change of attributes in ConsoleRedirectionService Signed-off-by: Kaitlin Rupert diff -r 278f70bf80e0 -r 63db2ab071a4 src/Virt_ConsoleRedirectionService.c --- a/src/Virt_ConsoleRedirectionService.c Wed May 13 07:40:47 2009 -0700 +++ b/src/Virt_ConsoleRedirectionService.c Wed May 13 07:40:47 2009 -0700 @@ -84,7 +84,7 @@ (CMPIValue *)&array, CMPI_uint16A); prop_val = (uint16_t)MAX_SAP_SESSIONS; - CMSetProperty(inst, "MaxConcurrentEnabledSAPs", + CMSetProperty(inst, "MaxCurrentEnabledSAPs", (CMPIValue *)&prop_val, CMPI_uint16); prop_val = (uint16_t)CIM_CRS_SHARING_MODE; From kaitlin at linux.vnet.ibm.com Mon May 18 23:39:43 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 18 May 2009 16:39:43 -0700 Subject: [Libvirt-cim] [PATCH 0 of 5] Migrate from schema 2.16 to 2.21 Message-ID: This patchset provides the set of changes needed to upgrade to schema 2.21. The easiest way to test this is to do the following: 1) make preuninstall 2) make uninstall 3) Reconfigure your source tree to pick up the makefile changes 4) make 5) make preinstall 6) Restart your CIMOM (to be sure to pick up the new schema) 7) make install 8) make postinstall I've also tested this by uninstalling Pegasus / sfcb and removing the repository. Then reinstalling Pegasus / sfcb and doing the steps above.
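(Given the MaxConcurrentEnabledSAPs to MaxCurrentEnabledSAPs rename in patch 4 of 5 above, a quick pywbem query can confirm which spelling an installed build actually exposes after the schema upgrade. This is only a sketch: the connection details and the KVM_ class prefix are placeholders, and the Xen_/LXC_ classes work the same way:)

# Sketch: report whichever SAP-limit property name the installed
# ConsoleRedirectionService instances carry.
from pywbem import WBEMConnection

conn = WBEMConnection("http://localhost", ("root", "pass"), "root/virt")

for inst in conn.EnumerateInstances("KVM_ConsoleRedirectionService",
                                    namespace="root/virt"):
    for prop in ("MaxConcurrentEnabledSAPs", "MaxCurrentEnabledSAPs"):
        if prop in inst.properties:
            print "%s = %s" % (prop, inst[prop])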
From kaitlin at linux.vnet.ibm.com Mon May 18 23:39:48 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 18 May 2009 16:39:48 -0700 Subject: [Libvirt-cim] [PATCH 5 of 5] Remove VSSS definition from implementation specific mof In-Reply-To: References: Message-ID: <853db606f33508671061.1242689988@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242689958 25200 # Node ID 853db606f335086710612450a93bfd67c1293922 # Parent 63db2ab071a4a4505393456de90217aaf8dc3a75 Remove VSSS definition from implementation specific mof The VirtualSystemSnapshotService class is available in the 2.21 schema. Signed-off-by: Kaitlin Rupert diff -r 63db2ab071a4 -r 853db606f335 schema/VirtualSystemSnapshotService.mof --- a/schema/VirtualSystemSnapshotService.mof Wed May 13 07:40:47 2009 -0700 +++ b/schema/VirtualSystemSnapshotService.mof Mon May 18 16:39:18 2009 -0700 @@ -1,59 +1,5 @@ // Copyright IBM Corp. 2008 -[Description ( - "Service to create, apply and destroy snapshots of virtual systems." - )] -class Virt_VirtualSystemSnapshotService: CIM_VirtualSystemSnapshotService { - [Override, Description ( "Creates a snapshot of a virtual system." ), - ValueMap { "0", "1", "2", "3", "4", "5", "6", "..", "4096", - "4097..32767", "32768..65535" }, - Values { "Completed with No Error", "Not Supported", - "Failed", "Timeout", "Invalid Parameter", "Invalid State", - "Invalid Type", "DMTF Reserved", - "Method Parameters Checked - Job Started", - "Method Reserved", "Vendor Specific" }] - uint32 CreateSnapshot( - [In, Description ( - "Reference to the affected virtual system." )] - CIM_ComputerSystem REF AffectedSystem, - [In, Description ( "Parameter settings." ), - EmbeddedInstance ( "CIM_SettingData" )] - string SnapshotSettings, - [In, Description ( - "Requested snapshot type:\n" - "Full Snapshot: Complete snapshot of the virtual system.\n" - "Disk Snapshot: Snapshot of virtual system disks.\n" - "Memory Snapshot: Snapshot of virtual system memory only. " - "The virtual system remains active after memory snapshot " - "is complete.\n" - "Memory Snapshot Terminal: Snapshot of virtual system " - "memory only. The virtual system is transitioned to " - "disabled state after the memory snapshot is complete.\n" ), - ValueMap { "2", "3", "..", "32768", "32769", "32770..65535" }, - Values { "Full Snapshot", "Disk Snapshot", - "DMTF Reserved", "Memory Snapshot", "Memory Snapshot Terminal", - "Vendor Specific" }, - ModelCorrespondence { - "CIM_VirtualSystemSnapshotCapabilities.SnapshotTypesEnabled", - "CIM_VirtualSystemSnapshotServiceCapabilities.SnapshotTypesSupported" }] - uint16 SnapshotType, - [In, Out, Description ( - "Resulting virtual system snapshot" )] - CIM_VirtualSystemSettingData REF ResultingSnapshot, - [In, Out, Description ( - "If the operation is long running, then optionally " - "a job may be returned. In this case, the instance " - "of the CIM_VirtualSystemSettingData class " - "representing the new virtual system snapshot is " - "presented via the CIM_AffectedJobElement " - "association with the value of the AffectedElement " - "property referring to the new instance of the " - "CIM_VirtualSystemSettingData class representing " - "the virtual system snapshot and and the value of " - "the ElementEffects set to 5 (Create)." 
)] - CIM_ConcreteJob REF Job); -}; - class Xen_VirtualSystemSnapshotService : Virt_VirtualSystemSnapshotService { }; class KVM_VirtualSystemSnapshotService : Virt_VirtualSystemSnapshotService { }; class LXC_VirtualSystemSnapshotService : Virt_VirtualSystemSnapshotService { }; From kaitlin at linux.vnet.ibm.com Mon May 18 23:43:25 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 18 May 2009 16:43:25 -0700 Subject: [Libvirt-cim] [PATCH] Update SAE to use the proper association attributes Message-ID: <61b3f36a8a0294c916b7.1242690205@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242689960 25200 # Node ID 61b3f36a8a0294c916b778774e58319fbf24b4b5 # Parent 853db606f335086710612450a93bfd67c1293922 Update SAE to use the proper association attributes Signed-off-by: Kaitlin Rupert diff -r 853db606f335 -r 61b3f36a8a02 src/Virt_ServiceAffectsElement.c --- a/src/Virt_ServiceAffectsElement.c Mon May 18 16:39:18 2009 -0700 +++ b/src/Virt_ServiceAffectsElement.c Mon May 18 16:39:20 2009 -0700 @@ -136,7 +136,7 @@ LIBVIRT_CIM_DEFAULT_MAKEREF() -static char* antecedent[] = { +static char* affected_ele[] = { "Xen_ComputerSystem", "KVM_ComputerSystem", "LXC_ComputerSystem", @@ -149,7 +149,7 @@ NULL }; -static char* dependent[] = { +static char* affecting_ele[] = { "Xen_ConsoleRedirectionService", "KVM_ConsoleRedirectionService", "LXC_ConsoleRedirectionService", @@ -164,11 +164,11 @@ }; static struct std_assoc _cs_to_service = { - .source_class = (char**)&antecedent, - .source_prop = "Antecedent", + .source_class = (char**)&affected_ele, + .source_prop = "AffectedElement", - .target_class = (char**)&dependent, - .target_prop = "Dependent", + .target_class = (char**)&affecting_ele, + .target_prop = "AffectingElement", .assoc_class = (char**)&assoc_classname, @@ -177,11 +177,11 @@ }; static struct std_assoc _service_to_cs = { - .source_class = (char**)&dependent, - .source_prop = "Dependent", + .source_class = (char**)&affecting_ele, + .source_prop = "AffectingElement", - .target_class = (char**)&antecedent, - .target_prop = "Antecedent", + .target_class = (char**)&affected_ele, + .target_prop = "AffectedElement", .assoc_class = (char**)&assoc_classname, From kaitlin at linux.vnet.ibm.com Mon May 18 23:39:44 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Mon, 18 May 2009 16:39:44 -0700 Subject: [Libvirt-cim] [PATCH 1 of 5] Use 2.21 schema instead of 2.16 In-Reply-To: References: Message-ID: <9e56460627520ce02a57.1242689984@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242225647 25200 # Node ID 9e56460627520ce02a57b854fb440eb26ba6aaff # Parent f1b8c4c8e95b1e74e77a0b2930215bb18efd0aaa Use 2.21 schema instead of 2.16 Instead of pulling 2.16 from the DMTF site, pull down 2.21. Also, the format of the DMTF zip file has changed slightly. These are the necessary formatting changes. 
Signed-off-by: Kaitlin Rupert diff -r f1b8c4c8e95b -r 9e5646062752 base_schema/Makefile.am --- a/base_schema/Makefile.am Fri May 08 10:53:01 2009 -0700 +++ b/base_schema/Makefile.am Wed May 13 07:40:47 2009 -0700 @@ -1,12 +1,13 @@ -CIM_SCHEMA_VER = 216 +CIM_SCHEMA_VER = 2.21.0 +CIM_SCHEMA_DIR = 2210 CIM_SCHEMA_REL = $(CIM_SCHEMA_VER)Experimental -CIM_SCHEMA_ZIP = cimv$(CIM_SCHEMA_REL)-MOFs.zip +CIM_SCHEMA_ZIP = cim_schema_$(CIM_SCHEMA_REL)-MOFs.zip -dist_pkgdata_DATA = $(CIM_SCHEMA_ZIP) fix_schema.patch cimv216-interop_mof \ - cimv216-cimv2_mof +dist_pkgdata_DATA = $(CIM_SCHEMA_ZIP) cimv2.21.0-interop_mof \ + cimv2.21.0-cimv2_mof dist_pkgdata_SCRIPTS = install_base_schema.sh $(CIM_SCHEMA_ZIP): - wget http://www.dmtf.org/standards/cim/cim_schema_v$(CIM_SCHEMA_VER)/$(CIM_SCHEMA_ZIP) + wget http://www.dmtf.org/standards/cim/cim_schema_v$(CIM_SCHEMA_DIR)/$(CIM_SCHEMA_ZIP) EXTRA_DIST = README.DMTF diff -r f1b8c4c8e95b -r 9e5646062752 base_schema/install_base_schema.sh.in --- a/base_schema/install_base_schema.sh.in Fri May 08 10:53:01 2009 -0700 +++ b/base_schema/install_base_schema.sh.in Wed May 13 07:40:47 2009 -0700 @@ -3,7 +3,7 @@ DATA="$1" NS=@CIM_VIRT_NS@ CIMOM=@CIMSERVER@ -SCHEMA_VERSION="2.16" +SCHEMA_VERSION="2.21.0" TMPDIR=$(mktemp -d /tmp/cim_schema.XXXXX) chmod a+x $TMPDIR @@ -16,13 +16,12 @@ fi unpack_schema() { - cd ${TMPDIR} && unzip ${DATA}/cimv*-MOFs.zip + cd ${TMPDIR} && unzip ${DATA}/cim_schema_*-MOFs.zip } fix_schema() { - (cd ${TMPDIR} && patch -p0 < ${DATA}/fix_schema.patch) - cp -a ${DATA}/cimv216-interop_mof ${TMPDIR}/cimv216-interop.mof - cp -a ${DATA}/cimv216-cimv2_mof ${TMPDIR}/cimv216-cimv2.mof + cp -a ${DATA}/cimv2.21.0-interop_mof ${TMPDIR}/cimv2.21.0-interop.mof + cp -a ${DATA}/cimv2.21.0-cimv2_mof ${TMPDIR}/cimv2.21.0-cimv2.mof } @@ -70,11 +69,11 @@ cd ${TMPDIR} - cimmofl -uc -aEV -R$repo -n $NS cimv???.mof + cimmofl -uc -aEV -R$repo -n $NS cim_schema_?.??.?.mof cimmofl -uc -aEV -R$repo -n $NS qualifiers.mof cimmofl -uc -aEV -R$repo -n $NS qualifiers_optional.mof - cimmofl -uc -aEV -R$repo -n /root/interop cimv???-interop.mof - cimmofl -uc -aEV -R$repo -n /root/cimv2 cimv???-cimv2.mof + cimmofl -uc -aEV -R$repo -n /root/interop cimv?.??.?-interop.mof + cimmofl -uc -aEV -R$repo -n /root/cimv2 cimv?.??.?-cimv2.mof } install_schema_sfcb() { @@ -87,7 +86,7 @@ return fi - mv ${TMPDIR}/cimv???.mof ${TMPDIR}/CIM_Schema.mof + mv ${TMPDIR}/cim_schema_?.??.?.mof ${TMPDIR}/CIM_Schema.mof cp -ra ${TMPDIR}/* ${dir}/CIM sfcbrepos -f } From deeptik at linux.vnet.ibm.com Tue May 19 05:22:28 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Tue, 19 May 2009 10:52:28 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Update RPCS/04 with the latest updatesof pool verification In-Reply-To: <4A11987F.6080101@linux.vnet.ibm.com> References: <4A114817.8020306@linux.vnet.ibm.com> <4A11987F.6080101@linux.vnet.ibm.com> Message-ID: <4A124214.8070704@linux.vnet.ibm.com> Kaitlin Rupert wrote: >>> Below are the part code of libvirt-cim provider. I tried to define >>> the test_mode as [0, 1, 2], but it > > What failure are you seeing? This works for me.. 
here's my pywbem > script: > > from pywbem import WBEMConnection, CIMInstanceName, CIMInstance, > cim_types > > c = WBEMConnection("http://localhost", > ("root", "pass"), > "root/virt") > > c.debug = True > > iname = CIMInstanceName('KVM_NetPoolResourceAllocationSettingData', > namespace='root/virt',keybindings = {'InstanceID':'DiskPool/meep-net'}) > > rasd = CIMInstance('KVM_NetPoolResourceAllocationSettingData', > path=iname, properties={"Address":"192.168.0.4", > "Netmask":"255.255.254.0", "IPRangeStart":"192.168.0.5", > "IPRangeEnd":"192.168.0.27", "ForwardMode":cim_types.Uint16(2), > "ForwardDevice":"eth1"}) > > rasds = [rasd.tomof()] > > print rasds > > res = c.InvokeMethod("CreateChildResourcePool", > "KVM_ResourcePoolConfigurationService", > Settings=rasds, > ElementName="meep-net") > > print c.last_request > > >>> fails yet. How to set the test_mode in cimtest for different types? >>> >>> >>> if (cu_get_u16_prop(inst, "ForwardMode", &type) != >>> CMPI_RC_OK) { >>> pool->pool_info.net.forward_mode = strdup("nat"); >>> } else { >>> free(pool->pool_info.net.forward_mode); >>> >>> switch (type) { >>> case NETPOOL_FORWARD_NONE: >>> pool->pool_info.net.forward_mode = NULL; >>> break; >>> case NETPOOL_FORWARD_NAT: >>> pool->pool_info.net.forward_mode = >>> strdup("nat"); >>> break; >>> case NETPOOL_FORWARD_ROUTED: >>> pool->pool_info.net.forward_mode = >>> strdup("route"); >>> break; >>> default: >>> return "Storage pool type not supported"; >>> >> I checked this too in Virt_RPCS.c file.. >> Here is the XML that is generated for the route type: >> xmlgen.c(981): Created pool XML: >> >> testpool >> >> >> >> >> >> >> >> >> >> Seems like only the if condition is getting executed for some reason. >> >> Daisy, >> >> In your test case I think we need to specify pool_attr["ForwardMode"] >> = "route" and pool_attr["ForwardDevice"] = "eth1" for route type. >> > > The ForwardDevice is a uint16 - it needs to take a number. If you > look at the schema: > > [Description ("Network pool forwarding mode"), > ValueMap {"0", "1", "2"}, > Values {"None", "NAT", "Routed"}] > uint16 ForwardMode; > > In Daisy's test, she is using strings, but it should be the following: > > > pool_attr["ForwardMode"] = 2 #This will give you a route pool type. Oh! Thanks so much. Yup! I had tried with the numbers as well which had not worked. May be I missed something. From yunguol at cn.ibm.com Tue May 19 06:04:52 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Mon, 18 May 2009 23:04:52 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #3 Update RPCS/04 with the latest updatesof pool verification Message-ID: <848683648c95f9367486.1242713092@localhost.localdomain> # HG changeset patch # User Yun Guo Lian # Date 1242713086 25200 # Node ID 848683648c95f9367486db4d027b1d93683561dd # Parent 6dc2d815e480237c91115cd0d86f6325503e33f7 [TEST] #3 Update RPCS/04 with the latest updatesof pool verification Updates from 2 to 3: Pull the NetPoolRASDs from get_pool_rasds() instead of setting the properties appropriately and then create netpool for each types Tested for KVM with current sources Signed-off-by: Guolian Yun Message-ID: libvirt-cim-bounces at redhat.com wrote on 2009-05-19 01:24:35: > > +test_mode = ["None", "nat", "route eth1"] > > If you want to set the forward device as eth1, you'll need to use the > ForwardDevice attribute. See the schema (or the template NetPoolRASDs) > for more info. 
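For reference, a minimal sketch (not taken from any posted patch) of the corrected cimtest usage discussed in this thread, assuming the create_netpool() helper from XenKvmLib.pool. Per the schema quoted above, ForwardMode is the uint16 (0 = None, 1 = NAT, 2 = Routed) while ForwardDevice is the interface name string; the addresses, pool name, host and device below are purely illustrative.

from pywbem import cim_types
from CimTest.Globals import logger
from CimTest.ReturnCodes import PASS
from XenKvmLib.pool import create_netpool

server = "localhost"     # CIMOM host; cimtest normally passes options.ip here
virt = "KVM"             # cimtest normally passes options.virt here
test_pool = "routed_testpool"

pool_attr = {
    "Address"       : "192.168.0.4",
    "Netmask"       : "255.255.255.0",
    "IPRangeStart"  : "192.168.0.5",
    "IPRangeEnd"    : "192.168.0.27",
    "ForwardMode"   : cim_types.Uint16(2),   # 2 == Routed, per the ValueMap above
    "ForwardDevice" : "eth1",                # outgoing interface for the routed pool
}

status = create_netpool(server, virt, test_pool, pool_attr)
if status != PASS:
    logger.error("Error creating routed network pool %s", test_pool)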
> > > + for i in range(0, len(test_mode)): > > + pool_attr["ForwardMode"] = test_mode[i] > > ForwardMode needs to be an int, not a string. Really, you should be > using the template NetPoolRASD for this. These have the values set > appropriately. > > You can call get_pool_rasds(), and then pull the RASD you want from the > list that is returned. Then you won't have to worry about setting the > properties appropriately. Good idea. I get an array of NetPoolRASDs from get_pool_rasds, which including all net types. And then create them by create_netpool. This tc pass for me now. Thanks!! > > > + > > + status = create_netpool(options.ip, options.virt, > > + test_pool, pool_attr) > > + if status != PASS: > > + logger.error("Error in networkpool creation") > > return FAIL > > - > > - logger.error("The execution should not have reached here!!") > > - return FAIL > > + > > + status = verify_pool(options.ip, options.virt, np, > > + test_pool, pool_attr) > > + if status != PASS: > > + logger.error("Error in networkpool verification") > > + destroy_netpool(options.ip, options.virt, test_pool) > > + undefine_netpool(options.ip, options.virt, test_pool) > > + return FAIL > > + > > + status = destroy_netpool(options.ip, options.virt, test_pool) > > + if status != PASS: > > + logger.error("Unable to destroy networkpool %s", test_pool) > > + return FAIL > > + > > + status = undefine_netpool(options.ip, options.virt, test_pool) > > + if status != PASS: > > + logger.error("Unable to undefine networkpool %s", test_pool) > > + return FAIL > > Instead of setting PASS at the top of the test case, set status = PASS > here. This helps prevent returning a false positive. > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From deeptik at linux.vnet.ibm.com Tue May 19 06:33:32 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Tue, 19 May 2009 12:03:32 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] #3 Update RPCS/04 with the latest updatesof pool verification In-Reply-To: <848683648c95f9367486.1242713092@localhost.localdomain> References: <848683648c95f9367486.1242713092@localhost.localdomain> Message-ID: <4A1252BC.1030607@linux.vnet.ibm.com> yunguol at cn.ibm.com wrote: > # HG changeset patch > # User Yun Guo Lian > # Date 1242713086 25200 > # Node ID 848683648c95f9367486db4d027b1d93683561dd > # Parent 6dc2d815e480237c91115cd0d86f6325503e33f7 > [TEST] #3 Update RPCS/04 with the latest updatesof pool verification > > > Updates from 2 to 3: > Pull the NetPoolRASDs from get_pool_rasds() instead of setting the > properties appropriately and then create netpool for each types > > Tested for KVM with current sources > Signed-off-by: Guolian Yun > diff -r 6dc2d815e480 -r 848683648c95 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py > --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Sun May 17 23:34:58 2009 -0700 > +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Mon May 18 23:04:46 2009 -0700 > @@ -39,45 +39,70 @@ > # OUT -- Error -- String -- Encoded error instance if the operation > # failed and did not return a job > # > -# REVISIT : > -# -------- > -# As of now the CreateChildResourcePool() simply throws an Exception. > -# We must improve this tc once the service is implemented. > -# > -# -Date: 20.02.2008 > - > +# Exception details before Revision 837 > +# ----- > +# Error code: CIM_ERR_NOT_SUPPORTED > +# > +# After revision 837, the service is implemented > +# > +# -Date: 20.02.2008 > > import sys > -import pywbem > -from XenKvmLib import rpcs_service > +import random > from CimTest.Globals import logger > from CimTest.ReturnCodes import FAIL, PASS > from XenKvmLib.const import do_main, platform_sup > from XenKvmLib.classes import get_typed_class > +from XenKvmLib.common_util import destroy_netpool > +from XenKvmLib.pool import create_netpool, verify_pool, undefine_netpool > > -cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > -cim_mname = "CreateChildResourcePool" > +test_pool = "testpool" > > @do_main(platform_sup) > def main(): > options = main.options > - rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \ > - "ResourcePoolConfigurationService"))(options.ip) > - try: > - rpcs_conn.CreateChildResourcePool() > - except pywbem.CIMError, (err_no, desc): > - if err_no == cim_errno : > - logger.info("Got expected exception for '%s' service", cim_mname) > - logger.info("Errno is '%s' ", err_no) > - logger.info("Error string is '%s'", desc) > - return PASS > - else: > - logger.error("Unexpected rc code %s and description %s\n", > - err_no, desc) > - return FAIL > - > - logger.error("The execution should not have reached here!!") > - return FAIL > + > + np = get_typed_class(options.virt, 'NetworkPool') > + np_id = "NetworkPool/%s" % test_pool > + > + subnet = '192.168.0.' 
> + ip_base = random.randint(1, 100) > + addr = subnet+'%d' % ip_base > + range_addr_start = subnet+'%d' % (ip_base + 1) > + range_addr_end = subnet+'%d' %(ip_base + 10) > + pool_attr = { > + "Address" : addr, > + "Netmask" : "255.255.255.0", > + "IPRangeStart" : range_addr_start, > + "IPRangeEnd" : range_addr_end > + } > + > + status = create_netpool(options.ip, options.virt, > + test_pool, pool_attr) > + if status != PASS: > + logger.error("Error in networkpool creation") > + return FAIL > + > + status = verify_pool(options.ip, options.virt, np, > + test_pool, pool_attr) > + if status != PASS: > + logger.error("Error in networkpool verification") > + destroy_netpool(options.ip, options.virt, test_pool) > + undefine_netpool(options.ip, options.virt, test_pool) > + return FAIL > + > + status = destroy_netpool(options.ip, options.virt, test_pool) > + if status != PASS: > + logger.error("Unable to destroy networkpool %s", test_pool) > + return FAIL > + > + status = undefine_netpool(options.ip, options.virt, test_pool) > + if status != PASS: > + logger.error("Unable to undefine networkpool %s", test_pool) > + return FAIL > + > + status = PASS > + return status > No need to assign status separately. > + > if __name__ == "__main__": > sys.exit(main()) > - > diff -r 6dc2d815e480 -r 848683648c95 suites/libvirt-cim/lib/XenKvmLib/pool.py > --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Sun May 17 23:34:58 2009 -0700 > +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Mon May 18 23:04:46 2009 -0700 > @@ -177,15 +177,16 @@ > logger.error("We can not get NetPoolRASDs") > return FAIL > else: > - net_pool_rasds[0]['PoolID'] = "NetworkPool/%s" % test_pool > - for attr, val in pool_attr_list.iteritems(): > - net_pool_rasds[0][attr] = val > - > - pool_settings = inst_to_mof(net_pool_rasds[0]) > + pool_settings = [] > + for i in range(0, len(net_pool_rasds)): > + net_pool_rasds[i]['PoolID'] = "NetworkPool/%s" % test_pool > + for attr, val in pool_attr_list.iteritems(): > + net_pool_rasds[i][attr] = val > + pool_settings.append(inst_to_mof(net_pool_rasds[i])) > The above changes is creating only an Isolated type of Pool. I think Kaitlin wanted you to use the NetPoolRASD in the 04_RPCS test case and then call CreateChildResourcePool() for each of the NetPoolRASD's and verify them. Sending all the NetPoolRASD at once will not verify each of the Pool types. 
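To make the point above concrete, here is a minimal sketch of the per-template flow: pull every NetPoolRASD template from get_pool_rasds() and drive one CreateChildResourcePool() call per template, verifying and cleaning up each pool before moving on to the next type. It assumes the same scope as the quoted test/pool.py code (rpcs_conn, inst_to_mof(), pool_attr and the verify/destroy/undefine helpers are already set up inside a test's main()); it is illustrative only and not part of the posted patch.

np = get_typed_class(virt, 'NetworkPool')
net_pool_rasds = get_pool_rasds(server, virt)

for i in range(0, len(net_pool_rasds)):
    net_pool_rasds[i]['PoolID'] = "NetworkPool/%s" % test_pool
    for attr, val in pool_attr.iteritems():
        net_pool_rasds[i][attr] = val

    # One child pool per template, so each forward mode is exercised separately
    pool_settings = inst_to_mof(net_pool_rasds[i])
    rpcs_conn.CreateChildResourcePool(ElementName=test_pool,
                                      Settings=[pool_settings])

    status = verify_pool(server, virt, np, test_pool, pool_attr)

    # Clean up before the next template, even if verification failed
    destroy_netpool(server, virt, test_pool)
    undefine_netpool(server, virt, test_pool)

    if status != PASS:
        return FAIL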
> > try: > rpcs_conn.CreateChildResourcePool(ElementName=test_pool, > - Settings=[pool_settings]) > + Settings=pool_settings) > except Exception, details: > logger.error("Error in childpool creation") > logger.error(details) > diff -r 6dc2d815e480 -r 848683648c95 suites/libvirt-cim/lib/XenKvmLib/vxml.py > --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Sun May 17 23:34:58 2009 -0700 > +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Mon May 18 23:04:46 2009 -0700 > @@ -274,13 +274,11 @@ > def xml_get_netpool_attr_list(self): > pool_attr_list = [] > > - npoolmode = self.get_value_xpath('/network/forward/@mode') > npooladdr = self.get_value_xpath('/network/ip/@address') > npoolmask = self.get_value_xpath('/network/ip/@netmask') > npoolstart = self.get_value_xpath('/network/ip/dhcp/range/@start') > npoolend = self.get_value_xpath('/network/ip/dhcp/range/@end') > > - pool_attr_list.append(npoolmode) > pool_attr_list.append(npooladdr) > pool_attr_list.append(npoolmask) > pool_attr_list.append(npoolstart) > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim > From yunguol at cn.ibm.com Tue May 19 08:12:32 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Tue, 19 May 2009 01:12:32 -0700 Subject: [Libvirt-cim] [PATCH] [TEST]Add new tc to validate that the Disk child pool can be deleted through the providers Message-ID: <0666f518db98e4406521.1242720752@localhost.localdomain> # HG changeset patch # User Yun Guo Lian # Date 1242720742 25200 # Node ID 0666f518db98e4406521f1a618318a98aead6974 # Parent 6dc2d815e480237c91115cd0d86f6325503e33f7 [TEST]Add new tc to validate that the Disk child pool can be deleted through the providers Tested for KVM with current sources Signed-off-by: Guolian Yun diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py Tue May 19 01:12:22 2009 -0700 @@ -0,0 +1,122 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Guolian Yun +# +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case should test the DeleteResourcePool service +# supplied by the RPCS provider. +# The DeleteResourcePool is used to delete a resource pool. +# DeleteResourcePool() details: +# Input +# ----- +# IN -- Pool -- CIM_ResourcePool REF -- The resource pool to delete +# +# Output +# ------ +# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started +# OUT -- Error-- String -- Encoded error instance if the operation +# failed and did not return a job. 
+# +# Exception details before Revision 841 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 841, the service is implemented +# +# -Date: 19.05.2009 + +import sys +import pywbem +from XenKvmLib import rpcs_service +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS +from XenKvmLib.const import do_main, platform_sup, get_provider_version +from XenKvmLib.enumclass import EnumInstances, EnumNames +from XenKvmLib.classes import get_typed_class +from XenKvmLib.pool import create_netpool, verify_pool + +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED +cim_mname = "DeleteResourcePool" +libvirt_cim_child_pool_rev = 841 +test_pool = "pool" + + at do_main(platform_sup) +def main(): + status = FAIL + options = main.options + rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \ + "ResourcePoolConfigurationService"))(options.ip) + curr_cim_rev, changeset = get_provider_version(options.virt, options.ip) + if curr_cim_rev < libvirt_cim_child_pool_rev: + try: + rpcs_conn.DeleteResourcePool() + except pywbem.CIMError, (err_no, desc): + if err_no == cim_errno : + logger.info("Got expected exception for '%s' service", cim_mname) + logger.info("Errno is '%s' ", err_no) + logger.info("Error string is '%s'", desc) + return PASS + else: + logger.error("Unexpected rc code %s and description %s\n", + err_no, desc) + return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + pool_attr = { + "Path" : "/tmp" + } + dp = get_typed_class(options.virt, 'DiskPool') + dp_id = "DiskPool/%s" % test_pool + + status = create_netpool(options.ip, options.virt, test_pool, pool_attr, + pool_type="DiskPool") + if status != PASS: + logger.error("Error in diskpool creation") + return FAIL + + status = verify_pool(options.ip, options.virt, dp, + test_pool, pool_attr, + pool_type="DiskPool") + if status != PASS: + logger.error("Error in diskpool verification") + destroy_netpool(options.ip, options.virt, test_pool) + return FAIL + + pool = EnumNames(options.ip, dp) + for i in range(0, len(pool)): + ret_pool = pool[i].keybindings['InstanceID'] + if ret_pool == dp_id: + pool_settings = pool[i] + break + try: + rpcs_conn.DeleteResourcePool(Pool = pool_settings) + pool = EnumInstances(options.ip, dp) + for i in range(0, len(pool)): + ret_pool = pool[i].InstanceID + if ret_pool == dp_id: + raise Exception("Failed to delete %s" % test_pool) + status = PASS + except Exception, details: + logger.error(details) + return FAIL + + return status + +if __name__ == "__main__": + sys.exit(main()) diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Sun May 17 23:34:58 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Tue May 19 01:12:22 2009 -0700 @@ -32,7 +32,7 @@ from XenKvmLib import rpcs_service import pywbem from CimTest.CimExt import CIMClassMOF -from XenKvmLib.vxml import NetXML +from XenKvmLib.vxml import NetXML, PoolXML cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED cim_mname = "CreateChildResourcePool" @@ -105,12 +105,12 @@ return volume -def get_pool_rasds(server, virt): +def get_pool_rasds(server, virt, pool_type="NetworkPool"): net_pool_rasds = [] ac_cn = get_typed_class(virt, "AllocationCapabilities") an_cn = get_typed_class(virt, "SettingsDefineCapabilities") - key_list = {"InstanceID" : "NetworkPool/0" } + key_list = {"InstanceID" : "%s/0" % pool_type} try: inst = GetInstance(server, ac_cn, key_list) @@ -144,7 +144,8 @@ return PASS -def create_netpool(server, virt, test_pool, pool_attr_list): +def 
create_netpool(server, virt, test_pool, pool_attr_list, + pool_type="NetworkPool"): status = PASS rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") rpcs_conn = eval("rpcs_service." + rpcs)(server) @@ -162,22 +163,23 @@ logger.error("Unexpected rc code %s and description %s\n", err_no, desc) return FAIL - elif curr_cim_rev >= libvirt_cim_child_pool_rev: - n_list = net_list(server, virt) - for _net_name in n_list: - net_xml = NetXML(server=server, networkname=_net_name, - virt=virt, is_new_net=False) - pool_use_attr = net_xml.xml_get_netpool_attr_list() - if pool_attr_list['Address'] in pool_use_attr: - logger.error("IP address is in use by a different network") - return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + if pool_type == "NetworkPool": + n_list = net_list(server, virt) + for _net_name in n_list: + net_xml = NetXML(server=server, networkname=_net_name, + virt=virt, is_new_net=False) + pool_use_attr = net_xml.xml_get_netpool_attr_list() + if pool_attr_list['Address'] in pool_use_attr: + logger.error("IP address is in use by a different network") + return FAIL - net_pool_rasds = get_pool_rasds(server, virt) + net_pool_rasds = get_pool_rasds(server, virt, pool_type) if len(net_pool_rasds) == 0: - logger.error("We can not get NetPoolRASDs") + logger.error("We can not get PoolRASDs") return FAIL else: - net_pool_rasds[0]['PoolID'] = "NetworkPool/%s" % test_pool + net_pool_rasds[0]['PoolID'] = "%s/%s" % (pool_type, test_pool) for attr, val in pool_attr_list.iteritems(): net_pool_rasds[0][attr] = val @@ -194,7 +196,8 @@ return status -def verify_pool(server, virt, pooltype, poolname, pool_attr_list): +def verify_pool(server, virt, pooltype, poolname, pool_attr_list, + pool_type="NetworkPool"): status = FAIL pool_list = EnumInstances(server, pooltype) if len(pool_list) < 1: @@ -202,16 +205,20 @@ len(pool_list)) return FAIL - poolid = "NetworkPool/%s" % poolname + poolid = "%s/%s" % (pool_type, poolname) for i in range(0, len(pool_list)): ret_pool = pool_list[i].InstanceID if ret_pool != poolid: continue - net_xml = NetXML(server, virt=virt, networkname=poolname, - is_new_net=False) - ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() - + if pool_type == "NetworkPool": + net_xml = NetXML(server, virt=virt, networkname=poolname, + is_new_net=False) + ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() + elif pool_type == "DiskPool": + disk_xml = PoolXML(server ,virt=virt, poolname=poolname, + is_new_pool=False) + ret_pool_attr_list = disk_xml.xml_get_pool_attr_list() for i in range(0, len(ret_pool_attr_list)): if ret_pool_attr_list[i] not in pool_attr_list.itervalues(): logger.error("Got error when parsing %s", ret_pool_attr_list[i]) diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt-cim/lib/XenKvmLib/vxml.py --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Sun May 17 23:34:58 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Tue May 19 01:12:22 2009 -0700 @@ -291,7 +291,7 @@ class PoolXML(Virsh, XMLClass): def __init__(self, server, poolname=const.default_pool_name, - virt='xen'): + virt='xen', is_new_pool=True): XMLClass.__init__(self) if virt == 'XenFV': @@ -300,6 +300,17 @@ self.pool_name = poolname self.server = server + if is_new_pool is False: + cmd = "virsh pool-dumpxml %s" % self.pool_name + s, disk_xml = utils.run_remote(server, cmd) + if s != 0: + logger.error("Encounter error dump netxml") + return None + else: + self.xml_string = disk_xml + self.xdoc = minidom.parseString(self.xml_string) + return + pool = self.add_sub_node(self.xdoc, 
'pool', type='dir') self.add_sub_node(pool, 'name', self.pool_name) target = self.add_sub_node(pool, 'target') @@ -315,6 +326,12 @@ dpoolname = self.get_value_xpath('/pool/name') return dpoolname + def xml_get_pool_attr_list(self): + pool_attr_list = [] + poolpath = self.get_value_xpath('/pool/target/path') + pool_attr_list.append(poolpath) + + return pool_attr_list class VirtXML(Virsh, XMLClass): """Base class for all XML generation & operation""" From deeptik at linux.vnet.ibm.com Tue May 19 09:12:57 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Tue, 19 May 2009 14:42:57 +0530 Subject: [Libvirt-cim] [PATCH] [TEST]Add new tc to validate that the Disk child pool can be deleted through the providers In-Reply-To: <0666f518db98e4406521.1242720752@localhost.localdomain> References: <0666f518db98e4406521.1242720752@localhost.localdomain> Message-ID: <4A127819.1060602@linux.vnet.ibm.com> yunguol at cn.ibm.com wrote: > # HG changeset patch > # User Yun Guo Lian > # Date 1242720742 25200 > # Node ID 0666f518db98e4406521f1a618318a98aead6974 > # Parent 6dc2d815e480237c91115cd0d86f6325503e33f7 > [TEST]Add new tc to validate that the Disk child pool can be deleted through the providers > > > Tested for KVM with current sources > Signed-off-by: Guolian Yun > > diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py Tue May 19 01:12:22 2009 -0700 > @@ -0,0 +1,122 @@ > +#!/usr/bin/python > +# > +# Copyright 2009 IBM Corp. > +# > +# Authors: > +# Guolian Yun > +# > +# > +# This library is free software; you can redistribute it and/or > +# modify it under the terms of the GNU General Public > +# License as published by the Free Software Foundation; either > +# version 2.1 of the License, or (at your option) any later version. > +# > +# This library is distributed in the hope that it will be useful, > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > +# General Public License for more details. > +# > +# You should have received a copy of the GNU General Public > +# License along with this library; if not, write to the Free Software > +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA > +# > +# This test case should test the DeleteResourcePool service > +# supplied by the RPCS provider. > +# The DeleteResourcePool is used to delete a resource pool. > +# DeleteResourcePool() details: > +# Input > +# ----- > +# IN -- Pool -- CIM_ResourcePool REF -- The resource pool to delete > +# > +# Output > +# ------ > +# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started > +# OUT -- Error-- String -- Encoded error instance if the operation > +# failed and did not return a job. 
> +# > +# Exception details before Revision 841 > +# ----- > +# Error code: CIM_ERR_NOT_SUPPORTED > +# > +# After revision 841, the service is implemented > +# > +# -Date: 19.05.2009 > + > +import sys > +import pywbem > +from XenKvmLib import rpcs_service > +from CimTest.Globals import logger > +from CimTest.ReturnCodes import FAIL, PASS > +from XenKvmLib.const import do_main, platform_sup, get_provider_version > +from XenKvmLib.enumclass import EnumInstances, EnumNames > +from XenKvmLib.classes import get_typed_class > +from XenKvmLib.pool import create_netpool, verify_pool > + > +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > +cim_mname = "DeleteResourcePool" > +libvirt_cim_child_pool_rev = 841 > +test_pool = "pool" > + > + at do_main(platform_sup) > +def main(): > + status = FAIL > + options = main.options > + rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \ > + "ResourcePoolConfigurationService"))(options.ip) > + curr_cim_rev, changeset = get_provider_version(options.virt, options.ip) > + if curr_cim_rev < libvirt_cim_child_pool_rev: > + try: > + rpcs_conn.DeleteResourcePool() > + except pywbem.CIMError, (err_no, desc): > + if err_no == cim_errno : > + logger.info("Got expected exception for '%s' service", cim_mname) > + logger.info("Errno is '%s' ", err_no) > + logger.info("Error string is '%s'", desc) > + return PASS > + else: > + logger.error("Unexpected rc code %s and description %s\n", > + err_no, desc) > + return FAIL > + elif curr_cim_rev >= libvirt_cim_child_pool_rev: > + pool_attr = { > + "Path" : "/tmp" > + } > + dp = get_typed_class(options.virt, 'DiskPool') > + dp_id = "DiskPool/%s" % test_pool > + > + status = create_netpool(options.ip, options.virt, test_pool, pool_attr, > + pool_type="DiskPool") > you can rename create_netpool() to something generic if you are planning to use for network and diskpool > + if status != PASS: > + logger.error("Error in diskpool creation") > + return FAIL > + > + status = verify_pool(options.ip, options.virt, dp, > + test_pool, pool_attr, > + pool_type="DiskPool") > + if status != PASS: > + logger.error("Error in diskpool verification") > + destroy_netpool(options.ip, options.virt, test_pool) > + return FAIL > + > + pool = EnumNames(options.ip, dp) > + for i in range(0, len(pool)): > + ret_pool = pool[i].keybindings['InstanceID'] > + if ret_pool == dp_id: > + pool_settings = pool[i] > + break > Please initialize the pool_settings, otherwise we will get an exception with pool_settings will not be set if ret_pool != dp_id as pool_settings will not be set. > + try: > + rpcs_conn.DeleteResourcePool(Pool = pool_settings) > + pool = EnumInstances(options.ip, dp) > + for i in range(0, len(pool)): > + ret_pool = pool[i].InstanceID > + if ret_pool == dp_id: > + raise Exception("Failed to delete %s" % test_pool) > Need to destroy the pool if its found before returning failure. 
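A minimal sketch of the two defensive changes requested above, written against the same test-case scope as the quoted patch (dp, dp_id, test_pool, rpcs_conn, logger and the options object): initialize pool_settings before the lookup so a missing DiskPool reference fails cleanly, and destroy the pool before returning failure if it is still present after DeleteResourcePool(). destroy_netpool stands in for whatever generalized cleanup helper the review suggests; the snippet is illustrative only.

pool_settings = None
for ref in EnumNames(options.ip, dp):
    if ref.keybindings['InstanceID'] == dp_id:
        pool_settings = ref
        break

if pool_settings is None:
    logger.error("No DiskPool reference found for %s", dp_id)
    destroy_netpool(options.ip, options.virt, test_pool)
    return FAIL

try:
    rpcs_conn.DeleteResourcePool(Pool=pool_settings)
    leftovers = [p for p in EnumInstances(options.ip, dp)
                 if p.InstanceID == dp_id]
    if len(leftovers) != 0:
        # The pool is still defined; clean it up before reporting the failure
        destroy_netpool(options.ip, options.virt, test_pool)
        raise Exception("Failed to delete %s" % test_pool)
    status = PASS
except Exception, details:
    logger.error(details)
    return FAIL

return status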
> + status = PASS > + except Exception, details: > + logger.error(details) > + return FAIL > + > + return status > + > +if __name__ == "__main__": > + sys.exit(main()) > diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt-cim/lib/XenKvmLib/pool.py > --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Sun May 17 23:34:58 2009 -0700 > +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Tue May 19 01:12:22 2009 -0700 > @@ -32,7 +32,7 @@ > from XenKvmLib import rpcs_service > import pywbem > from CimTest.CimExt import CIMClassMOF > -from XenKvmLib.vxml import NetXML > +from XenKvmLib.vxml import NetXML, PoolXML > > cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > cim_mname = "CreateChildResourcePool" > @@ -105,12 +105,12 @@ > > return volume > > -def get_pool_rasds(server, virt): > +def get_pool_rasds(server, virt, pool_type="NetworkPool"): > net_pool_rasds = [] > > ac_cn = get_typed_class(virt, "AllocationCapabilities") > an_cn = get_typed_class(virt, "SettingsDefineCapabilities") > - key_list = {"InstanceID" : "NetworkPool/0" } > + key_list = {"InstanceID" : "%s/0" % pool_type} > > try: > inst = GetInstance(server, ac_cn, key_list) > @@ -144,7 +144,8 @@ > > return PASS > > -def create_netpool(server, virt, test_pool, pool_attr_list): > +def create_netpool(server, virt, test_pool, pool_attr_list, > + pool_type="NetworkPool"): > status = PASS > rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") > rpcs_conn = eval("rpcs_service." + rpcs)(server) > @@ -162,22 +163,23 @@ > logger.error("Unexpected rc code %s and description %s\n", > err_no, desc) > return FAIL > - elif curr_cim_rev >= libvirt_cim_child_pool_rev: > - n_list = net_list(server, virt) > - for _net_name in n_list: > - net_xml = NetXML(server=server, networkname=_net_name, > - virt=virt, is_new_net=False) > - pool_use_attr = net_xml.xml_get_netpool_attr_list() > - if pool_attr_list['Address'] in pool_use_attr: > - logger.error("IP address is in use by a different network") > - return FAIL > + elif curr_cim_rev >= libvirt_cim_child_pool_rev: > + if pool_type == "NetworkPool": > + n_list = net_list(server, virt) > + for _net_name in n_list: > + net_xml = NetXML(server=server, networkname=_net_name, > + virt=virt, is_new_net=False) > + pool_use_attr = net_xml.xml_get_netpool_attr_list() > + if pool_attr_list['Address'] in pool_use_attr: > + logger.error("IP address is in use by a different network") > + return FAIL > > - net_pool_rasds = get_pool_rasds(server, virt) > + net_pool_rasds = get_pool_rasds(server, virt, pool_type) > if len(net_pool_rasds) == 0: > - logger.error("We can not get NetPoolRASDs") > + logger.error("We can not get PoolRASDs") > return FAIL > else: > - net_pool_rasds[0]['PoolID'] = "NetworkPool/%s" % test_pool > + net_pool_rasds[0]['PoolID'] = "%s/%s" % (pool_type, test_pool) > for attr, val in pool_attr_list.iteritems(): > net_pool_rasds[0][attr] = val > These changes conflict with the latest changes submitted for the "#3 Update RPCS/04 with the latest updatesof pool verification". Are you planning to modify "#3 Update RPCS/04 with the latest updatesof pool verification" to use the above changes as well. 
> > @@ -194,7 +196,8 @@ > return status > > > -def verify_pool(server, virt, pooltype, poolname, pool_attr_list): > +def verify_pool(server, virt, pooltype, poolname, pool_attr_list, > + pool_type="NetworkPool"): > status = FAIL > pool_list = EnumInstances(server, pooltype) > if len(pool_list) < 1: > @@ -202,16 +205,20 @@ > len(pool_list)) > return FAIL > > - poolid = "NetworkPool/%s" % poolname > + poolid = "%s/%s" % (pool_type, poolname) > for i in range(0, len(pool_list)): > ret_pool = pool_list[i].InstanceID > if ret_pool != poolid: > continue > > - net_xml = NetXML(server, virt=virt, networkname=poolname, > - is_new_net=False) > - ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() > - > + if pool_type == "NetworkPool": > + net_xml = NetXML(server, virt=virt, networkname=poolname, > + is_new_net=False) > + ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() > + elif pool_type == "DiskPool": > + disk_xml = PoolXML(server ,virt=virt, poolname=poolname, > + is_new_pool=False) > + ret_pool_attr_list = disk_xml.xml_get_pool_attr_list() > for i in range(0, len(ret_pool_attr_list)): > if ret_pool_attr_list[i] not in pool_attr_list.itervalues(): > logger.error("Got error when parsing %s", ret_pool_attr_list[i]) > diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt-cim/lib/XenKvmLib/vxml.py > --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Sun May 17 23:34:58 2009 -0700 > +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Tue May 19 01:12:22 2009 -0700 > @@ -291,7 +291,7 @@ > class PoolXML(Virsh, XMLClass): > > def __init__(self, server, poolname=const.default_pool_name, > - virt='xen'): > + virt='xen', is_new_pool=True): > > XMLClass.__init__(self) > if virt == 'XenFV': > @@ -300,6 +300,17 @@ > self.pool_name = poolname > self.server = server > > + if is_new_pool is False: > + cmd = "virsh pool-dumpxml %s" % self.pool_name > + s, disk_xml = utils.run_remote(server, cmd) > + if s != 0: > + logger.error("Encounter error dump netxml") > + return None > + else: > + self.xml_string = disk_xml > + self.xdoc = minidom.parseString(self.xml_string) > + return > + > pool = self.add_sub_node(self.xdoc, 'pool', type='dir') > self.add_sub_node(pool, 'name', self.pool_name) > target = self.add_sub_node(pool, 'target') > @@ -315,6 +326,12 @@ > dpoolname = self.get_value_xpath('/pool/name') > return dpoolname > > + def xml_get_pool_attr_list(self): > + pool_attr_list = [] > + poolpath = self.get_value_xpath('/pool/target/path') > + pool_attr_list.append(poolpath) > + > + return pool_attr_list > > class VirtXML(Virsh, XMLClass): > """Base class for all XML generation & operation""" > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim > From yunguol at cn.ibm.com Tue May 19 10:21:35 2009 From: yunguol at cn.ibm.com (yunguol at cn.ibm.com) Date: Tue, 19 May 2009 03:21:35 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #4 Update RPCS/04 with the latest updates of pool verification Message-ID: <8de9a3fab292ea47563b.1242728495@localhost.localdomain> # HG changeset patch # User Yun Guo Lian # Date 1242728480 25200 # Node ID 8de9a3fab292ea47563bc3c3617c7a8f9ea6753b # Parent 6dc2d815e480237c91115cd0d86f6325503e33f7 [TEST] #4 Update RPCS/04 with the latest updates of pool verification Tested for KVM with current sources Signed-off-by: Guolian Yun Message-ID: libvirt-cim-bounces at redhat.com wrote on 2009-05-19 17:12:57: > > > yunguol at cn.ibm.com wrote: > > # HG changeset patch > > # User Yun Guo Lian > > # 
Date 1242720742 25200 > > # Node ID 0666f518db98e4406521f1a618318a98aead6974 > > # Parent 6dc2d815e480237c91115cd0d86f6325503e33f7 > > [TEST]Add new tc to validate that the Disk child pool can be > deleted through the providers > > > > > > Tested for KVM with current sources > > Signed-off-by: Guolian Yun > > > > diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt- > cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py > > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > > +++ b/suites/libvirt- > cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py > Tue May 19 01:12:22 2009 -0700 > > @@ -0,0 +1,122 @@ > > +#!/usr/bin/python > > +# > > +# Copyright 2009 IBM Corp. > > +# > > +# Authors: > > +# Guolian Yun > > +# > > +# > > +# This library is free software; you can redistribute it and/or > > +# modify it under the terms of the GNU General Public > > +# License as published by the Free Software Foundation; either > > +# version 2.1 of the License, or (at your option) any later version. > > +# > > +# This library is distributed in the hope that it will be useful, > > +# but WITHOUT ANY WARRANTY; without even the implied warranty of > > +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > > +# General Public License for more details. > > +# > > +# You should have received a copy of the GNU General Public > > +# License along with this library; if not, write to the Free Software > > +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA > > +# > > +# This test case should test the DeleteResourcePool service > > +# supplied by the RPCS provider. > > +# The DeleteResourcePool is used to delete a resource pool. > > +# DeleteResourcePool() details: > > +# Input > > +# ----- > > +# IN -- Pool -- CIM_ResourcePool REF -- The resource pool to delete > > +# > > +# Output > > +# ------ > > +# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started > > +# OUT -- Error-- String -- Encoded error instance if the operation > > +# failed and did not return a job. > > +# > > +# Exception details before Revision 841 > > +# ----- > > +# Error code: CIM_ERR_NOT_SUPPORTED > > +# > > +# After revision 841, the service is implemented > > +# > > +# -Date: 19.05.2009 > > + > > +import sys > > +import pywbem > > +from XenKvmLib import rpcs_service > > +from CimTest.Globals import logger > > +from CimTest.ReturnCodes import FAIL, PASS > > +from XenKvmLib.const import do_main, platform_sup, get_provider_version > > +from XenKvmLib.enumclass import EnumInstances, EnumNames > > +from XenKvmLib.classes import get_typed_class > > +from XenKvmLib.pool import create_netpool, verify_pool > > + > > +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > > +cim_mname = "DeleteResourcePool" > > +libvirt_cim_child_pool_rev = 841 > > +test_pool = "pool" > > + > > + at do_main(platform_sup) > > +def main(): > > + status = FAIL > > + options = main.options > > + rpcs_conn = eval("rpcs_service." 
+ get_typed_class(options.virt, \ > > + "ResourcePoolConfigurationService"))(options.ip) > > + curr_cim_rev, changeset = get_provider_version(options.virt, > options.ip) > > + if curr_cim_rev < libvirt_cim_child_pool_rev: > > + try: > > + rpcs_conn.DeleteResourcePool() > > + except pywbem.CIMError, (err_no, desc): > > + if err_no == cim_errno : > > + logger.info("Got expected exception for '%s' > service", cim_mname) > > + logger.info("Errno is '%s' ", err_no) > > + logger.info("Error string is '%s'", desc) > > + return PASS > > + else: > > + logger.error("Unexpected rc code %s and description %s\n", > > + err_no, desc) > > + return FAIL > > + elif curr_cim_rev >= libvirt_cim_child_pool_rev: > > + pool_attr = { > > + "Path" : "/tmp" > > + } > > + dp = get_typed_class(options.virt, 'DiskPool') > > + dp_id = "DiskPool/%s" % test_pool > > + > > + status = create_netpool(options.ip, options.virt, > test_pool, pool_attr, > > + pool_type="DiskPool") > > > you can rename create_netpool() to something generic if you are planning > to use for network and diskpool > > + if status != PASS: > > + logger.error("Error in diskpool creation") > > + return FAIL > > + > > + status = verify_pool(options.ip, options.virt, dp, > > + test_pool, pool_attr, > > + pool_type="DiskPool") > > + if status != PASS: > > + logger.error("Error in diskpool verification") > > + destroy_netpool(options.ip, options.virt, test_pool) > > + return FAIL > > + > > + pool = EnumNames(options.ip, dp) > > + for i in range(0, len(pool)): > > + ret_pool = pool[i].keybindings['InstanceID'] > > + if ret_pool == dp_id: > > + pool_settings = pool[i] > > + break > > > Please initialize the pool_settings, otherwise we will get an exception > with pool_settings will not be set if ret_pool != dp_id as pool_settings > will not be set. > > + try: > > + rpcs_conn.DeleteResourcePool(Pool = pool_settings) > > + pool = EnumInstances(options.ip, dp) > > + for i in range(0, len(pool)): > > + ret_pool = pool[i].InstanceID > > + if ret_pool == dp_id: > > + raise Exception("Failed to delete %s" % test_pool) > > > Need to destroy the pool if its found before returning failure. 
> > + status = PASS > > + except Exception, details: > > + logger.error(details) > > + return FAIL > > + > > + return status > > + > > +if __name__ == "__main__": > > + sys.exit(main()) > > diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt- > cim/lib/XenKvmLib/pool.py > > --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Sun May 17 23:34: > 58 2009 -0700 > > +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Tue May 19 01:12: > 22 2009 -0700 > > @@ -32,7 +32,7 @@ > > from XenKvmLib import rpcs_service > > import pywbem > > from CimTest.CimExt import CIMClassMOF > > -from XenKvmLib.vxml import NetXML > > +from XenKvmLib.vxml import NetXML, PoolXML > > > > cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED > > cim_mname = "CreateChildResourcePool" > > @@ -105,12 +105,12 @@ > > > > return volume > > > > -def get_pool_rasds(server, virt): > > +def get_pool_rasds(server, virt, pool_type="NetworkPool"): > > net_pool_rasds = [] > > > > ac_cn = get_typed_class(virt, "AllocationCapabilities") > > an_cn = get_typed_class(virt, "SettingsDefineCapabilities") > > - key_list = {"InstanceID" : "NetworkPool/0" } > > + key_list = {"InstanceID" : "%s/0" % pool_type} > > > > try: > > inst = GetInstance(server, ac_cn, key_list) > > @@ -144,7 +144,8 @@ > > > > return PASS > > > > -def create_netpool(server, virt, test_pool, pool_attr_list): > > +def create_netpool(server, virt, test_pool, pool_attr_list, > > + pool_type="NetworkPool"): > > status = PASS > > rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") > > rpcs_conn = eval("rpcs_service." + rpcs)(server) > > @@ -162,22 +163,23 @@ > > logger.error("Unexpected rc code %s and description %s\n", > > err_no, desc) > > return FAIL > > - elif curr_cim_rev >= libvirt_cim_child_pool_rev: > > - n_list = net_list(server, virt) > > - for _net_name in n_list: > > - net_xml = NetXML(server=server, networkname=_net_name, > > - virt=virt, is_new_net=False) > > - pool_use_attr = net_xml.xml_get_netpool_attr_list() > > - if pool_attr_list['Address'] in pool_use_attr: > > - logger.error("IP address is in use by a differentnetwork") > > - return FAIL > > + elif curr_cim_rev >= libvirt_cim_child_pool_rev: > > + if pool_type == "NetworkPool": > > + n_list = net_list(server, virt) > > + for _net_name in n_list: > > + net_xml = NetXML(server=server, networkname=_net_name, > > + virt=virt, is_new_net=False) > > + pool_use_attr = net_xml.xml_get_netpool_attr_list() > > + if pool_attr_list['Address'] in pool_use_attr: > > + logger.error("IP address is in use by a > different network") > > + return FAIL > > > > - net_pool_rasds = get_pool_rasds(server, virt) > > + net_pool_rasds = get_pool_rasds(server, virt, pool_type) > > if len(net_pool_rasds) == 0: > > - logger.error("We can not get NetPoolRASDs") > > + logger.error("We can not get PoolRASDs") > > return FAIL > > else: > > - net_pool_rasds[0]['PoolID'] = "NetworkPool/%s" % test_pool > > + net_pool_rasds[0]['PoolID'] = "%s/%s" % (pool_type, test_pool) > > for attr, val in pool_attr_list.iteritems(): > > net_pool_rasds[0][attr] = val > > > These changes conflict with the latest changes submitted for the "#3 > Update RPCS/04 with the latest updatesof pool verification". > Are you planning to modify "#3 Update RPCS/04 with the latest updatesof > pool verification" to use the above changes as well. I send out a new patch of RPCS/04. To avoid conflict, I will rework on this patch when RPCS/04 patch is applied. Thanks! 
> > > > @@ -194,7 +196,8 @@ > > return status > > > > > > -def verify_pool(server, virt, pooltype, poolname, pool_attr_list): > > +def verify_pool(server, virt, pooltype, poolname, pool_attr_list, > > + pool_type="NetworkPool"): > > status = FAIL > > pool_list = EnumInstances(server, pooltype) > > if len(pool_list) < 1: > > @@ -202,16 +205,20 @@ > > len(pool_list)) > > return FAIL > > > > - poolid = "NetworkPool/%s" % poolname > > + poolid = "%s/%s" % (pool_type, poolname) > > for i in range(0, len(pool_list)): > > ret_pool = pool_list[i].InstanceID > > if ret_pool != poolid: > > continue > > > > - net_xml = NetXML(server, virt=virt, networkname=poolname, > > - is_new_net=False) > > - ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() > > - > > + if pool_type == "NetworkPool": > > + net_xml = NetXML(server, virt=virt, networkname=poolname, > > + is_new_net=False) > > + ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() > > + elif pool_type == "DiskPool": > > + disk_xml = PoolXML(server ,virt=virt, poolname=poolname, > > + is_new_pool=False) > > + ret_pool_attr_list = disk_xml.xml_get_pool_attr_list() > > for i in range(0, len(ret_pool_attr_list)): > > if ret_pool_attr_list[i] not in pool_attr_list.itervalues(): > > logger.error("Got error when parsing %s", > ret_pool_attr_list[i]) > > diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt- > cim/lib/XenKvmLib/vxml.py > > --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Sun May 17 23:34: > 58 2009 -0700 > > +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Tue May 19 01:12: > 22 2009 -0700 > > @@ -291,7 +291,7 @@ > > class PoolXML(Virsh, XMLClass): > > > > def __init__(self, server, poolname=const.default_pool_name, > > - virt='xen'): > > + virt='xen', is_new_pool=True): > > > > XMLClass.__init__(self) > > if virt == 'XenFV': > > @@ -300,6 +300,17 @@ > > self.pool_name = poolname > > self.server = server > > > > + if is_new_pool is False: > > + cmd = "virsh pool-dumpxml %s" % self.pool_name > > + s, disk_xml = utils.run_remote(server, cmd) > > + if s != 0: > > + logger.error("Encounter error dump netxml") > > + return None > > + else: > > + self.xml_string = disk_xml > > + self.xdoc = minidom.parseString(self.xml_string) > > + return > > + > > pool = self.add_sub_node(self.xdoc, 'pool', type='dir') > > self.add_sub_node(pool, 'name', self.pool_name) > > target = self.add_sub_node(pool, 'target') > > @@ -315,6 +326,12 @@ > > dpoolname = self.get_value_xpath('/pool/name') > > return dpoolname > > > > + def xml_get_pool_attr_list(self): > > + pool_attr_list = [] > > + poolpath = self.get_value_xpath('/pool/target/path') > > + pool_attr_list.append(poolpath) > > + > > + return pool_attr_list > > > > class VirtXML(Virsh, XMLClass): > > """Base class for all XML generation & operation""" > > > > _______________________________________________ > > Libvirt-cim mailing list > > Libvirt-cim at redhat.com > > https://www.redhat.com/mailman/listinfo/libvirt-cim > > > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From deeptik at linux.vnet.ibm.com Tue May 19 10:38:56 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Tue, 19 May 2009 16:08:56 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] #4 Update RPCS/04 with the latest updates of pool verification In-Reply-To: <8de9a3fab292ea47563b.1242728495@localhost.localdomain> References: <8de9a3fab292ea47563b.1242728495@localhost.localdomain> Message-ID: <4A128C40.5060106@linux.vnet.ibm.com> +1 :) -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From yunguol at cn.ibm.com Tue May 19 10:59:46 2009 From: yunguol at cn.ibm.com (Guo Lian Yun) Date: Tue, 19 May 2009 18:59:46 +0800 Subject: [Libvirt-cim] Test Run Summary (May 19 2009): KVM on Fedora release 10 (Cambridge) with sfcb Message-ID: ================================================= Test Run Summary (May 19 2009): KVM on Fedora release 10 (Cambridge) with sfcb ================================================= Distro: Fedora release 10 (Cambridge) Kernel: 2.6.27.15-170.2.24.fc10.x86_64 libvirt: 0.4.5 Hypervisor: QEMU 0.9.1 CIMOM: sfcb sfcbd 1.3.4preview Libvirt-cim revision: 875 Libvirt-cim changeset: cde25ad65c74 Cimtest revision: 683 Cimtest changeset: 6dc2d815e480 ================================================= FAIL : 5 XFAIL : 4 SKIP : 9 PASS : 135 ----------------- Total : 153 ================================================= FAIL Test Summary: ComputerSystemIndication - 01_created_indication.py: FAIL HostSystem - 03_hs_to_settdefcap.py: FAIL ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL SettingsDefineCapabilities - 01_forward.py: FAIL ================================================= XFAIL Test Summary: ComputerSystem - 32_start_reboot.py: XFAIL ComputerSystem - 33_suspend_reboot.py: XFAIL VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL VirtualSystemManagementService - 16_removeresource.py: XFAIL ================================================= SKIP Test Summary: ComputerSystem - 02_nosystems.py: SKIP LogicalDisk - 02_nodevs.py: SKIP VSSD - 02_bootldr.py: SKIP VirtualSystemMigrationService - 01_migratable_host.py: SKIP VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP ================================================= Full report: -------------------------------------------------------------------- AllocationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- AllocationCapabilities - 02_alloccap_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 01_enum.py: PASS -------------------------------------------------------------------- ComputerSystem - 02_nosystems.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- ComputerSystem - 03_defineVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 04_defineStartVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 05_activate_defined_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 
06_paused_active_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 22_define_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 23_pause_pause.py: PASS -------------------------------------------------------------------- ComputerSystem - 27_define_pause_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 32_start_reboot.py: XFAIL ERROR - Got CIM error Unable to reboot domain: this function is not supported by the hypervisor: virDomainReboot with return code 1 ERROR - Exception: Unable reboot dom 'cs_test_domain' InvokeMethod(RequestStateChange): Unable to reboot domain: this function is not supported by the hypervisor: virDomainReboot Bug:<00005> -------------------------------------------------------------------- ComputerSystem - 33_suspend_reboot.py: XFAIL ERROR - Got CIM error State not supported with return code 7 ERROR - Exception: Unable Suspend dom 'test_domain' InvokeMethod(RequestStateChange): State not supported Bug:<00012> -------------------------------------------------------------------- ComputerSystem - 35_start_reset.py: PASS -------------------------------------------------------------------- ComputerSystem - 40_RSC_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 41_cs_to_settingdefinestate.py: PASS -------------------------------------------------------------------- ComputerSystem - 42_cs_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystemIndication - 01_created_indication.py: FAIL ERROR - Exception : Request Failed: 200 Traceback (most recent call last): File "./lib/XenKvmLib/const.py", line 139, in do_try File "01_created_indication.py", line 146, in main sub_list, ind_names, dict = sub_ind(ip, virt) File "01_created_indication.py", line 60, in sub_ind sub.subscribe(dict['default_url'], dict['default_auth']) File "/data/users/daisy/cimtest/suites/libvirt-cim/lib/XenKvmLib/indication_tester.py", line 345, in subscribe "CreateInstance", auth_hdr) File "/data/users/daisy/cimtest/suites/libvirt-cim/lib/XenKvmLib/indication_tester.py", line 330, in __do_cimpost (resp.status, resp.reason)) Exception: Request Failed: 200 ERROR - None -------------------------------------------------------------------- ElementAllocatedFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 03_reverse_errs.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 04_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 01_forward.py: PASS -------------------------------------------------------------------- ElementCapabilities - 02_reverse.py: PASS -------------------------------------------------------------------- ElementCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 05_hostsystem_cap.py: PASS -------------------------------------------------------------------- ElementConforms - 01_forward.py: PASS 
-------------------------------------------------------------------- ElementConforms - 02_reverse.py: PASS -------------------------------------------------------------------- ElementConforms - 03_ectp_fwd_errs.py: PASS -------------------------------------------------------------------- ElementConforms - 04_ectp_rev_errs.py: PASS -------------------------------------------------------------------- ElementSettingData - 01_forward.py: PASS -------------------------------------------------------------------- ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 01_enum.py: PASS -------------------------------------------------------------------- HostSystem - 02_hostsystem_to_rasd.py: PASS -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - 'KVM_SettingsDefineCapabilities' returned 8 RASD objects instead of 4 Class not found -------------------------------------------------------------------- HostSystem - 04_hs_to_EAPF.py: PASS -------------------------------------------------------------------- HostSystem - 05_hs_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 06_hs_to_vsms.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 01_forward.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 01_forward.py: PASS -------------------------------------------------------------------- HostedDependency - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 03_enabledstate.py: PASS -------------------------------------------------------------------- HostedDependency - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 01_forward.py: PASS -------------------------------------------------------------------- HostedResourcePool - 02_reverse.py: PASS -------------------------------------------------------------------- HostedResourcePool - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedService - 01_forward.py: PASS -------------------------------------------------------------------- HostedService - 02_reverse.py: PASS -------------------------------------------------------------------- HostedService - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedService - 04_reverse_errs.py: PASS -------------------------------------------------------------------- KVMRedirectionSAP - 01_enum_KVMredSAP.py: PASS -------------------------------------------------------------------- LogicalDisk - 01_disk.py: PASS -------------------------------------------------------------------- LogicalDisk - 02_nodevs.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- LogicalDisk - 
03_ld_gi_errs.py: PASS -------------------------------------------------------------------- Memory - 01_memory.py: PASS -------------------------------------------------------------------- Memory - 02_defgetmem.py: PASS -------------------------------------------------------------------- Memory - 03_mem_gi_errs.py: PASS -------------------------------------------------------------------- NetworkPort - 01_netport.py: PASS -------------------------------------------------------------------- NetworkPort - 02_np_gi_errors.py: PASS -------------------------------------------------------------------- NetworkPort - 03_user_netport.py: PASS -------------------------------------------------------------------- Processor - 01_processor.py: PASS -------------------------------------------------------------------- Processor - 02_definesys_get_procs.py: PASS -------------------------------------------------------------------- Processor - 03_proc_gi_errs.py: PASS -------------------------------------------------------------------- Profile - 01_enum.py: PASS -------------------------------------------------------------------- Profile - 02_profile_to_elec.py: PASS -------------------------------------------------------------------- Profile - 03_rprofile_gi_errs.py: PASS -------------------------------------------------------------------- RASD - 01_verify_rasd_fields.py: PASS -------------------------------------------------------------------- RASD - 02_enum.py: PASS -------------------------------------------------------------------- RASD - 03_rasd_errs.py: PASS -------------------------------------------------------------------- RASD - 04_disk_rasd_size.py: PASS -------------------------------------------------------------------- RASD - 05_disk_rasd_emu_type.py: PASS -------------------------------------------------------------------- RedirectionService - 01_enum_crs.py: PASS -------------------------------------------------------------------- RedirectionService - 02_enum_crscap.py: PASS -------------------------------------------------------------------- RedirectionService - 03_RedirectionSAP_errs.py: PASS -------------------------------------------------------------------- ReferencedProfile - 01_verify_refprof.py: PASS -------------------------------------------------------------------- ReferencedProfile - 02_refprofile_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 03_forward_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 05_RAPF_err.py: PASS -------------------------------------------------------------------- ResourcePool - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePool - 02_rp_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 01_enum.py: PASS 
-------------------------------------------------------------------- ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description One or more parameter values passed to the method were invalid InvokeMethod(CreateChildResourcePool): One or more parameter values passed to the method were invalid -------------------------------------------------------------------- ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL ERROR - Error in childpool creation ERROR - (1, u'*** Provider Virt_ResourcePoolConfigurationService(7207) exiting due to a SIGSEGV signal ') ERROR - Error in networkpool creation InvokeMethod(CreateChildResourcePool): *** Provider Virt_ResourcePoolConfigurationService(7207) exiting due to a SIGSEGV signal -------------------------------------------------------------------- ServiceAccessBySAP - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAccessBySAP - 02_reverse.py: PASS -------------------------------------------------------------------- ServiceAffectsElement - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAffectsElement - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 01_forward.py: PASS -------------------------------------------------------------------- SettingsDefine - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 03_sds_fwd_errs.py: PASS -------------------------------------------------------------------- SettingsDefine - 04_sds_rev_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 01_forward.py: FAIL ERROR - KVM_SettingsDefineCapabilities returned 8 ResourcePool objects instead of 4 -------------------------------------------------------------------- SettingsDefineCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS -------------------------------------------------------------------- SystemDevice - 01_forward.py: PASS -------------------------------------------------------------------- SystemDevice - 02_reverse.py: PASS -------------------------------------------------------------------- SystemDevice - 03_fwderrs.py: PASS -------------------------------------------------------------------- VSSD - 01_enum.py: PASS -------------------------------------------------------------------- VSSD - 02_bootldr.py: SKIP -------------------------------------------------------------------- VSSD - 03_vssd_gi_errs.py: PASS -------------------------------------------------------------------- VSSD - 04_vssd_to_rasd.py: PASS 
-------------------------------------------------------------------- VirtualSystemManagementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 01_definesystem_name.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 02_destroysystem.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 03_definesystem_ess.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 04_definesystem_ers.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 05_destroysystem_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 06_addresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 07_addresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 08_modifyresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL -------------------------------------------------------------------- VirtualSystemManagementService - 10_hv_version.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 11_define_memrasdunits.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 12_referenced_config.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 13_refconfig_additional_devs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 14_define_sys_disk.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 15_mod_system_settings.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 16_removeresource.py: XFAIL ERROR - 0 RASD insts for domain/mouse:ps2 No such instance (no device domain/mouse:ps2) Bug:<00014> -------------------------------------------------------------------- VirtualSystemManagementService - 17_removeresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationService - 01_migratable_host.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP 
--------------------------------------------------------------------
VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP
--------------------------------------------------------------------
VirtualSystemMigrationSettingData - 01_enum.py: PASS
--------------------------------------------------------------------
VirtualSystemMigrationSettingData - 02_vsmsd_gi_errs.py: PASS
--------------------------------------------------------------------
VirtualSystemSettingDataComponent - 01_forward.py: PASS
--------------------------------------------------------------------
VirtualSystemSettingDataComponent - 02_reverse.py: PASS
--------------------------------------------------------------------
VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS
--------------------------------------------------------------------
VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS
--------------------------------------------------------------------
VirtualSystemSnapshotService - 01_enum.py: PASS
--------------------------------------------------------------------
VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS
--------------------------------------------------------------------
VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS
--------------------------------------------------------------------
VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py: PASS
--------------------------------------------------------------------

From yunguol at cn.ibm.com Tue May 19 11:18:56 2009
From: yunguol at cn.ibm.com (Guo Lian Yun)
Date: Tue, 19 May 2009 19:18:56 +0800
Subject: [Libvirt-cim] Test Run Summary (May 19 2009): KVM on Fedora release 10.90 (Rawhide) with Pegasus
Message-ID: 

=================================================
Test Run Summary (May 19 2009): KVM on Fedora release 10.90 (Rawhide) with Pegasus
=================================================
Distro: Fedora release 10.90 (Rawhide)
Kernel: 2.6.29-0.38.rc1.git4.fc11.x86_64
libvirt: 0.6.2
Hypervisor: QEMU 0.10.0
CIMOM: Pegasus 2.7.2
Libvirt-cim revision: 875
Libvirt-cim changeset: cde25ad65c74+
Cimtest revision: 683
Cimtest changeset: 6dc2d815e480
=================================================
FAIL : 3
XFAIL : 3
SKIP : 9
PASS : 138
-----------------
Total : 153
=================================================
FAIL Test Summary:
HostSystem - 03_hs_to_settdefcap.py: FAIL
ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL
SettingsDefineCapabilities - 01_forward.py: FAIL
=================================================
XFAIL Test Summary:
ComputerSystem - 32_start_reboot.py: XFAIL
ComputerSystem - 33_suspend_reboot.py: XFAIL
VirtualSystemManagementService - 16_removeresource.py: XFAIL
=================================================
SKIP Test Summary:
ComputerSystem - 02_nosystems.py: SKIP
LogicalDisk - 02_nodevs.py: SKIP
VSSD - 02_bootldr.py: SKIP
VirtualSystemMigrationService - 01_migratable_host.py: SKIP
VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP
VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP
VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP
VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP
VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP
=================================================
Full report:
--------------------------------------------------------------------
AllocationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- AllocationCapabilities - 02_alloccap_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 01_enum.py: PASS -------------------------------------------------------------------- ComputerSystem - 02_nosystems.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- ComputerSystem - 03_defineVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 04_defineStartVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 05_activate_defined_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 06_paused_active_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 22_define_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 23_pause_pause.py: PASS -------------------------------------------------------------------- ComputerSystem - 27_define_pause_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 32_start_reboot.py: XFAIL ERROR - Got CIM error CIM_ERR_FAILED: Unable to reboot domain: this function is not supported by the hypervisor: virDomainReboot with return code 1 ERROR - Exception: Unable reboot dom 'cs_test_domain' InvokeMethod(RequestStateChange): CIM_ERR_FAILED: Unable to reboot domain: this function is not supported by the hypervisor: virDomainReboot Bug:<00005> -------------------------------------------------------------------- ComputerSystem - 33_suspend_reboot.py: XFAIL ERROR - Got CIM error CIM_ERR_NOT_SUPPORTED: State not supported with return code 7 ERROR - Exception: Unable Suspend dom 'test_domain' InvokeMethod(RequestStateChange): CIM_ERR_NOT_SUPPORTED: State not supported Bug:<00012> -------------------------------------------------------------------- ComputerSystem - 35_start_reset.py: PASS -------------------------------------------------------------------- ComputerSystem - 40_RSC_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 41_cs_to_settingdefinestate.py: PASS -------------------------------------------------------------------- ComputerSystem - 42_cs_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystemIndication - 01_created_indication.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 03_reverse_errs.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 04_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 01_forward.py: PASS -------------------------------------------------------------------- ElementCapabilities - 02_reverse.py: PASS -------------------------------------------------------------------- ElementCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 04_reverse_errs.py: PASS 
-------------------------------------------------------------------- ElementCapabilities - 05_hostsystem_cap.py: PASS -------------------------------------------------------------------- ElementConforms - 01_forward.py: PASS -------------------------------------------------------------------- ElementConforms - 02_reverse.py: PASS -------------------------------------------------------------------- ElementConforms - 03_ectp_fwd_errs.py: PASS -------------------------------------------------------------------- ElementConforms - 04_ectp_rev_errs.py: PASS -------------------------------------------------------------------- ElementSettingData - 01_forward.py: PASS -------------------------------------------------------------------- ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 01_enum.py: PASS -------------------------------------------------------------------- HostSystem - 02_hostsystem_to_rasd.py: PASS -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - 'KVM_SettingsDefineCapabilities' returned 8 RASD objects instead of 4 CIM_ERR_INVALID_CLASS: Linux_ComputerSystem -------------------------------------------------------------------- HostSystem - 04_hs_to_EAPF.py: PASS -------------------------------------------------------------------- HostSystem - 05_hs_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 06_hs_to_vsms.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 01_forward.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 01_forward.py: PASS -------------------------------------------------------------------- HostedDependency - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 03_enabledstate.py: PASS -------------------------------------------------------------------- HostedDependency - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 01_forward.py: PASS -------------------------------------------------------------------- HostedResourcePool - 02_reverse.py: PASS -------------------------------------------------------------------- HostedResourcePool - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedService - 01_forward.py: PASS -------------------------------------------------------------------- HostedService - 02_reverse.py: PASS -------------------------------------------------------------------- HostedService - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedService - 04_reverse_errs.py: PASS -------------------------------------------------------------------- KVMRedirectionSAP - 01_enum_KVMredSAP.py: PASS -------------------------------------------------------------------- LogicalDisk - 
01_disk.py: PASS -------------------------------------------------------------------- LogicalDisk - 02_nodevs.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- LogicalDisk - 03_ld_gi_errs.py: PASS -------------------------------------------------------------------- Memory - 01_memory.py: PASS -------------------------------------------------------------------- Memory - 02_defgetmem.py: PASS -------------------------------------------------------------------- Memory - 03_mem_gi_errs.py: PASS -------------------------------------------------------------------- NetworkPort - 01_netport.py: PASS -------------------------------------------------------------------- NetworkPort - 02_np_gi_errors.py: PASS -------------------------------------------------------------------- NetworkPort - 03_user_netport.py: PASS -------------------------------------------------------------------- Processor - 01_processor.py: PASS -------------------------------------------------------------------- Processor - 02_definesys_get_procs.py: PASS -------------------------------------------------------------------- Processor - 03_proc_gi_errs.py: PASS -------------------------------------------------------------------- Profile - 01_enum.py: PASS -------------------------------------------------------------------- Profile - 02_profile_to_elec.py: PASS -------------------------------------------------------------------- Profile - 03_rprofile_gi_errs.py: PASS -------------------------------------------------------------------- RASD - 01_verify_rasd_fields.py: PASS -------------------------------------------------------------------- RASD - 02_enum.py: PASS -------------------------------------------------------------------- RASD - 03_rasd_errs.py: PASS -------------------------------------------------------------------- RASD - 04_disk_rasd_size.py: PASS -------------------------------------------------------------------- RASD - 05_disk_rasd_emu_type.py: PASS -------------------------------------------------------------------- RedirectionService - 01_enum_crs.py: PASS -------------------------------------------------------------------- RedirectionService - 02_enum_crscap.py: PASS -------------------------------------------------------------------- RedirectionService - 03_RedirectionSAP_errs.py: PASS -------------------------------------------------------------------- ReferencedProfile - 01_verify_refprof.py: PASS -------------------------------------------------------------------- ReferencedProfile - 02_refprofile_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 03_forward_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 05_RAPF_err.py: PASS -------------------------------------------------------------------- ResourcePool - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePool - 02_rp_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 01_enum.py: PASS 
-------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description CIM_ERR_INVALID_PARAMETER InvokeMethod(CreateChildResourcePool): CIM_ERR_INVALID_PARAMETER -------------------------------------------------------------------- ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 07_DeleteResourcePool.py: PASS -------------------------------------------------------------------- ServiceAccessBySAP - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAccessBySAP - 02_reverse.py: PASS -------------------------------------------------------------------- ServiceAffectsElement - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAffectsElement - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 01_forward.py: PASS -------------------------------------------------------------------- SettingsDefine - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 03_sds_fwd_errs.py: PASS -------------------------------------------------------------------- SettingsDefine - 04_sds_rev_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 01_forward.py: FAIL ERROR - KVM_SettingsDefineCapabilities returned 8 ResourcePool objects instead of 4 -------------------------------------------------------------------- SettingsDefineCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS -------------------------------------------------------------------- SystemDevice - 01_forward.py: PASS -------------------------------------------------------------------- SystemDevice - 02_reverse.py: PASS -------------------------------------------------------------------- SystemDevice - 03_fwderrs.py: PASS -------------------------------------------------------------------- VSSD - 01_enum.py: PASS -------------------------------------------------------------------- VSSD - 02_bootldr.py: SKIP -------------------------------------------------------------------- VSSD - 03_vssd_gi_errs.py: PASS -------------------------------------------------------------------- VSSD - 04_vssd_to_rasd.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 01_enum.py: PASS 
-------------------------------------------------------------------- VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 01_definesystem_name.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 02_destroysystem.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 03_definesystem_ess.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 04_definesystem_ers.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 05_destroysystem_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 06_addresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 07_addresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 08_modifyresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 09_procrasd_persist.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 10_hv_version.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 11_define_memrasdunits.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 12_referenced_config.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 13_refconfig_additional_devs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 14_define_sys_disk.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 15_mod_system_settings.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 16_removeresource.py: XFAIL ERROR - 0 RASD insts for domain/mouse:ps2 CIM_ERR_NOT_FOUND: No such instance (no device domain/mouse:ps2) Bug:<00014> -------------------------------------------------------------------- VirtualSystemManagementService - 17_removeresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationService - 01_migratable_host.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP 
-------------------------------------------------------------------- VirtualSystemMigrationSettingData - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 02_vsmsd_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 01_forward.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 02_reverse.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py: PASS -------------------------------------------------------------------- -------------- next part -------------- An HTML attachment was scrubbed... URL: From deeptik at linux.vnet.ibm.com Tue May 19 11:53:16 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 19 May 2009 04:53:16 -0700 Subject: [Libvirt-cim] [PATCH 0 of 2] Adding new tc to verify the template RASDs for parent NetPoolRASD. Message-ID: From deeptik at linux.vnet.ibm.com Tue May 19 11:53:17 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 19 May 2009 04:53:17 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] [TEST] Modifying get_pool_rasds() of pool.py In-Reply-To: References: Message-ID: <093bda6ad92c0d6b030c.1242733997@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1242733506 25200 # Node ID 093bda6ad92c0d6b030cfc1e9d98743335555dff # Parent 6dc2d815e480237c91115cd0d86f6325503e33f7 [TEST] Modifying get_pool_rasds() of pool.py. Modifying get_pool_rasds() of pool.py to return all Parent DiskPool and NetworkPool records when required. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r 6dc2d815e480 -r 093bda6ad92c suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Sun May 17 23:34:58 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Tue May 19 04:45:06 2009 -0700 @@ -105,23 +105,26 @@ return volume -def get_pool_rasds(server, virt): +def get_pool_rasds(server, virt, pool_type="NetworkPool", + filter_default=True): net_pool_rasds = [] ac_cn = get_typed_class(virt, "AllocationCapabilities") an_cn = get_typed_class(virt, "SettingsDefineCapabilities") - key_list = {"InstanceID" : "NetworkPool/0" } - + key_list = {"InstanceID" : "%s/0" %pool_type } + try: inst = GetInstance(server, ac_cn, key_list) rasd = Associators(server, an_cn, ac_cn, InstanceID=inst.InstanceID) except Exception, detail: logger.error("Exception: %s", detail) return None - - for item in rasd: - if item['InstanceID'] == "Default": - net_pool_rasds.append(item) + if filter_default == True: + for item in rasd: + if item['InstanceID'] == "Default": + net_pool_rasds.append(item) + else: + return rasd return net_pool_rasds @@ -211,7 +214,7 @@ net_xml = NetXML(server, virt=virt, networkname=poolname, is_new_net=False) ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() - + for i in range(0, len(ret_pool_attr_list)): if ret_pool_attr_list[i] not in pool_attr_list.itervalues(): logger.error("Got error when parsing %s", ret_pool_attr_list[i]) From deeptik at linux.vnet.ibm.com Tue May 19 11:53:18 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 19 May 2009 04:53:18 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] [TEST] Adding new tc to verify the template RASDs for parent NetPoolRASD In-Reply-To: References: Message-ID: <4129e1f0e95aad2d1f61.1242733998@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1242733975 25200 # Node ID 4129e1f0e95aad2d1f61a0d96735e55e4dd8104f # Parent 093bda6ad92c0d6b030cfc1e9d98743335555dff [TEST] Adding new tc to verify the template RASDs for parent NetPoolRASD. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r 093bda6ad92c -r 4129e1f0e95a suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py Tue May 19 04:52:55 2009 -0700 @@ -0,0 +1,122 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +# This test case is used to verify the Parent NetPoolRASD and +# DiskPoolRASD properties in detail using the SettingsDefineCapabilities +# association. 
+# +# Ex: +# Command: +# wbemcli ai -ac SettingsDefineCapabilities \ +# 'http://localhost:5988/root/virt:KVM_AllocationCapabilties.InstanceID=\ +# "NetworkPool/0"' +# +# Output: +# localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" +# -InstanceID="Default" [ verified for Maximum, Increment, Default as well ] +# -ResourceType=10 +# -PoolID="NetworkPool/0" +# -Address="192.168.122.1" +# -Netmask="255.255.255.0" +# -IPRangeStart="192.168.122.2" +# -IPRangeEnd="192.168.122.254" +# -ForwardDevice= [ verified for 'None' and "eth0" ] +# -ForwardMode=0 [ verified for 1,2 as well ] +# +# +# +# Date : 18-05-2009 + +import sys +from sets import Set +from CimTest.Globals import logger +from XenKvmLib.const import do_main +from CimTest.ReturnCodes import PASS, FAIL +from XenKvmLib.pool import get_pool_rasds + +sup_types = ['KVM', 'Xen', 'XenFV'] + +def get_rec(netpool_rasd, inst_id='Default'): + recs = [] + for np_rasd in netpool_rasd: + if np_rasd['InstanceID'] == inst_id and \ + 'NetworkPool/0' == np_rasd['PoolID']: + recs.append(np_rasd) + return recs + +def verify_rec(netpool_rasd, inst_type='Default'): + logger.info("Verifying '%s' records", inst_type) + try: + n_rec_val = { 'ResourceType' : 10, + 'PoolID' : "NetworkPool/0", + 'Address' : "192.168.122.1", + 'Netmask' : "255.255.255.0", + 'IPRangeStart' : "192.168.122.2", + 'IPRangeEnd' : "192.168.122.254" + } + + n_rec = get_rec(netpool_rasd, inst_id=inst_type) + if len(n_rec) != 5: + raise Exception("Got %s recs instead of 5" %(len(n_rec))) + return FAIL + + exp_mode_device = [('None', 0L), ('None', 1L), ('eth0', 1L), + ('None', 2L), ('eth0', 2L)] + + res_mode_device = [] + for rec in n_rec: + l = (str(rec['ForwardDevice']), rec['ForwardMode']) + res_mode_device.append(l) + + if len(Set(exp_mode_device) & Set(res_mode_device)) != 5 : + raise Exception("Mismatching Mode and device values, " \ + "Got %s, Expected %s" %(exp_mode_device, \ + res_mode_device)) + + for key in n_rec_val.keys(): + for rec in n_rec: + if n_rec_val[key] != rec[key]: + raise Exception("'%s' Mismatch, Got %s, Expected %s" \ + % (key, rec[key], n_rec_val[key])) + + except Exception, details: + logger.error("Exception details: %s", details) + return FAIL + + return PASS + + + at do_main(sup_types) +def main(): + options = main.options + virt = options.virt + server = options.ip + status = FAIL + netpool_rasd = get_pool_rasds(server, virt, filter_default=False) + inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] + for inst_type in inst_list: + status = verify_rec(netpool_rasd, inst_type) + if status != PASS: + return FAIL + return status +if __name__ == "__main__": + sys.exit(main()) From rmaciel at linux.vnet.ibm.com Tue May 19 20:23:04 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Tue, 19 May 2009 17:23:04 -0300 Subject: [Libvirt-cim] [PATCH] Return VSSD reference from CreateSnapshot In-Reply-To: References: Message-ID: <4A131528.1060405@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1241805181 25200 > # Node ID a8438f4413a007f0ae427072d1c9a603cc911341 > # Parent 9a16c7a7963cebddd145212e57ccc9defa5e0a0c > Return VSSD reference from CreateSnapshot > > This method has two outputs - Job and ResultingSnapshot. The provider was > returning a value for ResultingSnapshot. 
> > Signed-off-by: Kaitlin Rupert > > diff -r 9a16c7a7963c -r a8438f4413a0 src/Makefile.am > --- a/src/Makefile.am Fri May 08 10:53:01 2009 -0700 > +++ b/src/Makefile.am Fri May 08 10:53:01 2009 -0700 > @@ -197,9 +197,9 @@ > > libVirt_VSMigrationSettingData_la_SOURCES = Virt_VSMigrationSettingData.c > > -libVirt_VirtualSystemSnapshotService_la_DEPENDENCIES = libVirt_HostSystem.la > +libVirt_VirtualSystemSnapshotService_la_DEPENDENCIES = libVirt_HostSystem.la libVirt_VSSD.la > libVirt_VirtualSystemSnapshotService_la_SOURCES = Virt_VirtualSystemSnapshotService.c > -libVirt_VirtualSystemSnapshotService_la_LIBADD = -lVirt_HostSystem > +libVirt_VirtualSystemSnapshotService_la_LIBADD = -lVirt_HostSystem -lVirt_VSSD > > libVirt_VirtualSystemSnapshotServiceCapabilities_la_DEPENDENCIES = > libVirt_VirtualSystemSnapshotServiceCapabilities_la_SOURCES = Virt_VirtualSystemSnapshotServiceCapabilities.c > diff -r 9a16c7a7963c -r a8438f4413a0 src/Virt_VirtualSystemSnapshotService.c > --- a/src/Virt_VirtualSystemSnapshotService.c Fri May 08 10:53:01 2009 -0700 > +++ b/src/Virt_VirtualSystemSnapshotService.c Fri May 08 10:53:01 2009 -0700 > @@ -38,6 +38,7 @@ > > #include "Virt_VirtualSystemSnapshotService.h" > #include "Virt_HostSystem.h" > +#include "Virt_VSSD.h" > > #define CIM_JOBSTATE_STARTING 3 > #define CIM_JOBSTATE_RUNNING 4 > @@ -388,6 +389,8 @@ > struct snap_context *ctx; > CMPIStatus s; > CMPIObjectPath *job; > + CMPIObjectPath *vssd; > + CMPIInstance *inst; > > ctx = new_context(name, &s); > if (ctx == NULL) { > @@ -401,7 +404,21 @@ > ctx->restore = (type != VIR_VSSS_SNAPSHOT_MEMT); > > s = create_job(context, ref, ctx, &job); > + > + s = get_vssd_by_name(_BROKER, ref, name, &inst); > + if (s.rc != CMPI_RC_OK) { > + CU_DEBUG("Unable to get guest VSSD in start_snapshot_job()"); > + goto out; > + } > + > + vssd = CMGetObjectPath(inst, &s); > + if (s.rc != CMPI_RC_OK) { > + CU_DEBUG("Unable to get VSSD ref from instance"); > + goto out; > + } > + > CMAddArg(argsout, "Job", (CMPIValue *)&job, CMPI_ref); > + CMAddArg(argsout, "ResultingSnapshot", (CMPIValue *)&vssd, CMPI_ref); > > out: > return s; > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Tue May 19 20:36:31 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Tue, 19 May 2009 17:36:31 -0300 Subject: [Libvirt-cim] [PATCH] Work around MOF typo in VSSnapshotServiceCapabilities In-Reply-To: References: Message-ID: <4A13184F.9070305@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1242411431 25200 > # Node ID d5155c4b965ddc209e37a59ddfa8de6f649a51b0 > # Parent 4ac0b029df21346d5f958d1b5a29c7360b7850ae > Work around MOF typo in VSSnapshotServiceCapabilities > > Also set array to NULL before reallocating. 
> 
> Signed-off-by: Kaitlin Rupert 
> 
> diff -r 4ac0b029df21 -r d5155c4b965d src/Virt_VirtualSystemSnapshotServiceCapabilities.c
> --- a/src/Virt_VirtualSystemSnapshotServiceCapabilities.c Mon May 11 16:47:15 2009 -0300
> +++ b/src/Virt_VirtualSystemSnapshotServiceCapabilities.c Fri May 15 11:17:11 2009 -0700
> @@ -62,9 +62,13 @@
>          element = (uint16_t)APPLY_SNAPSHOT;
>          CMSetArrayElementAt(array, 1, &element, CMPI_uint16);
> 
> -        CMSetProperty(inst, "AsynchronousMethodsSupported",
> +        /* There is a typo in the mof - the attribute name in the mof is:
> +           AynchronousMethodsSupported, not AsynchronousMethodsSupported.
> +           Making a note incase this changes later. */
> +        CMSetProperty(inst, "AynchronousMethodsSupported",
>                        (CMPIValue *)&array, CMPI_uint16A);
> -
> +
> +        array = NULL;
>          array = CMNewArray(broker, 1, CMPI_uint16, &s);
>          if ((s.rc != CMPI_RC_OK) || (array == NULL))
>                  goto out;
> 
> _______________________________________________
> Libvirt-cim mailing list
> Libvirt-cim at redhat.com
> https://www.redhat.com/mailman/listinfo/libvirt-cim

-- 
Richard Maciel, MSc
IBM Linux Technology Center
rmaciel at linux.vnet.ibm.com

From kaitlin at linux.vnet.ibm.com Tue May 19 23:12:45 2009
From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert)
Date: Tue, 19 May 2009 16:12:45 -0700
Subject: [Libvirt-cim] [PATCH 2 of 2] [TEST] Adding new tc to verify the template RASDs for parent NetPoolRASD
In-Reply-To: <4129e1f0e95aad2d1f61.1242733998@elm3a148.beaverton.ibm.com>
References: <4129e1f0e95aad2d1f61.1242733998@elm3a148.beaverton.ibm.com>
Message-ID: <4A133CED.5010000@linux.vnet.ibm.com>

> +
> +def get_rec(netpool_rasd, inst_id='Default'):
> +    recs = []
> +    for np_rasd in netpool_rasd:
> +        if np_rasd['InstanceID'] == inst_id and \
> +           'NetworkPool/0' == np_rasd['PoolID']:

libvirt-cim will only generate NetPoolRASDs for the parent pool (which is
"NetworkPool/0"). So there's no need to verify the PoolID here.
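(For illustration only -- not part of either patch: a minimal standalone
sketch of the lookup that get_pool_rasds() wraps, fetching the template
NetPoolRASDs that hang off the parent pool's AllocationCapabilities
instance. The host URL, credentials, and the KVM class prefix are
assumptions; adjust them for the system under test.)

import pywbem

# Hypothetical connection details -- replace host, credentials, namespace.
conn = pywbem.WBEMConnection("http://localhost:5988", ("user", "pass"),
                             default_namespace="root/virt")

# AllocationCapabilities instance for the parent network pool.
ac = pywbem.CIMInstanceName("KVM_AllocationCapabilities",
                            keybindings={"InstanceID": "NetworkPool/0"},
                            namespace="root/virt")

# The Default/Minimum/Maximum/Increment template RASDs are reached through
# the SettingsDefineCapabilities association; each instance returned here
# already carries PoolID "NetworkPool/0".
for rasd in conn.Associators(ac, AssocClass="KVM_SettingsDefineCapabilities"):
    print rasd["InstanceID"], rasd["PoolID"], rasd["ForwardMode"], \
          rasd["ForwardDevice"]

(Each of the four template types should show the five forward mode/device
combinations the test compares against exp_mode_device.)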
You have PoolID in the n_rec_val list anyway, so it'll get verified in verify_rec() > + recs.append(np_rasd) > + return recs > + > +def verify_rec(netpool_rasd, inst_type='Default'): > + logger.info("Verifying '%s' records", inst_type) > + try: > + n_rec_val = { 'ResourceType' : 10, > + 'PoolID' : "NetworkPool/0", > + 'Address' : "192.168.122.1", > + 'Netmask' : "255.255.255.0", > + 'IPRangeStart' : "192.168.122.2", > + 'IPRangeEnd' : "192.168.122.254" > + } > + > + n_rec = get_rec(netpool_rasd, inst_id=inst_type) > + if len(n_rec) != 5: > + raise Exception("Got %s recs instead of 5" %(len(n_rec))) > + return FAIL > + > + exp_mode_device = [('None', 0L), ('None', 1L), ('eth0', 1L), > + ('None', 2L), ('eth0', 2L)] > + > + res_mode_device = [] > + for rec in n_rec: > + l = (str(rec['ForwardDevice']), rec['ForwardMode']) > + res_mode_device.append(l) > + > + if len(Set(exp_mode_device) & Set(res_mode_device)) != 5 : > + raise Exception("Mismatching Mode and device values, " \ > + "Got %s, Expected %s" %(exp_mode_device, \ > + res_mode_device)) > + > + for key in n_rec_val.keys(): > + for rec in n_rec: > + if n_rec_val[key] != rec[key]: > + raise Exception("'%s' Mismatch, Got %s, Expected %s" \ > + % (key, rec[key], n_rec_val[key])) > + > + except Exception, details: > + logger.error("Exception details: %s", details) > + return FAIL > + > + return PASS > + > + > + at do_main(sup_types) > +def main(): > + options = main.options > + virt = options.virt > + server = options.ip > + status = FAIL > + netpool_rasd = get_pool_rasds(server, virt, filter_default=False) > + inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] > + for inst_type in inst_list: > + status = verify_rec(netpool_rasd, inst_type) > + if status != PASS: > + return FAIL The bulk of the test is happening in a different function.. since there's not much going on in the test, you can just put the body of verify_rec() in main. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Tue May 19 23:23:42 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Tue, 19 May 2009 16:23:42 -0700 Subject: [Libvirt-cim] [PATCH 0 of 2] [TEST] Add test to verifyCreateSnapshot() Message-ID: The necessary libvit-cim changes are now upstream. So this is ready to test. From kaitlin at linux.vnet.ibm.com Tue May 19 23:23:43 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Tue, 19 May 2009 16:23:43 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] [TEST] Add vsss.py module In-Reply-To: References: Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1242082293 25200 # Node ID f12adddd10e4f40bd338f17ca99b2d246b8ba176 # Parent 95b9360a2d1c3dce540ea9eeb965bec86ebbdfab [TEST] Add vsss.py module For keeping functions related to the VirtualSystemSnapshotService Signed-off-by: Kaitlin Rupert diff -r 95b9360a2d1c -r f12adddd10e4 suites/libvirt-cim/lib/XenKvmLib/vsss.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/lib/XenKvmLib/vsss.py Mon May 11 15:51:33 2009 -0700 @@ -0,0 +1,40 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Kaitlin Rupert +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import pywbem +from VirtLib.utils import run_remote +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS + +#Path to snapshot save location +snapshot_save_loc = '/var/lib/libvirt/' + +def remove_snapshot(ip, vm_name): + snapshot = "%s%s" % (snapshot_save_loc, vm_name) + + cmd = "rm %s.save" % snapshot + ret, out = run_remote(ip, cmd) + if ret != 0: + logger.error("Failed to remove snapshot file for %s", vm_name) + return FAIL + + return PASS From kaitlin at linux.vnet.ibm.com Tue May 19 23:23:44 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Tue, 19 May 2009 16:23:44 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] [TEST] (#3) Add VSSS 03_create_snapshot.py In-Reply-To: References: Message-ID: <6b732da8f7fd1e36a7df.1242775424@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242082293 25200 # Node ID 6b732da8f7fd1e36a7df3080ace880acf482592f # Parent f12adddd10e4f40bd338f17ca99b2d246b8ba176 [TEST] (#3) Add VSSS 03_create_snapshot.py This test case attempts a guest snapshot and verifies the results. Updates from 2 to 3: -Set AynchronousMethodsSupported to "" - this value isn't needed for creating a snapshot Updates from 1 to 2: -Remove import of CIM_ERR_FAILED -Add test description -Add comment explaining which snapshot type is being used Signed-off-by: Kaitlin Rupert diff -r f12adddd10e4 -r 6b732da8f7fd suites/libvirt-cim/cimtest/VirtualSystemSnapshotService/03_create_snapshot.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/VirtualSystemSnapshotService/03_create_snapshot.py Mon May 11 15:51:33 2009 -0700 @@ -0,0 +1,147 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Kaitlin Rupert +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# Description: +# This test verfies that calling CreateSnapshot() on a running guest +# is successful and this it returns the proper Job and VSSD instances. 
+# + +import sys +from pywbem import cim_types +from CimTest.Globals import logger +from CimTest.ReturnCodes import PASS, FAIL +from XenKvmLib.const import do_main +from XenKvmLib.vxml import get_class +from XenKvmLib.classes import get_typed_class, inst_to_mof +from XenKvmLib.enumclass import EnumNames, EnumInstances, GetInstance +from XenKvmLib.vsss import remove_snapshot + +sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] + +#32769 - create a snapshot of the guest and leave the guest in a +# 'suspended' state +SNAPSHOT = cim_types.Uint16(32769) +test_dom = "snapshot_vm" + +def get_cs_ref(virt, ip): + cs_cn = get_typed_class(virt, "ComputerSystem") + + cs_refs = EnumNames(ip, cs_cn) + if cs_refs is None or len(cs_refs) < 1: + logger.error("Exp at least one domain defined on the system") + return FAIL, None + + cs_ref = None + for ref in cs_refs: + if ref['Name'] == test_dom: + cs_ref = ref + break + + if cs_ref is None: + logger.error("Enum of %s didn't return %s", cs_cn, test_dom) + return FAIL, None + + return PASS, cs_ref + +def get_vsssc_inst(virt, ip): + vsssc_cn = get_typed_class(virt, "VirtualSystemSnapshotServiceCapabilities") + + vsssc_insts = EnumInstances(ip, vsssc_cn, ret_cim_inst=True) + if vsssc_insts is None or len(vsssc_insts) < 1: + logger.error("Exp at least one %s", vsssc_cn) + return FAIL, None + + vsssc = vsssc_insts[0] + + #Override the additional instance values. We only care about the key + #values (eventhough CreateSnapshot takes a instance) + vsssc['SynchronousMethodsSupported'] = "" + vsssc['AynchronousMethodsSupported'] = "" + vsssc['SnapshotTypesSupported'] = "" + + vsssc = inst_to_mof(vsssc) + + return PASS, vsssc + + at do_main(sup_types) +def main(): + options = main.options + + cxml = get_class(options.virt)(test_dom) + + try: + ret = cxml.cim_define(options.ip) + if not ret: + raise Exception("Unable to define %s", test_dom) + + status = cxml.cim_start(options.ip) + if status != PASS: + raise Exception("Failed to start the defined domain: %s" % test_dom) + + status, cs_ref = get_cs_ref(options.virt, options.ip) + if status != PASS: + raise Exception("Unable to get reference for %s" % test_dom) + + status, vsssc = get_vsssc_inst(options.virt, options.ip) + if status != PASS: + raise Exception("Unable to get VSSSC instance") + + vsss_cn = get_typed_class(options.virt, "VirtualSystemSnapshotService") + vsss_refs = EnumNames(options.ip, vsss_cn) + if vsss_refs is None or len(vsss_refs) < 1: + raise Exception("Exp at least one %s" % vsss_cn) + + service = vsss_refs[0] + keys = { 'Name' : service['Name'], + 'CreationClassName' : service['CreationClassName'], + 'SystemCreationClassName' : service['SystemCreationClassName'], + 'SystemName' : service['SystemName'] + } + service = GetInstance(options.ip, vsss_cn, keys) + + output = service.CreateSnapshot(AffectedSystem=cs_ref, + SnapshotSettings=vsssc, + SnapshotType=SNAPSHOT) + + ret = output[0] + if ret != 0: + raise Exception("Snapshot of %s failed!" 
% test_dom) + + if output[1]['Job'] is None: + raise Exception("CreateSnapshot failed to return a CIM job inst") + + if output[1]['ResultingSnapshot'] is None: + raise Exception("CreateSnapshot failed to return ResultingSnapshot") + + except Exception, detail: + logger.error("Exception: %s", detail) + status = FAIL + + cxml.cim_destroy(options.ip) + cxml.undefine(options.ip) + + remove_snapshot(options.ip, test_dom) + + return status + +if __name__ == "__main__": + sys.exit(main()) + From rmaciel at linux.vnet.ibm.com Tue May 19 23:18:50 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Tue, 19 May 2009 20:18:50 -0300 Subject: [Libvirt-cim] [PATCH 0 of 5] Migrate from scheam 2.16 to 2.21 In-Reply-To: References: Message-ID: <4A133E5A.8000202@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > This patchset provides the set of changes needed to upgrade to scheam 2.21. > > The easiest way to test this is to do the following: > > 1) make preuninstall > 2) make uninstall > 3) Recongifure your source tree to pick up the makefile changes > 5) make > 6) make preinstall > 7) Restart your CIMOM (to be sure to pick up the new schema) > 8) make install > 9) make postinstall > > I've also testing this by uninstalling Pegasus / sfcb and removing the repository. Then reinstalling Pegasus / sfcb and doing the steps above. > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From rmaciel at linux.vnet.ibm.com Tue May 19 23:27:00 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Tue, 19 May 2009 20:27:00 -0300 Subject: [Libvirt-cim] [PATCH] Update SAE to use the proper association attributes In-Reply-To: <61b3f36a8a0294c916b7.1242690205@localhost.localdomain> References: <61b3f36a8a0294c916b7.1242690205@localhost.localdomain> Message-ID: <4A134044.6040202@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1242689960 25200 > # Node ID 61b3f36a8a0294c916b778774e58319fbf24b4b5 > # Parent 853db606f335086710612450a93bfd67c1293922 > Update SAE to use the proper association attributes > > Signed-off-by: Kaitlin Rupert > > diff -r 853db606f335 -r 61b3f36a8a02 src/Virt_ServiceAffectsElement.c > --- a/src/Virt_ServiceAffectsElement.c Mon May 18 16:39:18 2009 -0700 > +++ b/src/Virt_ServiceAffectsElement.c Mon May 18 16:39:20 2009 -0700 > @@ -136,7 +136,7 @@ > > LIBVIRT_CIM_DEFAULT_MAKEREF() > > -static char* antecedent[] = { > +static char* affected_ele[] = { > "Xen_ComputerSystem", > "KVM_ComputerSystem", > "LXC_ComputerSystem", > @@ -149,7 +149,7 @@ > NULL > }; > > -static char* dependent[] = { > +static char* affecting_ele[] = { > "Xen_ConsoleRedirectionService", > "KVM_ConsoleRedirectionService", > "LXC_ConsoleRedirectionService", > @@ -164,11 +164,11 @@ > }; > > static struct std_assoc _cs_to_service = { > - .source_class = (char**)&antecedent, > - .source_prop = "Antecedent", > + .source_class = (char**)&affected_ele, > + .source_prop = "AffectedElement", > > - .target_class = (char**)&dependent, > - .target_prop = "Dependent", > + .target_class = (char**)&affecting_ele, > + .target_prop = "AffectingElement", > > .assoc_class = (char**)&assoc_classname, > > @@ -177,11 +177,11 @@ > }; > > static struct std_assoc _service_to_cs = { > - .source_class = (char**)&dependent, > - .source_prop = "Dependent", > + .source_class = (char**)&affecting_ele, > + 
.source_prop = "AffectingElement", > > - .target_class = (char**)&antecedent, > - .target_prop = "Antecedent", > + .target_class = (char**)&affected_ele, > + .target_prop = "AffectedElement", > > .assoc_class = (char**)&assoc_classname, > > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Wed May 20 10:28:37 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Wed, 20 May 2009 03:28:37 -0700 Subject: [Libvirt-cim] [PATCH 1 of 2] [TEST] Modifying get_pool_rasds() of pool.py In-Reply-To: References: Message-ID: <68817f9e1c6a078942f6.1242815317@localhost.localdomain> # HG changeset patch # User Deepti B. Kalakeri # Date 1242814409 25200 # Node ID 68817f9e1c6a078942f616c33aa69e773a23beaf # Parent 95b9360a2d1c3dce540ea9eeb965bec86ebbdfab [TEST] Modifying get_pool_rasds() of pool.py. Modifying get_pool_rasds() of pool.py to return all Parent DiskPool and NetworkPool records when required. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r 95b9360a2d1c -r 68817f9e1c6a suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Tue May 19 03:21:20 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed May 20 03:13:29 2009 -0700 @@ -106,12 +106,13 @@ return volume -def get_pool_rasds(server, virt): +def get_pool_rasds(server, virt, + pool_type="NetworkPool", filter_default=True): net_pool_rasds = [] ac_cn = get_typed_class(virt, "AllocationCapabilities") an_cn = get_typed_class(virt, "SettingsDefineCapabilities") - key_list = {"InstanceID" : "NetworkPool/0" } + key_list = {"InstanceID" : "%s/0" %pool_type } try: inst = GetInstance(server, ac_cn, key_list) @@ -119,10 +120,13 @@ except Exception, detail: logger.error("Exception: %s", detail) return None - - for item in rasd: - if item['InstanceID'] == "Default": - net_pool_rasds.append(item) + + if filter_default == True: + for item in rasd: + if item['InstanceID'] == "Default": + net_pool_rasds.append(item) + else: + return rasd return net_pool_rasds From deeptik at linux.vnet.ibm.com Wed May 20 10:28:36 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Wed, 20 May 2009 03:28:36 -0700 Subject: [Libvirt-cim] [PATCH 0 of 2] Adding new tc to verify the template RASDs for parent NetPoolRASD. Message-ID: From deeptik at linux.vnet.ibm.com Wed May 20 10:28:38 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Wed, 20 May 2009 03:28:38 -0700 Subject: [Libvirt-cim] [PATCH 2 of 2] [TEST] #2 Adding new tc to verify the template RASDs for parent NetPoolRASD In-Reply-To: References: Message-ID: <031c5ff76b755b7f8047.1242815318@localhost.localdomain> # HG changeset patch # User Deepti B. Kalakeri # Date 1242815265 25200 # Node ID 031c5ff76b755b7f804708281a9738af87320bda # Parent 68817f9e1c6a078942f616c33aa69e773a23beaf [TEST] #2 Adding new tc to verify the template RASDs for parent NetPoolRASD. Updates: -------- Merged the code of verify_rec() fn to main. Removed the PoolID check from get_rec(). Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r 68817f9e1c6a -r 031c5ff76b75 suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py Wed May 20 03:27:45 2009 -0700 @@ -0,0 +1,116 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +# This test case is used to verify the Parent NetPoolRASD and +# DiskPoolRASD properties in detail using the SettingsDefineCapabilities +# association. +# +# Ex: +# Command: +# wbemcli ai -ac SettingsDefineCapabilities \ +# 'http://localhost:5988/root/virt:KVM_AllocationCapabilties.InstanceID=\ +# "NetworkPool/0"' +# +# Output: +# localhost/root/virt:KVM_NetPoolResourceAllocationSettingData.InstanceID="Default" +# -InstanceID="Default" [ verified for Maximum, Increment, Default as well ] +# -ResourceType=10 +# -PoolID="NetworkPool/0" +# -Address="192.168.122.1" +# -Netmask="255.255.255.0" +# -IPRangeStart="192.168.122.2" +# -IPRangeEnd="192.168.122.254" +# -ForwardDevice= [ verified for 'None' and "eth0" ] +# -ForwardMode=0 [ verified for 1,2 as well ] +# +# +# +# Date : 18-05-2009 + +import sys +from sets import Set +from CimTest.Globals import logger +from XenKvmLib.const import do_main +from CimTest.ReturnCodes import PASS, FAIL +from XenKvmLib.pool import get_pool_rasds + +sup_types = ['KVM', 'Xen', 'XenFV'] + +def get_rec(netpool_rasd, inst_id='Default'): + recs = [] + for np_rasd in netpool_rasd: + if np_rasd['InstanceID'] == inst_id : + recs.append(np_rasd) + return recs + + at do_main(sup_types) +def main(): + options = main.options + virt = options.virt + server = options.ip + status = FAIL + netpool_rasd = get_pool_rasds(server, virt, filter_default=False) + inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] + n_rec_val = { 'ResourceType' : 10, + 'PoolID' : "NetworkPool/0", + 'Address' : "192.168.122.1", + 'Netmask' : "255.255.255.0", + 'IPRangeStart' : "192.168.122.2", + 'IPRangeEnd' : "192.168.122.254" + } + exp_mode_device = [('None', 0L), ('None', 1L), ('eth0', 1L), + ('None', 2L), ('eth0', 2L)] + for inst_type in inst_list: + logger.info("Verifying '%s' records", inst_type) + + try: + n_rec = get_rec(netpool_rasd, inst_id=inst_type) + if len(n_rec) != 5: + raise Exception("Got %s recs instead of 5" %(len(n_rec))) + + + res_mode_device = [] + for rec in n_rec: + l = (str(rec['ForwardDevice']), rec['ForwardMode']) + res_mode_device.append(l) + + if len(Set(exp_mode_device) & Set(res_mode_device)) != 5 : + raise Exception("Mismatching Mode and device values, " \ + "Got %s, Expected %s" %(exp_mode_device, \ + res_mode_device)) + + for key in n_rec_val.keys(): + for rec in n_rec: + if n_rec_val[key] != rec[key]: + raise Exception("'%s' Mismatch, Got %s, Expected %s" \ + % (key, rec[key], n_rec_val[key])) + + except Exception, 
details: + logger.error("Exception details: %s", details) + return FAIL + + return PASS + + + return status +if __name__ == "__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Wed May 20 13:34:51 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Wed, 20 May 2009 19:04:51 +0530 Subject: [Libvirt-cim] [PATCH 0 of 2] [TEST] Add test to verify CreateSnapshot() In-Reply-To: References: Message-ID: <4A1406FB.2040701@linux.vnet.ibm.com> +1 Kaitlin Rupert wrote: > The necessary libvirt-cim changes are now upstream. So this is ready to test. > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim > -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Wed May 20 17:42:04 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 20 May 2009 10:42:04 -0700 Subject: [Libvirt-cim] [PATCH] A few schema cleanups after migrating from 2.16 to 2.21 Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1242841306 25200 # Node ID aa8e071730d2ce20064f1c0295a8005e31ef2cea # Parent 10e45fca47f0d19eddcf7bb1559ba9e7397aea24 A few schema cleanups after migrating from 2.16 to 2.21 Fixes: -CIM_HostedResourcePool is needed in cimv2 to register Virt_HostedResourcePool as a cross-namespace provider -Implementation-specific VirtualSystemSnapshotService should be a subclass of CIM_VirtualSystemSnapshotService, not Virt_VirtualSystemSnapshotService -CIM_VirtualSystemMigrationSettingData.mof and CIM_VirtualSystemMigrationService.mof are needed in interop to properly register ECTP as a cross-namespace provider Signed-off-by: Kaitlin Rupert diff -r 10e45fca47f0 -r aa8e071730d2 base_schema/cimv2.21.0-cimv2_mof --- a/base_schema/cimv2.21.0-cimv2_mof Mon May 18 16:39:20 2009 -0700 +++ b/base_schema/cimv2.21.0-cimv2_mof Wed May 20 10:41:46 2009 -0700 @@ -9,3 +9,4 @@ #pragma include ("Core/CIM_HostedResourcePool.mof") #pragma include ("Core/CIM_ElementCapabilities.mof") #pragma include ("Core/CIM_HostedService.mof") +#pragma include ("Core/CIM_HostedResourcePool.mof") diff -r 10e45fca47f0 -r aa8e071730d2 base_schema/cimv2.21.0-interop_mof --- a/base_schema/cimv2.21.0-interop_mof Mon May 18 16:39:20 2009 -0700 +++ b/base_schema/cimv2.21.0-interop_mof Wed May 20 10:41:46 2009 -0700 @@ -26,3 +26,5 @@ #pragma include ("Core/CIM_ResourcePool.mof") #pragma include ("Core/CIM_Capabilities.mof") #pragma include ("Core/CIM_AllocationCapabilities.mof") +#pragma include ("System/CIM_VirtualSystemMigrationSettingData.mof") +#pragma include ("System/CIM_VirtualSystemMigrationService.mof") diff -r 10e45fca47f0 -r aa8e071730d2 schema/VirtualSystemSnapshotService.mof --- a/schema/VirtualSystemSnapshotService.mof Mon May 18 16:39:20 2009 -0700 +++ b/schema/VirtualSystemSnapshotService.mof Wed May 20 10:41:46 2009 -0700 @@ -1,5 +1,5 @@ // Copyright IBM Corp.
2008 -class Xen_VirtualSystemSnapshotService : Virt_VirtualSystemSnapshotService { }; -class KVM_VirtualSystemSnapshotService : Virt_VirtualSystemSnapshotService { }; -class LXC_VirtualSystemSnapshotService : Virt_VirtualSystemSnapshotService { }; +class Xen_VirtualSystemSnapshotService : CIM_VirtualSystemSnapshotService { }; +class KVM_VirtualSystemSnapshotService : CIM_VirtualSystemSnapshotService { }; +class LXC_VirtualSystemSnapshotService : CIM_VirtualSystemSnapshotService { }; From dayne.medlyn at hp.com Wed May 20 20:39:32 2009 From: dayne.medlyn at hp.com (Medlyn, Dayne (VSL - Ft Collins)) Date: Wed, 20 May 2009 20:39:32 +0000 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: <4A0DD287.9030709@linux.vnet.ibm.com> References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> <4A0CBC2E.7080109@linux.vnet.ibm.com> <4A0D8BDF.9050202@linux.vnet.ibm.com> <4A0DD287.9030709@linux.vnet.ibm.com> Message-ID: Kaitlin, Do you know if I can rely on a standard format for the Revision field in VirtualSystemManagementService class? Looking at the code I see it is provided as a build variable "-DLIBVIRT_CIM_RV=" that gets set in the acinclude.m4 script. I suppose any of the distributions can label this any way they see fit. So far I have seen: SLES11: 0.5.2 RHEL5.3: 613+ Our 0.4.1 build: 590 Current testing builds: 875 Any thought on any standard format? Do you know what the build number was for 0.5.1 (somewhere between 590 and 613)? At the moment I am planning to handle the x.x.x and x\D+ cases. If anyone has any other thoughts or experiences I am open to them. Thanks. Dayne > > > > Just for confirmation, does this mean that for anything 0.5.1 and > newer I should expect: > > > > -NumberOfBlocks the maximum memory allocated to the guest > > -ConsumableBlocks the memory currently assigned to the guest > > > > And for anything older than 0.5.1 I should expect: > > > > -NumberOfBlocks the memory currently assigned to the guest > > -ConsumableBlocks the maximum memory allocated to the guest > > Yes, that's correct. We're one the same page here. Sorry for the > confusing detour there. ;) > > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim From kaitlin at linux.vnet.ibm.com Wed May 20 21:50:58 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 20 May 2009 14:50:58 -0700 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> <4A0CBC2E.7080109@linux.vnet.ibm.com> <4A0D8BDF.9050202@linux.vnet.ibm.com> <4A0DD287.9030709@linux.vnet.ibm.com> Message-ID: <4A147B42.7020001@linux.vnet.ibm.com> Medlyn, Dayne (VSL - Ft Collins) wrote: > Kaitlin, > > Do you know if I can rely on a standard format for the Revision field in VirtualSystemManagementService class? Looking at the code I see it is provided as a build variable "-DLIBVIRT_CIM_RV=" that gets set in the acinclude.m4 script. > I suppose any of the distributions can label > this any way they see fit. Right - the distros have control over how they wish to set this value. > So far I have seen: > > SLES11: 0.5.2 > RHEL5.3: 613+ Upstream, we use the changeset and the revision numbers from mercurial. 
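A minimal sketch of the kind of x.x.x / x\D+ classification described above might look like the following (the helper name and the exact rules are illustrative assumptions only, not anything shipped by libvirt-cim or the distros):

import re

def classify_revision(rev_str):
    # Classify the Revision strings seen so far in this thread:
    #   "0.5.2"         -> ("release", (0, 5, 2))           x.x.x style
    #   "613+" or "875" -> ("revision", 613) / ("revision", 875)
    #   anything else   -> ("unknown", rev_str)
    m = re.match(r'^(\d+)\.(\d+)\.(\d+)$', rev_str)
    if m is not None:
        return "release", tuple(int(p) for p in m.groups())
    m = re.match(r'^(\d+)\D*$', rev_str)
    if m is not None:
        return "revision", int(m.group(1))
    return "unknown", rev_str

A numeric revision can then be compared against the tag points mentioned in this thread (0.4.0 is 590, 0.5.0 is 632, 0.5.1 is 657), while a release-style value is compared as a version tuple.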
The problem the distros face is that they may be using 0.5.2 as a base, but they will have their own patches applied - some from upstream libvirt-cim and some from in house. > > Our 0.4.1 build: 590 > Current testing builds: 875 > > Any thought on any standard format? I don't know of an easy way to force a standard format since it's up to the distros' discretion to change whatever method we go with. It might be possible to use a combination of the distro package version and the Revision values from the VirtualSystemManagementService class. > Do you know what the build number was for 0.5.1 (somewhere between 590 and 613)? At the moment I am planning to handle the x.x.x and x\D+ cases. If anyone has any other thoughts or experiences I am open to them. 0.5.1 is 657 - you can check out the release versions at: http://libvirt.org/hg/libvirt-cim/tags 613 is somewhere between 0.4.0 (590) and 0.5.0 (632). Unfortunately, I'm not coming up with an easy way to handle the difference in the distros. We have to detect the libvirt version, which we do using an acinclude.m4 check based on the version the distro reports. That might be a more reliable way, but it's less dynamic. We could add a release attribute to libvirt-cim (in addition to the changeset and revision values), but that won't help you with existing versions. > > Thanks. > > Dayne > > > >>> Just for confirmation, does this mean that for anything 0.5.1 and >> newer I should expect: >>> -NumberOfBlocks the maximum memory allocated to the guest >>> -ConsumableBlocks the memory currently assigned to the guest >>> >>> And for anything older than 0.5.1 I should expect: >>> >>> -NumberOfBlocks the memory currently assigned to the guest >>> -ConsumableBlocks the maximum memory allocated to the guest >> Yes, that's correct. We're on the same page here. Sorry for the >> confusing detour there. ;) >> >> -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From dayne.medlyn at hp.com Wed May 20 23:00:12 2009 From: dayne.medlyn at hp.com (Medlyn, Dayne (VSL - Ft Collins)) Date: Wed, 20 May 2009 23:00:12 +0000 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: <4A147B42.7020001@linux.vnet.ibm.com> References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> <4A0CBC2E.7080109@linux.vnet.ibm.com> <4A0D8BDF.9050202@linux.vnet.ibm.com> <4A0DD287.9030709@linux.vnet.ibm.com> <4A147B42.7020001@linux.vnet.ibm.com> Message-ID: Thanks Kaitlin, I think for now I may just have to base it off the RPM version. It would have been more convenient to get it through the provider, but this will work. Dayne > -----Original Message----- > From: libvirt-cim-bounces at redhat.com [mailto:libvirt-cim- > bounces at redhat.com] On Behalf Of Kaitlin Rupert > Sent: Wednesday, May 20, 2009 3:51 PM > To: List for discussion and development of libvirt CIM > Subject: Re: [Libvirt-cim] What does NumberOfBlocks and > ConsumableBlocks in the Xen_Memory class represent? > > Medlyn, Dayne (VSL - Ft Collins) wrote: > > Kaitlin, > > > > Do you know if I can rely on a standard format for the Revision field > in VirtualSystemManagementService class? Looking at the code I see it > is provided as a build variable "-DLIBVIRT_CIM_RV=" that gets set in > the acinclude.m4 script. > > I suppose any of the distributions can label > > this any way they see fit. > > Right - the distros have control over how they wish to set this value.
> > > So far I have seen: > > > > SLES11: 0.5.2 > > RHEL5.3: 613+ > > Upstream, we use the changeset and the revision numbers from mercurial. > The problem the distros face is that that they may be using 0.5.2 as a > base, but they will have their own patches applied - some from upstream > libvirt-cim and some from in house. > > > > > Our 0.4.1 build: 590 > > Current testing builds: 875 > > > > Any thought on any standard format? > > I don't know of an easy way to force a standard format since its up to > the distros discretion to change whatever method we go with. > > It might be possible to use a combination of the distro package version > and the Revision values from the VirtualSystemManagementService class. > > > Do you know what the build number was for 0.5.1 (somewhere between > 590 and 613)? At the moment I am planning to handle the x.x.x and x\D+ > cases. If anyone has any other thoughts or experiences I am open to > them. > > 0.5.1 is 657 - you can check out the release versions at: > http://libvirt.org/hg/libvirt-cim/tags > > 613 is somewhere between 0.4.0 (590) and 0.5.0 (632). > > Unfortunately, I'm not coming up with an easy way as to how to handle > the difference in the distros. We have to detect the libvirt version, > which we do using a acinclude.m4 based on the version the distro > reports. That might be a more reliable way, but it's less dynamic. > > We could add a release attribute to libvirt-cim (in addition to the > changeset and revision values), but that won't help you with existing > versions. > > > > > Thanks. > > > > Dayne > > > > > > > >>> Just for confirmation, does this mean that for anything 0.5.1 and > >> newer I should expect: > >>> -NumberOfBlocks the maximum memory allocated to the guest > >>> -ConsumableBlocks the memory currently assigned to the guest > >>> > >>> And for anything older than 0.5.1 I should expect: > >>> > >>> -NumberOfBlocks the memory currently assigned to the guest > >>> -ConsumableBlocks the maximum memory allocated to the guest > >> Yes, that's correct. We're one the same page here. Sorry for the > >> confusing detour there. 
;) > >> > >> > > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim From kaitlin at linux.vnet.ibm.com Wed May 20 23:33:06 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 20 May 2009 16:33:06 -0700 Subject: [Libvirt-cim] [PATCH] Add support for iscsi storage pools Message-ID: <9ac4e06aa21334cf25b8.1242862386@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242695586 25200 # Node ID 9ac4e06aa21334cf25b8096663dca6fea74dccd8 # Parent aa8e071730d2ce20064f1c0295a8005e31ef2cea Add support for iscsi storage pools You will need a system with iSCSI connected storage to test this Signed-off-by: Kaitlin Rupert diff -r aa8e071730d2 -r 9ac4e06aa213 src/Virt_ResourcePoolConfigurationService.c --- a/src/Virt_ResourcePoolConfigurationService.c Wed May 20 10:41:46 2009 -0700 +++ b/src/Virt_ResourcePoolConfigurationService.c Mon May 18 18:13:06 2009 -0700 @@ -179,6 +179,24 @@ return NULL; } +static const char *disk_iscsi_pool(CMPIInstance *inst, + struct virt_pool *pool) +{ + const char *val = NULL; + + if (cu_get_str_prop(inst, "DevicePath", &val) != CMPI_RC_OK) + return "Missing `DevicePath' property"; + + pool->pool_info.disk.device_path = strdup(val); + + if (cu_get_str_prop(inst, "Host", &val) != CMPI_RC_OK) + return "Missing `Host' property"; + + pool->pool_info.disk.host = strdup(val); + + return NULL; +} + static const char *disk_rasd_to_pool(CMPIInstance *inst, struct virt_pool *pool) { @@ -201,6 +219,9 @@ case DISK_POOL_NETFS: msg = disk_netfs_pool(inst, pool); break; + case DISK_POOL_ISCSI: + msg = disk_iscsi_pool(inst, pool); + break; default: return "Storage pool type not supported"; } From deeptik at linux.vnet.ibm.com Thu May 21 11:13:04 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Thu, 21 May 2009 04:13:04 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Adding new tc to verify the template RASDs for parent DiskPoolRASD Message-ID: # HG changeset patch # User Deepti B. Kalakeri # Date 1242904299 25200 # Node ID b88e452523d773787c9ea2e4b46505fd9df5ba76 # Parent 835ba5d083c620046dc16c29e1bf44932af759a7 [TEST] Adding new tc to verify the template RASDs for parent DiskPoolRASD Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r 835ba5d083c6 -r b88e452523d7 suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py Thu May 21 04:11:39 2009 -0700 @@ -0,0 +1,112 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +# This test case is used to verify the Parent DiskPoolRASD properties +# in detail using the SettingsDefineCapabilities association. +# +# Ex: +# Command: +# wbemcli ai -ac SettingsDefineCapabilities \ +# 'http://localhost:5988/root/virt:KVM_AllocationCapabilties.InstanceID=\ +# "DiskPool/0"' +# +# Output: +# localhost/root/virt:KVM_DiskPoolResourceAllocationSettingData.InstanceID="Increment" +# -InstanceID="Default" [ verified for Minimum, Maximum, Increment as well ] +# -ResourceType=17 +# -PoolID="DiskPool/0" +# -Type=3 [ For Type 1 and 2 as well ] +# -Path="/dev/null" +# -DevicePath= +# -Host="host_sys.domain.com" +# -SourceDirectory="/var/lib/images" +# +# Date : 21-05-2009 + +import sys +from sets import Set +from CimTest.Globals import logger +from XenKvmLib.const import do_main +from CimTest.ReturnCodes import PASS, FAIL +from XenKvmLib.pool import get_pool_rasds + +sup_types = ['KVM', 'Xen', 'XenFV'] + +def get_rec(netpool_rasd, inst_id='Default'): + recs = [] + for np_rasd in netpool_rasd: + if np_rasd['InstanceID'] == inst_id : + recs.append(np_rasd) + return recs + + at do_main(sup_types) +def main(): + options = main.options + virt = options.virt + server = options.ip + status = FAIL + netpool_rasd = get_pool_rasds(server, virt, pool_type="DiskPool", + filter_default=False) + inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] + n_rec_val = { 'ResourceType' : 17, + 'PoolID' : "DiskPool/0", + 'Path' : "/dev/null", + } + exp_type_path_host_dir = [('1', 'None', 'None', 'None'), + ('2', '/dev/sda100', 'None', 'None'), + ('3', 'None', 'host_sys.domain.com', + '/var/lib/images')] + + + for inst_type in inst_list: + logger.info("Verifying '%s' records", inst_type) + + try: + n_rec = get_rec(netpool_rasd, inst_id=inst_type) + if len(n_rec) != 3: + raise Exception("Got %s recs instead of 3" %(len(n_rec))) + + res_type_path_host_dir = [] + for rec in n_rec: + l = (str(rec['Type']), str(rec['DevicePath']), + str(rec['Host']), str(rec['SourceDirectory'])) + res_type_path_host_dir.append(l) + + if len(Set(exp_type_path_host_dir) & Set(res_type_path_host_dir)) != 3 : + raise Exception("Mismatching values, " \ + "\nGot %s, \nExpected %s" \ + %(exp_type_path_host_dir, \ + res_type_path_host_dir)) + + for key in n_rec_val.keys(): + for rec in n_rec: + if n_rec_val[key] != rec[key]: + raise Exception("'%s' Mismatch, Got %s, Expected %s" \ + % (key, rec[key], n_rec_val[key])) + + except Exception, details: + logger.error("Exception details: %s", details) + return FAIL + + return PASS +if __name__ == "__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Thu May 21 11:22:10 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Thu, 21 May 2009 04:22:10 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Removed the unused status and return statement Message-ID: <9b2b34f16bd6f04af4b3.1242904930@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1242904875 25200 # Node ID 9b2b34f16bd6f04af4b3b8909e258a6a8f168b03 # Parent b88e452523d773787c9ea2e4b46505fd9df5ba76 [TEST] Removed the unused status and return statement. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r b88e452523d7 -r 9b2b34f16bd6 suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py --- a/suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py Thu May 21 04:11:39 2009 -0700 +++ b/suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py Thu May 21 04:21:15 2009 -0700 @@ -67,7 +67,6 @@ options = main.options virt = options.virt server = options.ip - status = FAIL netpool_rasd = get_pool_rasds(server, virt, filter_default=False) inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] n_rec_val = { 'ResourceType' : 10, @@ -110,7 +109,5 @@ return PASS - - return status if __name__ == "__main__": sys.exit(main()) From dayne.medlyn at hp.com Thu May 21 17:06:38 2009 From: dayne.medlyn at hp.com (Medlyn, Dayne (VSL - Ft Collins)) Date: Thu, 21 May 2009 17:06:38 +0000 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: <4A147B42.7020001@linux.vnet.ibm.com> References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> <4A0CBC2E.7080109@linux.vnet.ibm.com> <4A0D8BDF.9050202@linux.vnet.ibm.com> <4A0DD287.9030709@linux.vnet.ibm.com> <4A147B42.7020001@linux.vnet.ibm.com> Message-ID: Kaitlin, Just a follow-up. I think your idea of adding a release attribute is a good one. All three values (release, revision, and changeset) would allow a more precise way to pin functionality to builds. I would expect the release to reflect the latest official release number (currently 0.5.2 I believe) with the running revisions and changesets demarking changes off the release, i.e. currently: release 0.5.2, revision 875, changeset: cde25ad65c74+. Thanks for all your help. Dayne > -----Original Message----- > From: libvirt-cim-bounces at redhat.com [mailto:libvirt-cim- > bounces at redhat.com] On Behalf Of Kaitlin Rupert > Sent: Wednesday, May 20, 2009 3:51 PM > To: List for discussion and development of libvirt CIM > Subject: Re: [Libvirt-cim] What does NumberOfBlocks and > ConsumableBlocks in the Xen_Memory class represent? > > Medlyn, Dayne (VSL - Ft Collins) wrote: > > Kaitlin, > > > > Do you know if I can rely on a standard format for the Revision field > in VirtualSystemManagementService class? Looking at the code I see it > is provided as a build variable "-DLIBVIRT_CIM_RV=" that gets set in > the acinclude.m4 script. > > I suppose any of the distributions can label > > this any way they see fit. > > Right - the distros have control over how they wish to set this value. > > > So far I have seen: > > > > SLES11: 0.5.2 > > RHEL5.3: 613+ > > Upstream, we use the changeset and the revision numbers from mercurial. > The problem the distros face is that that they may be using 0.5.2 as a > base, but they will have their own patches applied - some from upstream > libvirt-cim and some from in house. > > > > > Our 0.4.1 build: 590 > > Current testing builds: 875 > > > > Any thought on any standard format? > > I don't know of an easy way to force a standard format since its up to > the distros discretion to change whatever method we go with. > > It might be possible to use a combination of the distro package version > and the Revision values from the VirtualSystemManagementService class. > > > Do you know what the build number was for 0.5.1 (somewhere between > 590 and 613)? At the moment I am planning to handle the x.x.x and x\D+ > cases. If anyone has any other thoughts or experiences I am open to > them. 
> > 0.5.1 is 657 - you can check out the release versions at: > http://libvirt.org/hg/libvirt-cim/tags > > 613 is somewhere between 0.4.0 (590) and 0.5.0 (632). > > Unfortunately, I'm not coming up with an easy way as to how to handle > the difference in the distros. We have to detect the libvirt version, > which we do using a acinclude.m4 based on the version the distro > reports. That might be a more reliable way, but it's less dynamic. > > We could add a release attribute to libvirt-cim (in addition to the > changeset and revision values), but that won't help you with existing > versions. > > > > > Thanks. > > > > Dayne > > > > > > > >>> Just for confirmation, does this mean that for anything 0.5.1 and > >> newer I should expect: > >>> -NumberOfBlocks the maximum memory allocated to the guest > >>> -ConsumableBlocks the memory currently assigned to the guest > >>> > >>> And for anything older than 0.5.1 I should expect: > >>> > >>> -NumberOfBlocks the memory currently assigned to the guest > >>> -ConsumableBlocks the maximum memory allocated to the guest > >> Yes, that's correct. We're one the same page here. Sorry for the > >> confusing detour there. ;) > >> > >> > > > -- > Kaitlin Rupert > IBM Linux Technology Center > kaitlin at linux.vnet.ibm.com > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim From kaitlin at linux.vnet.ibm.com Thu May 21 17:58:00 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 21 May 2009 10:58:00 -0700 Subject: [Libvirt-cim] What does NumberOfBlocks and ConsumableBlocks in the Xen_Memory class represent? In-Reply-To: References: < 4A0B6F5D.4030700@linux.vnet.ibm.com> <4A0C96E4.6060301@linux.vnet.ibm.com> <4A0CBC2E.7080109@linux.vnet.ibm.com> <4A0D8BDF.9050202@linux.vnet.ibm.com> <4A0DD287.9030709@linux.vnet.ibm.com> <4A147B42.7020001@linux.vnet.ibm.com> Message-ID: <4A159628.2050006@linux.vnet.ibm.com> Medlyn, Dayne (VSL - Ft Collins) wrote: > Kaitlin, > > Just a follow-up. I think your idea of adding a release attribute is a good one. All three values (release, revision, and changeset) would allow a more precise way to pin functionality to builds. I would expect the release to reflect the latest official release number (currently 0.5.2 I believe) with the running revisions and changesets demarking changes off the release, i.e. currently: release 0.5.2, revision 875, changeset: cde25ad65c74+. > Sure - I think it would be useful as well. It would give you a base version and then the revision / changeset can get you a hint of what patches might be included on top of the release. I'll see if I can get that worked up in the next day or two. Just to note - the current release is 0.5.5. Revision 883, changeset 10e45fca47f0. > Thanks for all your help. > > Dayne > > >> -----Original Message----- >> From: libvirt-cim-bounces at redhat.com [mailto:libvirt-cim- >> bounces at redhat.com] On Behalf Of Kaitlin Rupert >> Sent: Wednesday, May 20, 2009 3:51 PM >> To: List for discussion and development of libvirt CIM >> Subject: Re: [Libvirt-cim] What does NumberOfBlocks and >> ConsumableBlocks in the Xen_Memory class represent? >> >> Medlyn, Dayne (VSL - Ft Collins) wrote: >>> Kaitlin, >>> >>> Do you know if I can rely on a standard format for the Revision field, >> in VirtualSystemManagementService class? Looking at the code I see it >> is provided as a build variable "-DLIBVIRT_CIM_RV=" that gets set in >> the acinclude.m4 script. 
>>> I suppose any of the distributions can label >>> this any way they see fit. >> Right - the distros have control over how they wish to set this value. >> >>> So far I have seen: >>> >>> SLES11: 0.5.2 >>> RHEL5.3: 613+ >> Upstream, we use the changeset and the revision numbers from mercurial. >> The problem the distros face is that that they may be using 0.5.2 as a >> base, but they will have their own patches applied - some from upstream >> libvirt-cim and some from in house. >> >>> Our 0.4.1 build: 590 >>> Current testing builds: 875 >>> >>> Any thought on any standard format? >> I don't know of an easy way to force a standard format since its up to >> the distros discretion to change whatever method we go with. >> >> It might be possible to use a combination of the distro package version >> and the Revision values from the VirtualSystemManagementService class. >> >>> Do you know what the build number was for 0.5.1 (somewhere between >> 590 and 613)? At the moment I am planning to handle the x.x.x and x\D+ >> cases. If anyone has any other thoughts or experiences I am open to >> them. >> >> 0.5.1 is 657 - you can check out the release versions at: >> http://libvirt.org/hg/libvirt-cim/tags >> >> 613 is somewhere between 0.4.0 (590) and 0.5.0 (632). >> >> Unfortunately, I'm not coming up with an easy way as to how to handle >> the difference in the distros. We have to detect the libvirt version, >> which we do using a acinclude.m4 based on the version the distro >> reports. That might be a more reliable way, but it's less dynamic. >> >> We could add a release attribute to libvirt-cim (in addition to the >> changeset and revision values), but that won't help you with existing >> versions. >> >>> Thanks. >>> >>> Dayne >>> >>> >>> >>>>> Just for confirmation, does this mean that for anything 0.5.1 and >>>> newer I should expect: >>>>> -NumberOfBlocks the maximum memory allocated to the guest >>>>> -ConsumableBlocks the memory currently assigned to the guest >>>>> >>>>> And for anything older than 0.5.1 I should expect: >>>>> >>>>> -NumberOfBlocks the memory currently assigned to the guest >>>>> -ConsumableBlocks the maximum memory allocated to the guest >>>> Yes, that's correct. We're one the same page here. Sorry for the >>>> confusing detour there. 
;) >>>> >>>> >> >> -- >> Kaitlin Rupert >> IBM Linux Technology Center >> kaitlin at linux.vnet.ibm.com >> >> _______________________________________________ >> Libvirt-cim mailing list >> Libvirt-cim at redhat.com >> https://www.redhat.com/mailman/listinfo/libvirt-cim > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 21 20:09:49 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 21 May 2009 13:09:49 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Return SKIP if the provider version doesn't support template pool RASDs Message-ID: <4467ea28368f7343c899.1242936589@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242936560 25200 # Node ID 4467ea28368f7343c89990e7179170be34c08c6f # Parent 835ba5d083c620046dc16c29e1bf44932af759a7 [TEST] Return SKIP if the provider version doesn't support template pool RASDs Signed-off-by: Kaitlin Rupert diff -r 835ba5d083c6 -r 4467ea28368f suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py --- a/suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py Wed May 20 03:27:45 2009 -0700 +++ b/suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py Thu May 21 13:09:20 2009 -0700 @@ -68,7 +68,10 @@ virt = options.virt server = options.ip status = FAIL - netpool_rasd = get_pool_rasds(server, virt, filter_default=False) + status, netpool_rasd = get_pool_rasds(server, virt, filter_default=False) + if status != PASS: + return status + inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] n_rec_val = { 'ResourceType' : 10, 'PoolID' : "NetworkPool/0", diff -r 835ba5d083c6 -r 4467ea28368f suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed May 20 03:27:45 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Thu May 21 13:09:20 2009 -0700 @@ -22,7 +22,7 @@ import sys from CimTest.Globals import logger, CIM_NS -from CimTest.ReturnCodes import PASS, FAIL +from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.const import get_provider_version, default_pool_name from XenKvmLib.enumclass import EnumInstances, GetInstance @@ -108,6 +108,22 @@ def get_pool_rasds(server, virt, pool_type="NetworkPool", filter_default=True): + + net_pool_rasd_rev = 867 + disk_pool_rasd_rev = 863 + + try: + rev, changeset = get_provider_version(virt, server) + if pool_type == "NetworkPool" and rev < net_pool_rasd_rev: + raise Exception("Supported in version %d" % net_pool_rasd_rev) + + if pool_type == "DiskPool" and rev < disk_pool_rasd_rev: + raise Exception("Supported in version %d" % disk_pool_rasd_rev) + + except Exception, detail: + logger.error("%s template RASDs not supported. 
%s.", pool_type, detail) + return SKIP, None + net_pool_rasds = [] ac_cn = get_typed_class(virt, "AllocationCapabilities") @@ -119,16 +135,16 @@ rasd = Associators(server, an_cn, ac_cn, InstanceID=inst.InstanceID) except Exception, detail: logger.error("Exception: %s", detail) - return None + return FAIL, None if filter_default == True: for item in rasd: if item['InstanceID'] == "Default": net_pool_rasds.append(item) else: - return rasd + return PASS, rasd - return net_pool_rasds + return PASS, net_pool_rasds def net_undefine(network, server, virt="Xen"): """Function undefine a given virtual network""" @@ -177,8 +193,8 @@ logger.error("IP address is in use by a different network") return FAIL - net_pool_rasds = get_pool_rasds(server, virt) - if len(net_pool_rasds) == 0: + status, net_pool_rasds = get_pool_rasds(server, virt) + if len(net_pool_rasds) == 0 or status != PASS: logger.error("We can not get NetPoolRASDs") return FAIL else: From kaitlin at linux.vnet.ibm.com Thu May 21 20:12:30 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 21 May 2009 13:12:30 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Adding new tc to verify the template RASDs for parent DiskPoolRASD In-Reply-To: References: Message-ID: <4A15B5AE.1080504@linux.vnet.ibm.com> > + > +def get_rec(netpool_rasd, inst_id='Default'): This should be diskpool_rasd > + recs = [] > + for np_rasd in netpool_rasd: Same here. Also, np_rasd should be dp_rasd > + if np_rasd['InstanceID'] == inst_id : > + recs.append(np_rasd) Same here. > + return recs > + > + at do_main(sup_types) > +def main(): > + options = main.options > + virt = options.virt > + server = options.ip > + status = FAIL > + netpool_rasd = get_pool_rasds(server, virt, pool_type="DiskPool", > + filter_default=False) This should be diskpool_rasd > + inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] > + n_rec_val = { 'ResourceType' : 17, > + 'PoolID' : "DiskPool/0", > + 'Path' : "/dev/null", > + } > + exp_type_path_host_dir = [('1', 'None', 'None', 'None'), > + ('2', '/dev/sda100', 'None', 'None'), > + ('3', 'None', 'host_sys.domain.com', > + '/var/lib/images')] > + > + > + for inst_type in inst_list: > + logger.info("Verifying '%s' records", inst_type) > + > + try: > + n_rec = get_rec(netpool_rasd, inst_id=inst_type) Same here. > + if len(n_rec) != 3: Instead of using a hard coded number here, could you use a variable? I'll be adding template RASDs for netfs and iscsi pools soon. You could define the variable right before/after you define exp_type_path_host_dir. > + raise Exception("Got %s recs instead of 3" %(len(n_rec))) Use the variable instead of the hard coded value here. > + > + res_type_path_host_dir = [] > + for rec in n_rec: > + l = (str(rec['Type']), str(rec['DevicePath']), > + str(rec['Host']), str(rec['SourceDirectory'])) > + res_type_path_host_dir.append(l) > + > + if len(Set(exp_type_path_host_dir) & Set(res_type_path_host_dir)) != 3 : This is longer than 80 characters. Also, use variable instead of hard coded value. > + raise Exception("Mismatching values, " \ > + "\nGot %s, \nExpected %s" \ > + %(exp_type_path_host_dir, \ > + res_type_path_host_dir)) This is a little hard to read. Would something like the following be easier? 
raise Exception("Mismatching values, \nGot %s, \nExpected %s" \ %(exp_type_path_host_dir, res_type_path_host_dir)) -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 21 20:24:21 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 21 May 2009 13:24:21 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] (#2) Return SKIP if the provider version doesn't support template pool RASDs Message-ID: <5417439be71f3f502576.1242937461@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242937450 25200 # Node ID 5417439be71f3f502576ee4db92764708c370874 # Parent e5fd77170913c3819d667e240c9873efa3bf0d07 [TEST] (#2) Return SKIP if the provider version doesn't support template pool RASDs Changes: -Rebase on updated tree. Signed-off-by: Kaitlin Rupert diff -r e5fd77170913 -r 5417439be71f suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py --- a/suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py Thu May 21 04:21:15 2009 -0700 +++ b/suites/libvirt-cim/cimtest/RASD/06_parent_net_pool.py Thu May 21 13:24:10 2009 -0700 @@ -67,7 +67,11 @@ options = main.options virt = options.virt server = options.ip - netpool_rasd = get_pool_rasds(server, virt, filter_default=False) + + status, netpool_rasd = get_pool_rasds(server, virt, filter_default=False) + if status != PASS: + return status + inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] n_rec_val = { 'ResourceType' : 10, 'PoolID' : "NetworkPool/0", diff -r e5fd77170913 -r 5417439be71f suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Thu May 21 04:21:15 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Thu May 21 13:24:10 2009 -0700 @@ -22,7 +22,7 @@ import sys from CimTest.Globals import logger, CIM_NS -from CimTest.ReturnCodes import PASS, FAIL +from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.const import get_provider_version, default_pool_name from XenKvmLib.enumclass import EnumInstances, GetInstance @@ -108,6 +108,22 @@ def get_pool_rasds(server, virt, pool_type="NetworkPool", filter_default=True): + + net_pool_rasd_rev = 867 + disk_pool_rasd_rev = 863 + + try: + rev, changeset = get_provider_version(virt, server) + if pool_type == "NetworkPool" and rev < net_pool_rasd_rev: + raise Exception("Supported in version %d" % net_pool_rasd_rev) + + if pool_type == "DiskPool" and rev < disk_pool_rasd_rev: + raise Exception("Supported in version %d" % disk_pool_rasd_rev) + + except Exception, detail: + logger.error("%s template RASDs not supported. 
%s.", pool_type, detail) + return SKIP, None + net_pool_rasds = [] ac_cn = get_typed_class(virt, "AllocationCapabilities") @@ -119,16 +135,16 @@ rasd = Associators(server, an_cn, ac_cn, InstanceID=inst.InstanceID) except Exception, detail: logger.error("Exception: %s", detail) - return None + return FAIL, None if filter_default == True: for item in rasd: if item['InstanceID'] == "Default": net_pool_rasds.append(item) else: - return rasd + return PASS, rasd - return net_pool_rasds + return PASS, net_pool_rasds def net_undefine(network, server, virt="Xen"): """Function undefine a given virtual network""" @@ -177,8 +193,8 @@ logger.error("IP address is in use by a different network") return FAIL - net_pool_rasds = get_pool_rasds(server, virt) - if len(net_pool_rasds) == 0: + status, net_pool_rasds = get_pool_rasds(server, virt) + if len(net_pool_rasds) == 0 or status != PASS: logger.error("We can not get NetPoolRASDs") return FAIL else: From kaitlin at linux.vnet.ibm.com Thu May 21 23:00:00 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 21 May 2009 16:00:00 -0700 Subject: [Libvirt-cim] [PATCH 1 of 4] [TEST] Move cleanup_guest_netpool() to vsmigrations.py In-Reply-To: References: Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1242946256 25200 # Node ID f26248c6fb4186ee6a942192f25eab8d4bcc5626 # Parent 922d6e12a15460adbabb955c1c35e4d7ff86b4e0 [TEST] Move cleanup_guest_netpool() to vsmigrations.py Migration test 06 - 08 duplicate this same code. Also, if the migration is a localhost one, set the hostname to localhost. Otherwise, the providers will return an error saying the guest already exists on the target (because the providers haven't detected a localhost migration). If the target system name is localhost, the migration will always be a local migration. Be sure to set remote_migration accordingly. 
Signed-off-by: Kaitlin Rupert diff -r 922d6e12a154 -r f26248c6fb41 suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py --- a/suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py Thu May 21 13:31:38 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py Thu May 21 15:50:56 2009 -0700 @@ -30,10 +30,11 @@ from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.enumclass import EnumInstances from XenKvmLib.classes import get_typed_class, virt_types -from XenKvmLib.xm_virt_util import domain_list -from XenKvmLib.const import get_provider_version +from XenKvmLib.xm_virt_util import domain_list, net_list +from XenKvmLib.const import get_provider_version, default_network_name from CimTest.Globals import logger, CIM_USER, CIM_PASS, CIM_NS, \ CIM_ERROR_ENUMERATE +from XenKvmLib.common_util import destroy_netpool # Migration constants CIM_MIGRATE_OFFLINE=1 @@ -94,9 +95,14 @@ def check_mig_support(virt, options): s_sysname = gethostbyaddr(options.ip)[0] t_sysname = gethostbyaddr(options.t_url)[0] - if virt == 'KVM' and (t_sysname == s_sysname or t_sysname in s_sysname): - logger.info("Libvirt does not support local migration for KVM") - return SKIP, s_sysname, t_sysname + + if t_sysname == s_sysname or t_sysname in s_sysname: + if virt == 'KVM': + logger.info("Libvirt does not support local migration for KVM") + return SKIP, s_sysname, t_sysname + + #localhost migration is supported by Xen + return PASS, s_sysname, "localhost" return PASS, s_sysname, t_sysname @@ -364,12 +370,15 @@ logger.error("Guest to be migrated not specified.") return FAIL + if t_sysname == "localhost": + remote_migrate = 0 + try: if remote_migrate == 1: - status, req_image, backup_image = remote_copy_guest_image(virt, - s_sysname, - t_sysname, - guest_name) + status, req_image, bkup_image = remote_copy_guest_image(virt, + s_sysname, + t_sysname, + guest_name) if status != PASS: raise Exception("Failure from remote_copy_guest_image()") @@ -397,7 +406,10 @@ logger.info("Migrating '%s'.. this will take some time.", guest_name) # Migrate the guest to t_sysname - status, ret = migrate_guest_to_host(vsmservice, guest_ref, t_sysname, msd) + status, ret = migrate_guest_to_host(vsmservice, + guest_ref, + t_sysname, + msd) if status == FAIL: raise Exception("Failed to Migrate guest '%s' from '%s' to '%s'" \ % (guest_name, s_sysname, t_sysname)) @@ -413,5 +425,50 @@ logger.error("Exception details %s", details) status = FAIL - cleanup_image(backup_image, req_image, t_sysname, remote_migrate=1) + if remote_migrate == 1: + cleanup_image(bkup_image, req_image, t_sysname, remote_migrate=1) + return status + +def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): + # Clean the domain on target machine. + # This is req when migration is successful, also when migration is not + # completely successful VM might be created on the target machine + # and hence need to clean. 
+ target_list = domain_list(t_sysname, virt) + if target_list != None and test_dom in target_list: + ret_value = cxml.destroy(t_sysname) + if not ret_value: + logger.info("Failed to destroy the migrated domain '%s' on '%s'", + test_dom, t_sysname) + + ret_value = cxml.undefine(t_sysname) + if not ret_value: + logger.info("Failed to undefine the migrated domain '%s' on '%s'", + test_dom, t_sysname) + + # Done cleaning environment + if t_sysname == "localhost": + return + + # Remote Migration not Successful, clean the domain on src machine + src_list = domain_list(s_sysname, virt) + if src_list != None and test_dom in src_list: + ret_value = cxml.cim_destroy(s_sysname) + if not ret_value: + logger.info("Failed to destroy the domain '%s' on the source '%s'", + test_dom, s_sysname) + + ret_value = cxml.undefine(s_sysname) + if not ret_value: + logger.info("Failed to undefine the domain '%s' on source '%s'", + test_dom, s_sysname) + + # clean the networkpool created on the remote machine + target_net_list = net_list(t_sysname, virt) + if target_net_list != None and default_network_name in target_net_list: + ret_value = destroy_netpool(t_sysname, virt, default_network_name) + if ret_value != PASS: + logger.info("Unable to destroy networkpool '%s' on '%s'", + default_network_name, t_sysname) + From kaitlin at linux.vnet.ibm.com Thu May 21 23:00:02 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 21 May 2009 16:00:02 -0700 Subject: [Libvirt-cim] [PATCH 3 of 4] [TEST] 07 - Remove cleanup_guest_netpool() def and call it from vsmigration.py In-Reply-To: References: Message-ID: <4e7716bd2775599cdafd.1242946802@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242946256 25200 # Node ID 4e7716bd2775599cdafd858d7875924669c41766 # Parent 7f1b1858ff6961c21356675236cde48096182708 [TEST] 07 - Remove cleanup_guest_netpool() def and call it from vsmigration.py Signed-off-by: Kaitlin Rupert diff -r 7f1b1858ff69 -r 4e7716bd2775 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py Thu May 21 15:50:56 2009 -0700 @@ -35,9 +35,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf sup_types = ['KVM', 'Xen'] @@ -60,35 +60,6 @@ return PASS, cxml -def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): - # Clean the domain on target machine. - # This is req when migration is successful, also when migration is not - # completely successful VM might be created on the target machine - # and hence need to clean. 
- target_list = domain_list(t_sysname, virt) - if target_list != None and test_dom in target_list: - ret_value = cxml.undefine(t_sysname) - if not ret_value: - logger.info("Failed to undefine the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and default_network_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, default_network_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - default_network_name, t_sysname) - - # Remote Migration not Successful, clean the domain on src machine - src_list = domain_list(s_sysname, virt) - if src_list != None and test_dom in src_list: - ret_value = cxml.undefine(s_sysname) - if not ret_value: - logger.info("Failed to undefine the domain '%s' on source '%s'", - test_dom, s_sysname) - - @do_main(sup_types) def main(): options = main.options From kaitlin at linux.vnet.ibm.com Thu May 21 23:00:01 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 21 May 2009 16:00:01 -0700 Subject: [Libvirt-cim] [PATCH 2 of 4] [TEST] 06 - Remove cleanup_guest_netpool() def and call it from vsmigration.py In-Reply-To: References: Message-ID: <7f1b1858ff6961c21356.1242946801@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242946256 25200 # Node ID 7f1b1858ff6961c21356675236cde48096182708 # Parent f26248c6fb4186ee6a942192f25eab8d4bcc5626 [TEST] 06 - Remove cleanup_guest_netpool() def and call it from vsmigration.py Signed-off-by: Kaitlin Rupert diff -r f26248c6fb41 -r 7f1b1858ff69 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/06_remote_live_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/06_remote_live_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/06_remote_live_migration.py Thu May 21 15:50:56 2009 -0700 @@ -35,9 +35,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf sup_types = ['KVM', 'Xen'] @@ -67,45 +67,6 @@ return PASS, cxml -def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): - # Clean the domain on target machine. - # This is req when migration is successful, also when migration is not - # completely successful VM might be created on the target machine - # and hence need to clean. 
- target_list = domain_list(t_sysname, virt) - if target_list != None and test_dom in target_list: - ret_value = cxml.destroy(t_sysname) - if not ret_value: - logger.info("Failed to destroy the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - ret_value = cxml.undefine(t_sysname) - if not ret_value: - logger.info("Failed to undefine the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and default_network_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, default_network_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - default_network_name, t_sysname) - - # Remote Migration not Successful, clean the domain on src machine - src_list = domain_list(s_sysname, virt) - if src_list != None and test_dom in src_list: - ret_value = cxml.cim_destroy(s_sysname) - if not ret_value: - logger.info("Failed to destroy the domain '%s' on the source '%s'", - test_dom, s_sysname) - - ret_value = cxml.undefine(s_sysname) - if not ret_value: - logger.info("Failed to undefine the domain '%s' on source '%s'", - test_dom, s_sysname) - - @do_main(sup_types) def main(): options = main.options From kaitlin at linux.vnet.ibm.com Thu May 21 23:00:03 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 21 May 2009 16:00:03 -0700 Subject: [Libvirt-cim] [PATCH 4 of 4] [TEST] 08 - Remove cleanup_guest_netpool() def and call it from vsmigration.py In-Reply-To: References: Message-ID: <6495a6ca36878b10e7bb.1242946803@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242946256 25200 # Node ID 6495a6ca36878b10e7bb57eae53eb6a943d8b4d1 # Parent 4e7716bd2775599cdafd858d7875924669c41766 [TEST] 08 - Remove cleanup_guest_netpool() def and call it from vsmigration.py Signed-off-by: Kaitlin Rupert diff -r 4e7716bd2775 -r 6495a6ca3687 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 @@ -35,9 +35,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf sup_types = ['KVM', 'Xen'] @@ -168,13 +168,7 @@ cleanup_guest(virt, cxml, test_dom, t_sysname, s_sysname) status = FAIL - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and net_pool_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, net_pool_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - net_pool_name, t_sysname) + cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname) if status_restart != PASS or status_resume != PASS: status = FAIL From kaitlin at linux.vnet.ibm.com Thu May 21 22:59:59 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin 
Rupert) Date: Thu, 21 May 2009 15:59:59 -0700 Subject: [Libvirt-cim] [PATCH 0 of 4] [TEST] Misc migration test fixes Message-ID: This tests enable localhost migration to work with Xen guests From deeptik at linux.vnet.ibm.com Fri May 22 06:41:52 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Thu, 21 May 2009 23:41:52 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] #2 Adding new tc to verify the template RASDs for parent DiskPoolRASD Message-ID: # HG changeset patch # User Deepti B. Kalakeri # Date 1242974489 25200 # Node ID c2fb325da0413f33ea9b893f29ace6caed8a24b1 # Parent e5fd77170913c3819d667e240c9873efa3bf0d07 [TEST] #2 Adding new tc to verify the template RASDs for parent DiskPoolRASD Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r e5fd77170913 -r c2fb325da041 suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py Thu May 21 23:41:29 2009 -0700 @@ -0,0 +1,114 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +# This test case is used to verify the Parent DiskPoolRASD properties +# in detail using the SettingsDefineCapabilities association. 
+# +# Ex: +# Command: +# wbemcli ai -ac SettingsDefineCapabilities \ +# 'http://localhost:5988/root/virt:KVM_AllocationCapabilties.InstanceID=\ +# "DiskPool/0"' +# +# Output: +# localhost/root/virt:KVM_DiskPoolResourceAllocationSettingData.\ +# InstanceID="Increment" +# -InstanceID="Default" [ verified for Minimum, Maximum, Increment as well ] +# -ResourceType=17 +# -PoolID="DiskPool/0" +# -Type=3 [ For Type 1 and 2 as well ] +# -Path="/dev/null" +# -DevicePath= +# -Host="host_sys.domain.com" +# -SourceDirectory="/var/lib/images" +# +# Date : 21-05-2009 + +import sys +from sets import Set +from CimTest.Globals import logger +from XenKvmLib.const import do_main +from CimTest.ReturnCodes import PASS, FAIL +from XenKvmLib.pool import get_pool_rasds + +sup_types = ['KVM', 'Xen', 'XenFV'] +DISKPOOL_REC_LEN = 3 + +def get_rec(diskpool_rasd, inst_id='Default'): + recs = [] + for dp_rasd in diskpool_rasd: + if dp_rasd['InstanceID'] == inst_id : + recs.append(dp_rasd) + return recs + + at do_main(sup_types) +def main(): + options = main.options + virt = options.virt + server = options.ip + diskpool_rasd = get_pool_rasds(server, virt, pool_type="DiskPool", + filter_default=False) + inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] + n_rec_val = { 'ResourceType' : 17, + 'PoolID' : "DiskPool/0", + 'Path' : "/dev/null", + } + exp_type_path_host_dir = [('1', 'None', 'None', 'None'), + ('2', '/dev/sda100', 'None', 'None'), + ('3', 'None', 'host_sys.domain.com', + '/var/lib/images')] + + + for inst_type in inst_list: + logger.info("Verifying '%s' records", inst_type) + + try: + n_rec = get_rec(diskpool_rasd, inst_id=inst_type) + if len(n_rec) != DISKPOOL_REC_LEN: + raise Exception("Got %s recs instead of %s" %(len(n_rec), + DISKPOOL_REC_LEN)) + + res_type_path_host_dir = [] + for rec in n_rec: + l = (str(rec['Type']), str(rec['DevicePath']), + str(rec['Host']), str(rec['SourceDirectory'])) + res_type_path_host_dir.append(l) + + if len(Set(exp_type_path_host_dir) & Set(res_type_path_host_dir)) \ + != DISKPOOL_REC_LEN : + raise Exception("Mismatching values, \nGot %s,\nExpected %s"\ + %(exp_type_path_host_dir, + res_type_path_host_dir)) + + for key in n_rec_val.keys(): + for rec in n_rec: + if n_rec_val[key] != rec[key]: + raise Exception("'%s' Mismatch, Got %s, Expected %s" \ + % (key, rec[key], n_rec_val[key])) + + except Exception, details: + logger.error("Exception details: %s", details) + return FAIL + + return PASS +if __name__ == "__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Fri May 22 07:32:18 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Fri, 22 May 2009 13:02:18 +0530 Subject: [Libvirt-cim] [PATCH] [TEST] (#2) Return SKIP if the provider version doesn't support template pool RASDs In-Reply-To: <5417439be71f3f502576.1242937461@localhost.localdomain> References: <5417439be71f3f502576.1242937461@localhost.localdomain> Message-ID: <4A165502.7080502@linux.vnet.ibm.com> I had realised that I need this only yest.. Thanks for adding this. +1 . -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Fri May 22 08:41:25 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Fri, 22 May 2009 01:41:25 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Modifying the RASD/07_parent_disk_pool.py to verify when PoolRASD support Message-ID: # HG changeset patch # User Deepti B. 
Kalakeri # Date 1242981668 25200 # Node ID a8ad5184432bbbbefa3e17802d334fe465f13140 # Parent 80d1092c225b8f04502c61986e7f81b683bef98b [TEST] Modifying the RASD/07_parent_disk_pool.py to verify when PoolRASD support. This patch is dependent on "(#2) Return SKIP if the provider version doesn't support template pool RASDs" Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r 80d1092c225b -r a8ad5184432b suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py --- a/suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py Fri May 22 01:36:16 2009 -0700 +++ b/suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py Fri May 22 01:41:08 2009 -0700 @@ -65,8 +65,10 @@ options = main.options virt = options.virt server = options.ip - diskpool_rasd = get_pool_rasds(server, virt, pool_type="DiskPool", - filter_default=False) + status, diskpool_rasd = get_pool_rasds(server, virt, pool_type="DiskPool", + filter_default=False) + if status != PASS: + return status inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ] n_rec_val = { 'ResourceType' : 17, 'PoolID' : "DiskPool/0", From deeptik at linux.vnet.ibm.com Tue May 26 07:12:36 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 26 May 2009 00:12:36 -0700 Subject: [Libvirt-cim] [PATCH 1 of 4] [TEST] Modifying pool.py to accomodate changes for diskpool In-Reply-To: References: Message-ID: # HG changeset patch # User Deepti B. Kalakeri # Date 1243320943 25200 # Node ID ecdbd16fefe8d7bdf82f85a74d66a312ab1c52e0 # Parent 46291354b6916236a239356a475c6e6038c8e1c5 [TEST] Modifying pool.py to accomodate changes for diskpool. Tested with KVM on F10 and with current sources. Signed-off-by: Deepti B. Kalakeri diff -r 46291354b691 -r ecdbd16fefe8 suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Mon May 25 23:42:34 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Mon May 25 23:55:43 2009 -0700 @@ -32,7 +32,7 @@ from XenKvmLib import rpcs_service import pywbem from CimTest.CimExt import CIMClassMOF -from XenKvmLib.vxml import NetXML +from XenKvmLib.vxml import NetXML, PoolXML from XenKvmLib.xm_virt_util import virsh_version cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED @@ -124,7 +124,7 @@ logger.error("%s template RASDs not supported. %s.", pool_type, detail) return SKIP, None - net_pool_rasds = [] + n_d_pool_rasds = [] ac_cn = get_typed_class(virt, "AllocationCapabilities") an_cn = get_typed_class(virt, "SettingsDefineCapabilities") @@ -140,11 +140,11 @@ if filter_default == True: for item in rasd: if item['InstanceID'] == "Default": - net_pool_rasds.append(item) + n_d_pool_rasds.append(item) else: return PASS, rasd - return PASS, net_pool_rasds + return PASS, n_d_pool_rasds def net_undefine(network, server, virt="Xen"): """Function undefine a given virtual network""" @@ -165,12 +165,28 @@ return PASS -def create_netpool(server, virt, test_pool, pool_attr_list, mode_type=0): - status = PASS - rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") - rpcs_conn = eval("rpcs_service." 
+ rpcs)(server) +def undefine_diskpool(server, virt, dp_name): + libvirt_version = virsh_version(server, virt) + if libvirt_version >= '0.4.1': + if dp_name == None: + return FAIL + + cmd = "virsh -c %s pool-undefine %s" % (virt2uri(virt), dp_name) + ret, out = run_remote(server, cmd) + if ret != 0: + logger.error("Failed to undefine pool '%s'", dp_name) + return FAIL + + return PASS + +def create_pool(server, virt, test_pool, pool_attr_list, + mode_type=0, pool_type="NetworkPool"): + + rpcs_cn = get_typed_class(virt, "ResourcePoolConfigurationService") + rpcs_conn = eval("rpcs_service." + rpcs_cn)(server) curr_cim_rev, changeset = get_provider_version(virt, server) if curr_cim_rev < libvirt_cim_child_pool_rev: + try: rpcs_conn.CreateChildResourcePool() except pywbem.CIMError, (err_no, desc): @@ -183,75 +199,91 @@ logger.error("Unexpected rc code %s and description %s\n", err_no, desc) return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: - n_list = net_list(server, virt) - for _net_name in n_list: - net_xml = NetXML(server=server, networkname=_net_name, - virt=virt, is_new_net=False) - pool_use_attr = net_xml.xml_get_netpool_attr_list() - if pool_attr_list['Address'] in pool_use_attr: - logger.error("IP address is in use by a different network") - return FAIL + if pool_type == "NetworkPool" : + n_list = net_list(server, virt) + for _net_name in n_list: + net_xml = NetXML(server=server, networkname=_net_name, + virt=virt, is_new_net=False) + pool_use_attr = net_xml.xml_get_netpool_attr_list() + if pool_attr_list['Address'] in pool_use_attr: + logger.error("IP address is in use by a different network") + return FAIL - status, net_pool_rasds = get_pool_rasds(server, virt) - if len(net_pool_rasds) == 0 or status != PASS: - logger.error("We can not get NetPoolRASDs") + status, n_d_pool_rasds = get_pool_rasds(server, virt, pool_type) + if len(n_d_pool_rasds) == 0 or status != PASS: + logger.error("Failed to get '%sRASD'", pool_type) return FAIL else: - for i in range(0, len(net_pool_rasds)): - if net_pool_rasds[i]['ForwardMode'] == mode_type: - net_pool_rasds[i]['PoolID'] = "NetworkPool/%s" % test_pool - for attr, val in pool_attr_list.iteritems(): - net_pool_rasds[i][attr] = val - break - - pool_settings = inst_to_mof(net_pool_rasds[i]) + for i in range(0, len(n_d_pool_rasds)): + pool_id = "%s/%s" %(pool_type, test_pool) + n_d_pool_rasds[i]['PoolID'] = pool_id + if pool_type == "NetworkPool": + key = 'ForwardMode' + elif pool_type == "DiskPool": + key = 'Type' + + if n_d_pool_rasds[i][key] == mode_type: + for attr, val in pool_attr_list.iteritems(): + n_d_pool_rasds[i][attr] = val + break + + pool_settings = inst_to_mof(n_d_pool_rasds[i]) try: rpcs_conn.CreateChildResourcePool(ElementName=test_pool, Settings=[pool_settings]) except Exception, details: - logger.error("Error in childpool creation") - logger.error(details) + logger.error("Exception in create_pool()") + logger.error("Exception details: %s", details) return FAIL - return status + return PASS -def verify_pool(server, virt, pooltype, poolname, pool_attr_list, mode_type=0): +def verify_pool(server, virt, poolname, pool_attr_list, mode_type=0, + pool_type="NetworkPool"): + status = FAIL - pool_list = EnumInstances(server, pooltype) + pool_cn = get_typed_class(virt, pool_type) + pool_list = EnumInstances(server, pool_cn) if len(pool_list) < 1: - logger.error("Return %i instances, expected at least one instance", + logger.error("Got %i instances, expected at least one instance", len(pool_list)) return FAIL - poolid = 
"NetworkPool/%s" % poolname + poolid = "%s/%s" % (pool_type, poolname) for i in range(0, len(pool_list)): ret_pool = pool_list[i].InstanceID if ret_pool != poolid: continue - net_xml = NetXML(server, virt=virt, networkname=poolname, - is_new_net=False) + if pool_type == "NetworkPool" : + net_xml = NetXML(server, virt=virt, networkname=poolname, + is_new_net=False) - ret_mode = net_xml.xml_get_netpool_mode() - libvirt_version = virsh_version(server, virt) + ret_mode = net_xml.xml_get_netpool_mode() + libvirt_version = virsh_version(server, virt) + #Forward mode support was added in 0.4.2 + if libvirt_version >= '0.4.2': + if mode_type == 1 and ret_mode != "nat": + logger.error("Error when verifying 'nat' type network") + return FAIL + elif mode_type == 2 and ret_mode != "route": + logger.error("Error when verifying 'route' type network") + return FAIL - #Forward mode support was added in 0.4.2 - if libvirt_version >= '0.4.2': - if mode_type == 1 and ret_mode != "nat": - logger.error("Got error when verify nat type") - return FAIL - elif mode_type == 2 and ret_mode != "route": - logger.error("Got error when verify route type") - return FAIL + ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() - ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() + elif pool_type == "DiskPool" : + disk_xml = PoolXML(server ,virt=virt, poolname=poolname, + is_new_pool=False) + ret_pool_attr_list = disk_xml.xml_get_pool_attr_list() for i in range(0, len(ret_pool_attr_list)): if ret_pool_attr_list[i] not in pool_attr_list.itervalues(): - logger.error("Got error when parsing %s", ret_pool_attr_list[i]) + logger.error("Failed to verify '%s'", ret_pool_attr_list[i]) return FAIL status = PASS From deeptik at linux.vnet.ibm.com Tue May 26 07:12:39 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 26 May 2009 00:12:39 -0700 Subject: [Libvirt-cim] [PATCH 4 of 4] [TEST] Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py In-Reply-To: References: Message-ID: # HG changeset patch # User Deepti B. Kalakeri # Date 1243321835 25200 # Node ID b417a41ea09985ad7aaabc642496c86e25c5d404 # Parent d79401609c9ce45ef70e6f5ddea3f81c10b21b33 [TEST] Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py to align with the changes to create_pool() and verify_pool() changes of pool.py. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r d79401609c9c -r b417a41ea099 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Tue May 26 00:06:28 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Tue May 26 00:10:35 2009 -0700 @@ -54,7 +54,7 @@ from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class from XenKvmLib.common_util import destroy_netpool -from XenKvmLib.pool import create_netpool, verify_pool, undefine_netpool +from XenKvmLib.pool import create_pool, verify_pool, undefine_netpool test_pool = "testpool" @@ -77,13 +77,13 @@ "IPRangeEnd" : range_addr_end } for item in range(0, 3): - status = create_netpool(options.ip, options.virt, - test_pool, pool_attr, mode_type=item) + status = create_pool(options.ip, options.virt, + test_pool, pool_attr, mode_type=item) if status != PASS: logger.error("Error in networkpool creation") return FAIL - status = verify_pool(options.ip, options.virt, np, + status = verify_pool(options.ip, options.virt, test_pool, pool_attr, mode_type=item) if status != PASS: logger.error("Error in networkpool verification") diff -r d79401609c9c -r b417a41ea099 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Tue May 26 00:06:28 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Tue May 26 00:10:35 2009 -0700 @@ -52,7 +52,7 @@ from XenKvmLib.enumclass import EnumInstances, EnumNames from XenKvmLib.classes import get_typed_class from XenKvmLib.common_util import destroy_netpool -from XenKvmLib.pool import create_netpool, verify_pool +from XenKvmLib.pool import create_pool, verify_pool cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED cim_mname = "DeleteResourcePool" @@ -71,7 +71,8 @@ rpcs_conn.DeleteResourcePool() except pywbem.CIMError, (err_no, desc): if err_no == cim_errno : - logger.info("Got expected exception for '%s' service", cim_mname) + logger.info("Got expected exception for '%s' service", + cim_mname) logger.info("Errno is '%s' ", err_no) logger.info("Error string is '%s'", desc) return PASS @@ -87,21 +88,23 @@ "IPRangeEnd" : "192.168.0.15", "ForwardMode" : "nat" } - np = get_typed_class(options.virt, 'NetworkPool') - np_id = "NetworkPool/%s" % test_pool - status = create_netpool(options.ip, options.virt, test_pool, pool_attr) + status = create_pool(options.ip, options.virt, + test_pool, pool_attr) if status != PASS: logger.error("Error in networkpool creation") return FAIL - status = verify_pool(options.ip, options.virt, np, + status = verify_pool(options.ip, options.virt, test_pool, pool_attr) + if status != PASS: logger.error("Error in networkpool verification") destroy_netpool(options.ip, options.virt, test_pool) return FAIL + np = get_typed_class(options.virt, 'NetworkPool') + np_id = "NetworkPool/%s" % test_pool netpool = EnumNames(options.ip, np) for i in range(0, len(netpool)): ret_pool = netpool[i].keybindings['InstanceID'] From deeptik at linux.vnet.ibm.com Tue May 26 07:12:37 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. 
Kalakeri) Date: Tue, 26 May 2009 00:12:37 -0700 Subject: [Libvirt-cim] [PATCH 2 of 4] [TEST] Modifying vxml.py to accomadate DiskPool support In-Reply-To: References: Message-ID: <502f6364a823ebfc0574.1243321957@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1243321084 25200 # Node ID 502f6364a823ebfc05748661625765028c339bb6 # Parent ecdbd16fefe8d7bdf82f85a74d66a312ab1c52e0 [TEST] Modifying vxml.py to accomadate DiskPool support. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r ecdbd16fefe8 -r 502f6364a823 suites/libvirt-cim/lib/XenKvmLib/vxml.py --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Mon May 25 23:55:43 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Mon May 25 23:58:04 2009 -0700 @@ -293,7 +293,7 @@ class PoolXML(Virsh, XMLClass): def __init__(self, server, poolname=const.default_pool_name, - virt='xen'): + virt='xen', is_new_pool=True): XMLClass.__init__(self) if virt == 'XenFV': @@ -302,6 +302,17 @@ self.pool_name = poolname self.server = server + if is_new_pool is False: + cmd = "virsh pool-dumpxml %s" % self.pool_name + s, disk_xml = utils.run_remote(server, cmd) + if s != 0: + logger.error("Encounter error dump netxml") + return None + else: + self.xml_string = disk_xml + self.xdoc = minidom.parseString(self.xml_string) + return + pool = self.add_sub_node(self.xdoc, 'pool', type='dir') self.add_sub_node(pool, 'name', self.pool_name) target = self.add_sub_node(pool, 'target') @@ -313,10 +324,19 @@ def destroy_vpool(self): return self.run(self.server, 'pool-destroy', self.pool_name) + def undefine_vpool(self): + return self.run(self.server, 'pool-undefine', self.pool_name) + def xml_get_diskpool_name(self): dpoolname = self.get_value_xpath('/pool/name') return dpoolname + def xml_get_pool_attr_list(self): + pool_attr_list = [] + poolpath = self.get_value_xpath('/pool/target/path') + pool_attr_list.append(poolpath) + + return pool_attr_list class VirtXML(Virsh, XMLClass): """Base class for all XML generation & operation""" From deeptik at linux.vnet.ibm.com Tue May 26 07:12:35 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 26 May 2009 00:12:35 -0700 Subject: [Libvirt-cim] [PATCH 0 of 4] [TEST] Adding new tc to verify DiskPool deletion using DeleteResourcePool. Message-ID: This test case should be applied on top of "(#2) Return SKIP if the provider version doesn't support template pool RASDs" Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri From deeptik at linux.vnet.ibm.com Tue May 26 07:12:38 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 26 May 2009 00:12:38 -0700 Subject: [Libvirt-cim] [PATCH 3 of 4] [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool In-Reply-To: References: Message-ID: # HG changeset patch # User Deepti B. Kalakeri # Date 1243321588 25200 # Node ID d79401609c9ce45ef70e6f5ddea3f81c10b21b33 # Parent 502f6364a823ebfc05748661625765028c339bb6 [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r 502f6364a823 -r d79401609c9c suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py Tue May 26 00:06:28 2009 -0700 @@ -0,0 +1,133 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. 
+# +# Authors: +# Guolian Yun +# +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case should test the DeleteResourcePool service +# supplied by the RPCS provider. +# The DeleteResourcePool is used to delete a resource pool. +# DeleteResourcePool() details: +# Input +# ----- +# IN -- Pool -- CIM_ResourcePool REF -- The resource pool to delete +# +# Output +# ------ +# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started +# OUT -- Error-- String -- Encoded error instance if the operation +# failed and did not return a job. +# +# Exception details before Revision 841 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 841, the service is implemented +# The test case verifies DeleteResourcePool is able to delete the +# dir type diskpool. +# -Date: 26.05.2009 + +import sys +import pywbem +from XenKvmLib import rpcs_service +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS +from XenKvmLib.const import do_main, platform_sup, get_provider_version +from XenKvmLib.enumclass import EnumInstances, EnumNames +from XenKvmLib.classes import get_typed_class +from XenKvmLib.pool import create_pool, verify_pool, undefine_diskpool +from XenKvmLib.common_util import destroy_diskpool + +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED +cim_mname = "DeleteResourcePool" +libvirt_cim_child_pool_rev = 841 +test_pool = "dp_pool" +TYPE = 1 # Dir type diskpool + + at do_main(platform_sup) +def main(): + status = FAIL + options = main.options + server = options.ip + virt = options.virt + cn = get_typed_class(virt, "ResourcePoolConfigurationService") + rpcs_conn = eval("rpcs_service." 
+ cn)(server) + curr_cim_rev, changeset = get_provider_version(virt, server) + if curr_cim_rev < libvirt_cim_child_pool_rev: + + try: + rpcs_conn.DeleteResourcePool() + except pywbem.CIMError, (err_no, desc): + if err_no == cim_errno : + logger.info("Got expected exception for '%s' service", cim_mname) + logger.info("Errno is '%s' ", err_no) + logger.info("Error string is '%s'", desc) + return PASS + else: + logger.error("Unexpected rc code %s and description %s\n", + err_no, desc) + return status + + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + + try: + pool_attr = { "Path" : "/tmp" } + status = create_pool(server, virt, test_pool, pool_attr, + pool_type="DiskPool", mode_type=TYPE) + if status != PASS: + logger.error("Failed to create diskpool '%s'", test_pool) + return FAIL + + status = verify_pool(server, virt, test_pool, + pool_attr, pool_type="DiskPool") + if status != PASS: + raise Exception("Failed to verify diskpool '%s'" % test_pool) + + dp = get_typed_class(virt, 'DiskPool') + dp_id = "DiskPool/%s" % test_pool + pool_settings = None + pool = EnumNames(server, dp) + for i in range(0, len(pool)): + ret_pool = pool[i].keybindings['InstanceID'] + if ret_pool == dp_id: + pool_settings = pool[i] + break + + if pool_settings == None: + logger.error("Failed to get poolsettings for '%s'", test_pool) + return FAIL + + rpcs_conn.DeleteResourcePool(Pool = pool_settings) + pool = EnumInstances(server, dp) + for i in range(0, len(pool)): + ret_pool = pool[i].InstanceID + if ret_pool == dp_id: + raise Exception("Failed to delete diskpool '%s'" %test_pool) + + status = PASS + except Exception, details: + logger.error("Exception details: %s", details) + destroy_diskpool(server, virt, test_pool) + undefine_diskpool(server, virt, test_pool) + return FAIL + + return status + +if __name__ == "__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Tue May 26 12:25:52 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Tue, 26 May 2009 05:25:52 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Add new tc to verify dir type diskpool creations using CreateChildResourcePool() Message-ID: <7329a4c6f1884c4be3e3.1243340752@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1243340705 25200 # Node ID 7329a4c6f1884c4be3e3f042a704f56410bea3fa # Parent b417a41ea09985ad7aaabc642496c86e25c5d404 [TEST] Add new tc to verify dir type diskpool creations using CreateChildResourcePool(). This test case should be applied on top of "(#2) Return SKIP if the provider version doesn't support template pool RASDs" Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r b417a41ea099 -r 7329a4c6f188 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py Tue May 26 05:25:05 2009 -0700 @@ -0,0 +1,102 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case should test the CreateChildResourcePool service +# supplied by the RPCS provider. +# Input +# ----- +# IN -- ElementName -- String -- The desired name of the resource pool +# IN -- Settings -- String -- A string representation of a +# CIM_ResourceAllocationSettingData +# instance that represents the allocation +# assigned to this child pool +# IN -- ParentPool -- CIM_ResourcePool REF -- The parent pool from which +# to create this pool +# +# Output +# ------ +# OUT -- Pool -- CIM_ResourcePool REF -- The resulting resource pool +# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started +# OUT -- Error -- String -- Encoded error instance if the operation +# failed and did not return a job +# +# Exception details before Revision 846 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 846, the service is implemented +# +# -Date: 26.05.2009 + +import sys +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS +from XenKvmLib.const import do_main, platform_sup +from XenKvmLib.classes import get_typed_class +from XenKvmLib.common_util import destroy_diskpool +from XenKvmLib.pool import create_pool, verify_pool, undefine_diskpool + +test_pool = "diskpool" +dp_types = { "DISK_POOL_DIR" : 1 } + + + at do_main(platform_sup) +def main(): + options = main.options + server = options.ip + virt = options.virt + pool_attr = { "Path" : "/tmp" } + + # For now the test case support only the creation of + # dir type disk pool, later change to fs and netfs etc + for key, value in dp_types.iteritems(): + status = create_pool(server, virt, test_pool, pool_attr, + mode_type=value, pool_type= "DiskPool") + if status != PASS: + logger.error("Failed to create '%s' type diskpool '%s'", + key, test_pool) + return FAIL + + status = verify_pool(server, virt, test_pool, pool_attr, + mode_type=value, pool_type="DiskPool") + if status != PASS: + logger.error("Error in diskpool verification") + destroy_diskpool(server, virt, test_pool) + undefine_diskpool(server, virt, test_pool) + return FAIL + + status = destroy_diskpool(server, virt, test_pool) + if status != PASS: + logger.error("Unable to destroy diskpool '%s'", test_pool) + return FAIL + + status = undefine_diskpool(server, virt, test_pool) + if status != PASS: + logger.error("Unable to undefine diskpool '%s'", test_pool) + return FAIL + + status = PASS + + return status + +if __name__ == "__main__": + sys.exit(main()) diff -r b417a41ea099 -r 7329a4c6f188 suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Tue May 26 00:10:35 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Tue May 26 05:25:05 2009 -0700 @@ -230,7 +230,7 @@ break pool_settings = inst_to_mof(n_d_pool_rasds[i]) - + try: rpcs_conn.CreateChildResourcePool(ElementName=test_pool, Settings=[pool_settings]) From deeptik at linux.vnet.ibm.com Wed May 27 13:20:11 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Wed, 27 May 2009 06:20:11 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Modifying ResourcePool/01_enum.py to accomodate verifying Parent DiskPool and NetworkPool Message-ID: <30ce5799fb9abed46b52.1243430411@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. 
Kalakeri # Date 1243430368 25200 # Node ID 30ce5799fb9abed46b528e70ad81b4242a801f05 # Parent e5fd77170913c3819d667e240c9873efa3bf0d07 [TEST] Modifying ResourcePool/01_enum.py to accomodate verifying Parent DiskPool and NetworkPool. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r e5fd77170913 -r 30ce5799fb9a suites/libvirt-cim/cimtest/ResourcePool/01_enum.py --- a/suites/libvirt-cim/cimtest/ResourcePool/01_enum.py Thu May 21 04:21:15 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePool/01_enum.py Wed May 27 06:19:28 2009 -0700 @@ -30,8 +30,7 @@ from XenKvmLib.enumclass import EnumInstances from XenKvmLib.classes import get_typed_class from XenKvmLib import vxml -from CimTest import Globals -from CimTest.Globals import logger +from CimTest.Globals import logger, CIM_ERROR_ENUMERATE from XenKvmLib.const import do_main, default_pool_name from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.xm_virt_util import net_list @@ -56,8 +55,8 @@ netxml = vxml.NetXML(server, bridgename, test_network, virt) ret = netxml.create_vnet() if not ret: - logger.error("Failed to create the Virtual Network '%s'", \ - test_network) + logger.error("Failed to create the Virtual Network '%s'", + test_network) return SKIP, None disk_instid = '%s/%s' % (dp_cn, default_pool_name) @@ -77,25 +76,30 @@ logger.error("Returned %s instead of %s", ret_value, exp_value) def verify_fields(pool_list, poolname, cn): - status = PASS if len(poolname) < 1: logger.error("%s return %i instances, expected atleast 1 instance", cn, len(poolname)) return FAIL - exp_value = pool_list[cn][0] + for i in range(0, len(poolname)): - ret_value = poolname[i].InstanceID - if ret_value == exp_value: - break - elif ret_value != exp_value and i == len(poolname)-1: - print_error('InstanceID', ret_value, exp_value) - status = FAIL - ret_value = poolname[0].ResourceType - exp_value = pool_list[cn][1] - if ret_value != exp_value: - print_error('ResourceType', ret_value, exp_value) - status = FAIL - return status + + rtype_ret_value = poolname[i].ResourceType + rtype_exp_value = pool_list[cn][1] + if rtype_ret_value != rtype_exp_value: + print_error('ResourceType', rtype_ret_value, rtype_exp_value) + return FAIL + + inst_ret_value = poolname[i].InstanceID + inst_exp_value = pool_list[cn][0] + if "DiskPool/0" == inst_ret_value or "NetworkPool/0" == inst_ret_value: + if poolname[i].Primordial != True: + print_error('Primordial', poolname[i].Primordial, "True") + return FAIL + elif inst_ret_value != inst_exp_value and i == len(poolname)-1: + print_error('InstanceID', inst_ret_value, inst_exp_value) + return FAIL + + return PASS @do_main(sup_types) @@ -113,38 +117,23 @@ mp = get_typed_class(virt, mp_cn) pp = get_typed_class(virt, pp_cn) - dp = get_typed_class(virt, dp_cn) - np = get_typed_class(virt, np_cn) + cn_list = [ mp, pp ] + if virt != 'LXC': + dp = get_typed_class(virt, dp_cn) + np = get_typed_class(virt, np_cn) + cn_list.append(dp) + cn_list.append(np) - try: - mempool = EnumInstances(ip, mp) - except Exception: - logger.error(Globals.CIM_ERROR_ENUMERATE, mp) - return FAIL - status = verify_fields(pool_list, mempool, mp) - - try: - propool = EnumInstances(ip, pp) - except Exception: - logger.error(Globals.CIM_ERROR_ENUMERATE, pp) - return FAIL - status = verify_fields(pool_list, propool, pp) - - if virt != 'LXC': + for cn in cn_list: try: - diskpool = EnumInstances(ip, dp) - except Exception: - logger.error(Globals.CIM_ERROR_ENUMERATE, dp) + pool = EnumInstances(ip, cn) + except Exception, details: 
+ logger.error(CIM_ERROR_ENUMERATE, cn) + logger.error("Exception details: %s", details) return FAIL - status = verify_fields(pool_list, diskpool, dp) - - try: - netpool = EnumInstances(ip, np) - except Exception: - logger.error(Globals.CIM_ERROR_ENUMERATE, np) - return FAIL - status = verify_fields(pool_list, netpool, np) - + status = verify_fields(pool_list, pool, cn) + if status != PASS: + return status return status if __name__ == "__main__": From rmaciel at linux.vnet.ibm.com Wed May 27 17:04:46 2009 From: rmaciel at linux.vnet.ibm.com (Richard Maciel) Date: Wed, 27 May 2009 14:04:46 -0300 Subject: [Libvirt-cim] [PATCH] A few schema cleanups after migrating from 2.16 to 2.21 In-Reply-To: References: Message-ID: <4A1D72AE.6070300@linux.vnet.ibm.com> Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1242841306 25200 > # Node ID aa8e071730d2ce20064f1c0295a8005e31ef2cea > # Parent 10e45fca47f0d19eddcf7bb1559ba9e7397aea24 > A few schema cleanups after migrating from 2.16 to 2.21 > > Fixes: > -CIM_HostedResourcePool is needed in cimv2 to register Virt_HostedResourcePool > as a cross-namespace provider > > -Implementation specific VirtualSystemSnapshotService should be a subclass of > CIM_VirtualSystemSnapshotService, not Virt_VirtualSystemSnapshotService > > -CIM_VirtualSystemMigrationSettingData.mof and > CIM_VirtualSystemMigrationService.mof are needed in interop to properly > register ECTP asa cross-namespace provider CIM_VirtualSystemMigrationSettingData is not accessible in the interop namespace. To test I did a query for the aforementioned provider in the root/interop space: wbemcli ein 'http://localhost:5988/root/interop:CIM_VirtualSystemMigrationSettingData' > > Signed-off-by: Kaitlin Rupert > > diff -r 10e45fca47f0 -r aa8e071730d2 base_schema/cimv2.21.0-cimv2_mof > --- a/base_schema/cimv2.21.0-cimv2_mof Mon May 18 16:39:20 2009 -0700 > +++ b/base_schema/cimv2.21.0-cimv2_mof Wed May 20 10:41:46 2009 -0700 > @@ -9,3 +9,4 @@ > #pragma include ("Core/CIM_HostedResourcePool.mof") > #pragma include ("Core/CIM_ElementCapabilities.mof") > #pragma include ("Core/CIM_HostedService.mof") > +#pragma include ("Core/CIM_HostedResourcePool.mof") > diff -r 10e45fca47f0 -r aa8e071730d2 base_schema/cimv2.21.0-interop_mof > --- a/base_schema/cimv2.21.0-interop_mof Mon May 18 16:39:20 2009 -0700 > +++ b/base_schema/cimv2.21.0-interop_mof Wed May 20 10:41:46 2009 -0700 > @@ -26,3 +26,5 @@ > #pragma include ("Core/CIM_ResourcePool.mof") > #pragma include ("Core/CIM_Capabilities.mof") > #pragma include ("Core/CIM_AllocationCapabilities.mof") > +#pragma include ("System/CIM_VirtualSystemMigrationSettingData.mof") > +#pragma include ("System/CIM_VirtualSystemMigrationService.mof") > diff -r 10e45fca47f0 -r aa8e071730d2 schema/VirtualSystemSnapshotService.mof > --- a/schema/VirtualSystemSnapshotService.mof Mon May 18 16:39:20 2009 -0700 > +++ b/schema/VirtualSystemSnapshotService.mof Wed May 20 10:41:46 2009 -0700 > @@ -1,5 +1,5 @@ > // Copyright IBM Corp. 
2008 > > -class Xen_VirtualSystemSnapshotService : Virt_VirtualSystemSnapshotService { }; > -class KVM_VirtualSystemSnapshotService : Virt_VirtualSystemSnapshotService { }; > -class LXC_VirtualSystemSnapshotService : Virt_VirtualSystemSnapshotService { }; > +class Xen_VirtualSystemSnapshotService : CIM_VirtualSystemSnapshotService { }; > +class KVM_VirtualSystemSnapshotService : CIM_VirtualSystemSnapshotService { }; > +class LXC_VirtualSystemSnapshotService : CIM_VirtualSystemSnapshotService { }; > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim -- Richard Maciel, MSc IBM Linux Technology Center rmaciel at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Wed May 27 19:54:27 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Wed, 27 May 2009 12:54:27 -0700 Subject: [Libvirt-cim] [PATCH 1 of 4] [TEST] Modifying pool.py to accomodate changes for diskpool In-Reply-To: References: Message-ID: <4A1D9A73.6040701@linux.vnet.ibm.com> Deepti B. Kalakeri wrote: > # HG changeset patch > # User Deepti B. Kalakeri > # Date 1243320943 25200 > # Node ID ecdbd16fefe8d7bdf82f85a74d66a312ab1c52e0 > # Parent 46291354b6916236a239356a475c6e6038c8e1c5 > [TEST] Modifying pool.py to accomodate changes for diskpool. > > Tested with KVM on F10 and with current sources. > Signed-off-by: Deepti B. Kalakeri > > diff -r 46291354b691 -r ecdbd16fefe8 suites/libvirt-cim/lib/XenKvmLib/pool.py This patch fails to apply for me. $ hg qpush applying pool.patch patching file suites/libvirt-cim/lib/XenKvmLib/pool.py Hunk #5 FAILED at 198 1 out of 5 hunks FAILED -- saving rejects to file suites/libvirt-cim/lib/XenKvmLib/pool.py.rej > @@ -183,75 +199,91 @@ Looks like this is the offending hunk. Can you rebase on updated sources? > logger.error("Unexpected rc code %s and description %s\n", > err_no, desc) > return FAIL > + > elif curr_cim_rev >= libvirt_cim_child_pool_rev: > - n_list = net_list(server, virt) > - for _net_name in n_list: > - net_xml = NetXML(server=server, networkname=_net_name, -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Thu May 28 09:53:18 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Thu, 28 May 2009 02:53:18 -0700 Subject: [Libvirt-cim] [PATCH 4 of 4] [TEST][Rebased] Modifying pool.py and vxml.py to accomodate changes for diskpool In-Reply-To: References: Message-ID: <13e68baa2f6de1737a7a.1243504398@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1243503287 25200 # Node ID 13e68baa2f6de1737a7ad17e9d4909782d3fe192 # Parent 3c17b4d15e84469ed3d2307a7123c75d99415dee [TEST][Rebased] Modifying pool.py and vxml.py to accomodate changes for diskpool. Tested with KVM on F10 and with current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r 3c17b4d15e84 -r 13e68baa2f6d suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Fri May 22 01:41:08 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Thu May 28 02:34:47 2009 -0700 @@ -32,7 +32,7 @@ from XenKvmLib import rpcs_service import pywbem from CimTest.CimExt import CIMClassMOF -from XenKvmLib.vxml import NetXML +from XenKvmLib.vxml import NetXML, PoolXML from XenKvmLib.xm_virt_util import virsh_version cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED @@ -106,8 +106,7 @@ return volume -def get_pool_rasds(server, virt, - pool_type="NetworkPool", filter_default=True): +def get_pool_rasds(server, virt, pool_type="NetworkPool", filter_default=True): net_pool_rasd_rev = 867 disk_pool_rasd_rev = 863 @@ -124,7 +123,7 @@ logger.error("%s template RASDs not supported. %s.", pool_type, detail) return SKIP, None - net_pool_rasds = [] + n_d_pool_rasds = [] ac_cn = get_typed_class(virt, "AllocationCapabilities") an_cn = get_typed_class(virt, "SettingsDefineCapabilities") @@ -140,11 +139,11 @@ if filter_default == True: for item in rasd: if item['InstanceID'] == "Default": - net_pool_rasds.append(item) + n_d_pool_rasds.append(item) else: return PASS, rasd - return PASS, net_pool_rasds + return PASS, n_d_pool_rasds def net_undefine(network, server, virt="Xen"): """Function undefine a given virtual network""" @@ -165,16 +164,32 @@ return PASS -def create_netpool(server, virt, test_pool, pool_attr_list, mode_type=0): - status = PASS +def undefine_diskpool(server, virt, dp_name): + libvirt_version = virsh_version(server, virt) + if libvirt_version >= '0.4.1': + if dp_name == None: + return FAIL + + cmd = "virsh -c %s pool-undefine %s" % (virt2uri(virt), dp_name) + ret, out = run_remote(server, cmd) + if ret != 0: + logger.error("Failed to undefine pool '%s'", dp_name) + return FAIL + + return PASS + +def create_pool(server, virt, test_pool, pool_attr_list, + mode_type=0, pool_type="NetworkPool"): + rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") rpcs_conn = eval("rpcs_service." 
+ rpcs)(server) curr_cim_rev, changeset = get_provider_version(virt, server) if curr_cim_rev < libvirt_cim_child_pool_rev: + try: rpcs_conn.CreateChildResourcePool() except pywbem.CIMError, (err_no, desc): - if err_no == cim_errno : + if err_no == cim_errno: logger.info("Got expected exception for '%s'service", cim_mname) logger.info("Errno is '%s' ", err_no) logger.info("Error string is '%s'", desc) @@ -183,78 +198,92 @@ logger.error("Unexpected rc code %s and description %s\n", err_no, desc) return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: - n_list = net_list(server, virt) - for _net_name in n_list: - net_xml = NetXML(server=server, networkname=_net_name, - virt=virt, is_new_net=False) - pool_use_attr = net_xml.xml_get_netpool_attr_list() - if pool_attr_list['Address'] in pool_use_attr: - logger.error("IP address is in use by a different network") - return FAIL + + if pool_type == "NetworkPool" : + n_list = net_list(server, virt) + for _net_name in n_list: + net_xml = NetXML(server=server, networkname=_net_name, + virt=virt, is_new_net=False) + pool_use_attr = net_xml.xml_get_netpool_attr_list() + if pool_attr_list['Address'] in pool_use_attr: + logger.error("IP address is in use by a different network") + return FAIL - status, net_pool_rasds = get_pool_rasds(server, virt) + status, n_d_pool_rasds = get_pool_rasds(server, virt, pool_type) if status != PASS: return status - if len(net_pool_rasds) == 0: - logger.error("We can not get NetPoolRASDs") + if len(n_d_pool_rasds) == 0: + logger.error("Failed to get '%sRASD'", pool_type) return FAIL else: - for i in range(0, len(net_pool_rasds)): - if net_pool_rasds[i]['ForwardMode'] == mode_type: - net_pool_rasds[i]['PoolID'] = "NetworkPool/%s" % test_pool - for attr, val in pool_attr_list.iteritems(): - net_pool_rasds[i][attr] = val - break - - pool_settings = inst_to_mof(net_pool_rasds[i]) + for i in range(0, len(n_d_pool_rasds)): + pool_id = "%s/%s" %(pool_type, test_pool) + n_d_pool_rasds[i]['PoolID'] = pool_id + if pool_type == "NetworkPool": + key = 'ForwardMode' + elif pool_type == "DiskPool": + key = 'Type' + + if n_d_pool_rasds[i][key] == mode_type: + for attr, val in pool_attr_list.iteritems(): + n_d_pool_rasds[i][attr] = val + break + + pool_settings = inst_to_mof(n_d_pool_rasds[i]) try: rpcs_conn.CreateChildResourcePool(ElementName=test_pool, Settings=[pool_settings]) except Exception, details: - logger.error("Error in childpool creation") - logger.error(details) + logger.error("Exception in create_pool()") + logger.error("Exception details: %s", details) return FAIL - return status + return PASS - -def verify_pool(server, virt, pooltype, poolname, pool_attr_list, mode_type=0): +def verify_pool(server, virt, poolname, pool_attr_list, mode_type=0, + pool_type="NetworkPool"): status = FAIL - pool_list = EnumInstances(server, pooltype) + pool_cn = get_typed_class(virt, pool_type) + pool_list = EnumInstances(server, pool_cn) if len(pool_list) < 1: - logger.error("Return %i instances, expected at least one instance", - len(pool_list)) + logger.error("Got %i instances, expected at least one instance", + len(pool_list)) return FAIL - poolid = "NetworkPool/%s" % poolname + poolid = "%s/%s" % (pool_type, poolname) for i in range(0, len(pool_list)): ret_pool = pool_list[i].InstanceID if ret_pool != poolid: continue - net_xml = NetXML(server, virt=virt, networkname=poolname, - is_new_net=False) + if pool_type == "NetworkPool": + net_xml = NetXML(server, virt=virt, networkname=poolname, + is_new_net=False) - ret_mode = 
net_xml.xml_get_netpool_mode() - libvirt_version = virsh_version(server, virt) + ret_mode = net_xml.xml_get_netpool_mode() + libvirt_version = virsh_version(server, virt) + #Forward mode support was added in 0.4.2 + if libvirt_version >= '0.4.2': + if mode_type == 1 and ret_mode != "nat": + logger.error("Error when verifying 'nat' type network") + return FAIL + elif mode_type == 2 and ret_mode != "route": + logger.error("Error when verifying 'route' type network") + return FAIL + ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() - #Forward mode support was added in 0.4.2 - if libvirt_version >= '0.4.2': - if mode_type == 1 and ret_mode != "nat": - logger.error("Got error when verify nat type") - return FAIL - elif mode_type == 2 and ret_mode != "route": - logger.error("Got error when verify route type") - return FAIL - - ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() + elif pool_type == "DiskPool": + disk_xml = PoolXML(server ,virt=virt, poolname=poolname, + is_new_pool=False) + ret_pool_attr_list = disk_xml.xml_get_pool_attr_list() for i in range(0, len(ret_pool_attr_list)): if ret_pool_attr_list[i] not in pool_attr_list.itervalues(): - logger.error("Got error when parsing %s", ret_pool_attr_list[i]) + logger.error("Failed to verify '%s'", ret_pool_attr_list[i]) return FAIL status = PASS diff -r 3c17b4d15e84 -r 13e68baa2f6d suites/libvirt-cim/lib/XenKvmLib/vxml.py --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Fri May 22 01:41:08 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Thu May 28 02:34:47 2009 -0700 @@ -293,7 +293,7 @@ class PoolXML(Virsh, XMLClass): def __init__(self, server, poolname=const.default_pool_name, - virt='xen'): + virt='xen', is_new_pool=True): XMLClass.__init__(self) if virt == 'XenFV': @@ -302,6 +302,17 @@ self.pool_name = poolname self.server = server + if is_new_pool is False: + cmd = "virsh pool-dumpxml %s" % self.pool_name + s, disk_xml = utils.run_remote(server, cmd) + if s != 0: + logger.error("Encounter error dump netxml") + return None + else: + self.xml_string = disk_xml + self.xdoc = minidom.parseString(self.xml_string) + return + pool = self.add_sub_node(self.xdoc, 'pool', type='dir') self.add_sub_node(pool, 'name', self.pool_name) target = self.add_sub_node(pool, 'target') @@ -313,10 +324,19 @@ def destroy_vpool(self): return self.run(self.server, 'pool-destroy', self.pool_name) + def undefine_vpool(self): + return self.run(self.server, 'pool-undefine', self.pool_name) + def xml_get_diskpool_name(self): dpoolname = self.get_value_xpath('/pool/name') return dpoolname + def xml_get_pool_attr_list(self): + pool_attr_list = [] + poolpath = self.get_value_xpath('/pool/target/path') + pool_attr_list.append(poolpath) + + return pool_attr_list class VirtXML(Virsh, XMLClass): """Base class for all XML generation & operation""" From deeptik at linux.vnet.ibm.com Thu May 28 09:53:14 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Thu, 28 May 2009 02:53:14 -0700 Subject: [Libvirt-cim] [PATCH 0 of 4] Adding tc to verify creation/deletion of DiskPool. Message-ID: The patchset includes new tc to cover the creation and deletion of DiskPool. It also includes the changes to RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py to align with the create_pool() and verify_pool() changes. This test case should be applied on top of "(#2) Return SKIP if the provider version doesn't support template pool RASDs" Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. 
Kalakeri From deeptik at linux.vnet.ibm.com Thu May 28 09:53:15 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Thu, 28 May 2009 02:53:15 -0700 Subject: [Libvirt-cim] [PATCH 1 of 4] [TEST] Add new tc to verify dir type diskpool creations using CreateChildResourcePool() In-Reply-To: References: Message-ID: <9b503078f05e7ff90f20.1243504395@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1243504352 25200 # Node ID 9b503078f05e7ff90f203f1b8075cec655118b0d # Parent 8eda9601bbace5fe53e4df54d6528d4fd3edf809 [TEST] Add new tc to verify dir type diskpool creations using CreateChildResourcePool(). This test case should be applied on top of "(#2) Return SKIP if the provider version doesn't support template pool RASDs" Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r 8eda9601bbac -r 9b503078f05e suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py Thu May 28 02:52:32 2009 -0700 @@ -0,0 +1,102 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case should test the CreateChildResourcePool service +# supplied by the RPCS provider. 
+# Input +# ----- +# IN -- ElementName -- String -- The desired name of the resource pool +# IN -- Settings -- String -- A string representation of a +# CIM_ResourceAllocationSettingData +# instance that represents the allocation +# assigned to this child pool +# IN -- ParentPool -- CIM_ResourcePool REF -- The parent pool from which +# to create this pool +# +# Output +# ------ +# OUT -- Pool -- CIM_ResourcePool REF -- The resulting resource pool +# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started +# OUT -- Error -- String -- Encoded error instance if the operation +# failed and did not return a job +# +# Exception details before Revision 846 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 846, the service is implemented +# +# -Date: 26.05.2009 + +import sys +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS +from XenKvmLib.const import do_main, platform_sup +from XenKvmLib.classes import get_typed_class +from XenKvmLib.common_util import destroy_diskpool +from XenKvmLib.pool import create_pool, verify_pool, undefine_diskpool + +test_pool = "diskpool" +dp_types = { "DISK_POOL_DIR" : 1 } + + + at do_main(platform_sup) +def main(): + options = main.options + server = options.ip + virt = options.virt + pool_attr = { "Path" : "/tmp" } + + # For now the test case support only the creation of + # dir type disk pool, later change to fs and netfs etc + for key, value in dp_types.iteritems(): + status = create_pool(server, virt, test_pool, pool_attr, + mode_type=value, pool_type= "DiskPool") + if status != PASS: + logger.error("Failed to create '%s' type diskpool '%s'", + key, test_pool) + return FAIL + + status = verify_pool(server, virt, test_pool, pool_attr, + mode_type=value, pool_type="DiskPool") + if status != PASS: + logger.error("Error in diskpool verification") + destroy_diskpool(server, virt, test_pool) + undefine_diskpool(server, virt, test_pool) + return FAIL + + status = destroy_diskpool(server, virt, test_pool) + if status != PASS: + logger.error("Unable to destroy diskpool '%s'", test_pool) + return FAIL + + status = undefine_diskpool(server, virt, test_pool) + if status != PASS: + logger.error("Unable to undefine diskpool '%s'", test_pool) + return FAIL + + status = PASS + + return status + +if __name__ == "__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Thu May 28 09:53:17 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Thu, 28 May 2009 02:53:17 -0700 Subject: [Libvirt-cim] [PATCH 3 of 4] [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool In-Reply-To: References: Message-ID: # HG changeset patch # User Deepti B. Kalakeri # Date 1243503501 25200 # Node ID ff0fb8fca1512987cb6a8210e7a3e95396dfa0a9 # Parent 13e68baa2f6de1737a7ad17e9d4909782d3fe192 [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri diff -r 13e68baa2f6d -r ff0fb8fca151 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py Thu May 28 02:38:21 2009 -0700 @@ -0,0 +1,133 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. 
+# +# Authors: +# Guolian Yun +# +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case should test the DeleteResourcePool service +# supplied by the RPCS provider. +# The DeleteResourcePool is used to delete a resource pool. +# DeleteResourcePool() details: +# Input +# ----- +# IN -- Pool -- CIM_ResourcePool REF -- The resource pool to delete +# +# Output +# ------ +# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started +# OUT -- Error-- String -- Encoded error instance if the operation +# failed and did not return a job. +# +# Exception details before Revision 841 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 841, the service is implemented +# The test case verifies DeleteResourcePool is able to delete the +# dir type diskpool. +# -Date: 26.05.2009 + +import sys +import pywbem +from XenKvmLib import rpcs_service +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS +from XenKvmLib.const import do_main, platform_sup, get_provider_version +from XenKvmLib.enumclass import EnumInstances, EnumNames +from XenKvmLib.classes import get_typed_class +from XenKvmLib.pool import create_pool, verify_pool, undefine_diskpool +from XenKvmLib.common_util import destroy_diskpool + +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED +cim_mname = "DeleteResourcePool" +libvirt_cim_child_pool_rev = 841 +test_pool = "dp_pool" +TYPE = 1 # Dir type diskpool + + at do_main(platform_sup) +def main(): + status = FAIL + options = main.options + server = options.ip + virt = options.virt + cn = get_typed_class(virt, "ResourcePoolConfigurationService") + rpcs_conn = eval("rpcs_service." 
+ cn)(server) + curr_cim_rev, changeset = get_provider_version(virt, server) + if curr_cim_rev < libvirt_cim_child_pool_rev: + + try: + rpcs_conn.DeleteResourcePool() + except pywbem.CIMError, (err_no, desc): + if err_no == cim_errno : + logger.info("Got expected exception for '%s' service", cim_mname) + logger.info("Errno is '%s' ", err_no) + logger.info("Error string is '%s'", desc) + return PASS + else: + logger.error("Unexpected rc code %s and description %s\n", + err_no, desc) + return status + + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + + try: + pool_attr = { "Path" : "/tmp" } + status = create_pool(server, virt, test_pool, pool_attr, + pool_type="DiskPool", mode_type=TYPE) + if status != PASS: + logger.error("Failed to create diskpool '%s'", test_pool) + return FAIL + + status = verify_pool(server, virt, test_pool, + pool_attr, pool_type="DiskPool") + if status != PASS: + raise Exception("Failed to verify diskpool '%s'" % test_pool) + + dp = get_typed_class(virt, 'DiskPool') + dp_id = "DiskPool/%s" % test_pool + pool_settings = None + pool = EnumNames(server, dp) + for i in range(0, len(pool)): + ret_pool = pool[i].keybindings['InstanceID'] + if ret_pool == dp_id: + pool_settings = pool[i] + break + + if pool_settings == None: + logger.error("Failed to get poolsettings for '%s'", test_pool) + return FAIL + + rpcs_conn.DeleteResourcePool(Pool = pool_settings) + pool = EnumInstances(server, dp) + for i in range(0, len(pool)): + ret_pool = pool[i].InstanceID + if ret_pool == dp_id: + raise Exception("Failed to delete diskpool '%s'" %test_pool) + + status = PASS + except Exception, details: + logger.error("Exception details: %s", details) + destroy_diskpool(server, virt, test_pool) + undefine_diskpool(server, virt, test_pool) + return FAIL + + return status + +if __name__ == "__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Thu May 28 09:53:16 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Thu, 28 May 2009 02:53:16 -0700 Subject: [Libvirt-cim] [PATCH 2 of 4] [TEST] Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py In-Reply-To: References: Message-ID: <8eda9601bbace5fe53e4.1243504396@elm3a148.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1243504212 25200 # Node ID 8eda9601bbace5fe53e4df54d6528d4fd3edf809 # Parent ff0fb8fca1512987cb6a8210e7a3e95396dfa0a9 [TEST] Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py to align with the changes to create_pool() and verify_pool() changes of pool.py. 
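For illustration, here is a minimal sketch of the calling convention these two tests are being moved to, i.e. how a cimtest case might drive the reworked helpers for a dir type disk pool. The helper names (create_pool, verify_pool, destroy_diskpool, undefine_diskpool) and the PASS/FAIL constants come from the patches in this series; the server address, hypervisor type, pool name, and backing path below are placeholders rather than values taken from the patch.

    from CimTest.ReturnCodes import PASS, FAIL
    from XenKvmLib.common_util import destroy_diskpool
    from XenKvmLib.pool import create_pool, verify_pool, undefine_diskpool

    server = "localhost"             # placeholder CIMOM host
    virt = "KVM"                     # placeholder hypervisor type
    test_pool = "example_pool"       # placeholder pool name
    pool_attr = { "Path" : "/tmp" }  # placeholder backing path

    # pool_type defaults to "NetworkPool"; passing "DiskPool" selects the
    # disk pool template RASDs, and mode_type=1 picks the dir type pool.
    status = create_pool(server, virt, test_pool, pool_attr,
                         mode_type=1, pool_type="DiskPool")
    if status == PASS:
        status = verify_pool(server, virt, test_pool, pool_attr,
                             mode_type=1, pool_type="DiskPool")

    # Clean up the pool regardless of the verification result.
    destroy_diskpool(server, virt, test_pool)
    undefine_diskpool(server, virt, test_pool)

The diff below applies the same pattern to the two network pool tests, which keep the default pool_type.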
diff -r ff0fb8fca151 -r 8eda9601bbac suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Thu May 28 02:38:21 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Thu May 28 02:50:12 2009 -0700 @@ -54,7 +54,7 @@ from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class from XenKvmLib.common_util import destroy_netpool -from XenKvmLib.pool import create_netpool, verify_pool, undefine_netpool +from XenKvmLib.pool import create_pool, verify_pool, undefine_netpool test_pool = "testpool" @@ -76,15 +76,17 @@ "IPRangeStart" : range_addr_start, "IPRangeEnd" : range_addr_end } - for item in range(0, 3): - status = create_netpool(options.ip, options.virt, - test_pool, pool_attr, mode_type=item) + net_type = ["isolated", "nat", "route"] + for item in range(0, len(net_type)): + logger.info("Creating '%s' type network", net_type[item]) + status = create_pool(options.ip, options.virt, + test_pool, pool_attr, mode_type=item) if status != PASS: logger.error("Error in networkpool creation") return status - status = verify_pool(options.ip, options.virt, np, - test_pool, pool_attr, mode_type=item) + status = verify_pool(options.ip, options.virt, + test_pool, pool_attr, mode_type=item) if status != PASS: logger.error("Error in networkpool verification") destroy_netpool(options.ip, options.virt, test_pool) diff -r ff0fb8fca151 -r 8eda9601bbac suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Thu May 28 02:38:21 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Thu May 28 02:50:12 2009 -0700 @@ -52,7 +52,7 @@ from XenKvmLib.enumclass import EnumInstances, EnumNames from XenKvmLib.classes import get_typed_class from XenKvmLib.common_util import destroy_netpool -from XenKvmLib.pool import create_netpool, verify_pool +from XenKvmLib.pool import create_pool, verify_pool cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED cim_mname = "DeleteResourcePool" @@ -71,7 +71,8 @@ rpcs_conn.DeleteResourcePool() except pywbem.CIMError, (err_no, desc): if err_no == cim_errno : - logger.info("Got expected exception for '%s' service", cim_mname) + logger.info("Got expected exception for '%s' service", + cim_mname) logger.info("Errno is '%s' ", err_no) logger.info("Error string is '%s'", desc) return PASS @@ -87,21 +88,22 @@ "IPRangeEnd" : "192.168.0.15", "ForwardMode" : "nat" } - np = get_typed_class(options.virt, 'NetworkPool') - np_id = "NetworkPool/%s" % test_pool - status = create_netpool(options.ip, options.virt, test_pool, pool_attr) + status = create_pool(options.ip, options.virt, test_pool, pool_attr) if status != PASS: logger.error("Error in networkpool creation") return status - status = verify_pool(options.ip, options.virt, np, - test_pool, pool_attr) + status = verify_pool(options.ip, options.virt, + test_pool, pool_attr) + if status != PASS: logger.error("Error in networkpool verification") destroy_netpool(options.ip, options.virt, test_pool) return status + np = get_typed_class(options.virt, 'NetworkPool') + np_id = "NetworkPool/%s" % test_pool netpool = EnumNames(options.ip, np) for i in range(0, len(netpool)): ret_pool = netpool[i].keybindings['InstanceID'] From deeptik at linux.vnet.ibm.com Thu May 28 12:15:14 2009 From: 
deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Thu, 28 May 2009 17:45:14 +0530 Subject: [Libvirt-cim] [PATCH 1 of 4] [TEST] Modifying pool.py to accomodate changes for diskpool In-Reply-To: <4A1D9A73.6040701@linux.vnet.ibm.com> References: <4A1D9A73.6040701@linux.vnet.ibm.com> Message-ID: <4A1E8052.4090200@linux.vnet.ibm.com> Kaitlin Rupert wrote: > Deepti B. Kalakeri wrote: >> # HG changeset patch >> # User Deepti B. Kalakeri >> # Date 1243320943 25200 >> # Node ID ecdbd16fefe8d7bdf82f85a74d66a312ab1c52e0 >> # Parent 46291354b6916236a239356a475c6e6038c8e1c5 >> [TEST] Modifying pool.py to accomodate changes for diskpool. >> >> Tested with KVM on F10 and with current sources. >> Signed-off-by: Deepti B. Kalakeri >> >> diff -r 46291354b691 -r ecdbd16fefe8 >> suites/libvirt-cim/lib/XenKvmLib/pool.py > > This patch fails to apply for me. > > $ hg qpush > applying pool.patch > patching file suites/libvirt-cim/lib/XenKvmLib/pool.py > Hunk #5 FAILED at 198 > 1 out of 5 hunks FAILED -- saving rejects to file > suites/libvirt-cim/lib/XenKvmLib/pool.py.rej I have rebased the changes and sent the same. > > >> @@ -183,75 +199,91 @@ > > Looks like this is the offending hunk. Can you rebase on updated sources? > >> logger.error("Unexpected rc code %s and description %s\n", >> err_no, desc) >> return FAIL >> + >> elif curr_cim_rev >= libvirt_cim_child_pool_rev: - n_list = >> net_list(server, virt) >> - for _net_name in n_list: >> - net_xml = NetXML(server=server, networkname=_net_name, > > -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 28 18:16:32 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 28 May 2009 11:16:32 -0700 Subject: [Libvirt-cim] [PATCH 2 of 4] [TEST] Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py In-Reply-To: <8eda9601bbace5fe53e4.1243504396@elm3a148.beaverton.ibm.com> References: <8eda9601bbace5fe53e4.1243504396@elm3a148.beaverton.ibm.com> Message-ID: <4A1ED500.40400@linux.vnet.ibm.com> Deepti B. Kalakeri wrote: > # HG changeset patch > # User Deepti B. Kalakeri > # Date 1243504212 25200 > # Node ID 8eda9601bbace5fe53e4df54d6528d4fd3edf809 > # Parent ff0fb8fca1512987cb6a8210e7a3e95396dfa0a9 > [TEST] Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py > > Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py to align with > the changes to create_pool() and verify_pool() changes of pool.py. > > diff -r ff0fb8fca151 -r 8eda9601bbac suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py This patch is missing a DCO. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 28 18:21:34 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 28 May 2009 11:21:34 -0700 Subject: [Libvirt-cim] [PATCH 3 of 4] [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool In-Reply-To: References: Message-ID: <4A1ED62E.2080100@linux.vnet.ibm.com> Deepti B. Kalakeri wrote: > # HG changeset patch > # User Deepti B. Kalakeri > # Date 1243503501 25200 > # Node ID ff0fb8fca1512987cb6a8210e7a3e95396dfa0a9 > # Parent 13e68baa2f6de1737a7ad17e9d4909782d3fe192 > [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool. > > Tested with KVM on F10 with current sources. > Signed-off-by: Deepti B. 
Kalakeri > > diff -r 13e68baa2f6d -r ff0fb8fca151 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py Thu May 28 02:38:21 2009 -0700 No complaints on this patch.. just a question. =) Do you plan on updating this test case to support deletion of other disk pool types? All the pool types should be identical. So if you plan on using different test cases, it would be a good idea to put parts of this test case into a function (or functions) at some point later on. -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 28 23:47:03 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 28 May 2009 16:47:03 -0700 Subject: [Libvirt-cim] Test Run Summary (May 28 2009): KVM on Fedora release 10.90 (Rawhide) with Pegasus Message-ID: <4A1F2277.6060208@linux.vnet.ibm.com> ================================================= Test Run Summary (May 28 2009): KVM on Fedora release 10.90 (Rawhide) with Pegasus ================================================= Distro: Fedora release 10.90 (Rawhide) Kernel: 2.6.29-0.24.rc0.git13.fc11.x86_64 libvirt: 0.6.3 Hypervisor: QEMU 0.10.1 CIMOM: Pegasus 2.7.2 Libvirt-cim revision: 886 Libvirt-cim changeset: c127bb551eb8 Cimtest revision: 693 Cimtest changeset: 6d369846b8ec ================================================= FAIL : 7 XFAIL : 3 SKIP : 9 PASS : 137 ----------------- Total : 156 ================================================= FAIL Test Summary: ComputerSystemIndication - 01_created_indication.py: FAIL HostSystem - 03_hs_to_settdefcap.py: FAIL RASD - 05_disk_rasd_emu_type.py: FAIL RASD - 07_parent_disk_pool.py: FAIL RedirectionService - 01_enum_crs.py: FAIL SettingsDefineCapabilities - 01_forward.py: FAIL SystemDevice - 01_forward.py: FAIL ================================================= XFAIL Test Summary: ComputerSystem - 33_suspend_reboot.py: XFAIL VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL VirtualSystemManagementService - 16_removeresource.py: XFAIL ================================================= SKIP Test Summary: ComputerSystem - 02_nosystems.py: SKIP LogicalDisk - 02_nodevs.py: SKIP VSSD - 02_bootldr.py: SKIP VirtualSystemMigrationService - 01_migratable_host.py: SKIP VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP ================================================= Full report: -------------------------------------------------------------------- AllocationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- AllocationCapabilities - 02_alloccap_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 01_enum.py: PASS -------------------------------------------------------------------- ComputerSystem - 02_nosystems.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- ComputerSystem - 03_defineVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 04_defineStartVS.py: PASS 
-------------------------------------------------------------------- ComputerSystem - 05_activate_defined_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 06_paused_active_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 22_define_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 23_pause_pause.py: PASS -------------------------------------------------------------------- ComputerSystem - 27_define_pause_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 32_start_reboot.py: PASS -------------------------------------------------------------------- ComputerSystem - 33_suspend_reboot.py: XFAIL ERROR - Got CIM error CIM_ERR_NOT_SUPPORTED: State not supported with return code 7 ERROR - Exception: Unable Suspend dom 'test_domain' InvokeMethod(RequestStateChange): CIM_ERR_NOT_SUPPORTED: State not supported Bug:<00012> -------------------------------------------------------------------- ComputerSystem - 35_start_reset.py: PASS -------------------------------------------------------------------- ComputerSystem - 40_RSC_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 41_cs_to_settingdefinestate.py: PASS -------------------------------------------------------------------- ComputerSystem - 42_cs_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystemIndication - 01_created_indication.py: FAIL ERROR - Waited too long for define indication ERROR - Waited too long for start indication ERROR - Waited too long for destroy indication -------------------------------------------------------------------- ElementAllocatedFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 03_reverse_errs.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 04_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 01_forward.py: PASS -------------------------------------------------------------------- ElementCapabilities - 02_reverse.py: PASS -------------------------------------------------------------------- ElementCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 05_hostsystem_cap.py: PASS -------------------------------------------------------------------- ElementConforms - 01_forward.py: PASS -------------------------------------------------------------------- ElementConforms - 02_reverse.py: PASS -------------------------------------------------------------------- ElementConforms - 03_ectp_fwd_errs.py: PASS -------------------------------------------------------------------- ElementConforms - 04_ectp_rev_errs.py: PASS -------------------------------------------------------------------- ElementSettingData - 01_forward.py: PASS -------------------------------------------------------------------- ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS -------------------------------------------------------------------- 
EnabledLogicalElementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 01_enum.py: PASS -------------------------------------------------------------------- HostSystem - 02_hostsystem_to_rasd.py: PASS -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - 'KVM_SettingsDefineCapabilities' returned 8 RASD objects instead of 4 CIM_ERR_INVALID_CLASS: Linux_ComputerSystem -------------------------------------------------------------------- HostSystem - 04_hs_to_EAPF.py: PASS -------------------------------------------------------------------- HostSystem - 05_hs_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 06_hs_to_vsms.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 01_forward.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 01_forward.py: PASS -------------------------------------------------------------------- HostedDependency - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 03_enabledstate.py: PASS -------------------------------------------------------------------- HostedDependency - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 01_forward.py: PASS -------------------------------------------------------------------- HostedResourcePool - 02_reverse.py: PASS -------------------------------------------------------------------- HostedResourcePool - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedResourcePool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedService - 01_forward.py: PASS -------------------------------------------------------------------- HostedService - 02_reverse.py: PASS -------------------------------------------------------------------- HostedService - 03_forward_errs.py: PASS -------------------------------------------------------------------- HostedService - 04_reverse_errs.py: PASS -------------------------------------------------------------------- KVMRedirectionSAP - 01_enum_KVMredSAP.py: PASS -------------------------------------------------------------------- LogicalDisk - 01_disk.py: PASS -------------------------------------------------------------------- LogicalDisk - 02_nodevs.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- LogicalDisk - 03_ld_gi_errs.py: PASS -------------------------------------------------------------------- Memory - 01_memory.py: PASS -------------------------------------------------------------------- Memory - 02_defgetmem.py: PASS -------------------------------------------------------------------- Memory - 03_mem_gi_errs.py: PASS -------------------------------------------------------------------- NetworkPort - 01_netport.py: PASS -------------------------------------------------------------------- NetworkPort - 02_np_gi_errors.py: PASS -------------------------------------------------------------------- NetworkPort - 
03_user_netport.py: PASS -------------------------------------------------------------------- Processor - 01_processor.py: PASS -------------------------------------------------------------------- Processor - 02_definesys_get_procs.py: PASS -------------------------------------------------------------------- Processor - 03_proc_gi_errs.py: PASS -------------------------------------------------------------------- Profile - 01_enum.py: PASS -------------------------------------------------------------------- Profile - 02_profile_to_elec.py: PASS -------------------------------------------------------------------- Profile - 03_rprofile_gi_errs.py: PASS -------------------------------------------------------------------- RASD - 01_verify_rasd_fields.py: PASS -------------------------------------------------------------------- RASD - 02_enum.py: PASS -------------------------------------------------------------------- RASD - 03_rasd_errs.py: PASS -------------------------------------------------------------------- RASD - 04_disk_rasd_size.py: PASS -------------------------------------------------------------------- RASD - 05_disk_rasd_emu_type.py: FAIL ERROR - Exception: EmulatedType Mismatch: got 0,expected 1 -------------------------------------------------------------------- RASD - 06_parent_net_pool.py: PASS -------------------------------------------------------------------- RASD - 07_parent_disk_pool.py: FAIL 07_parent_disk_pool.py:47: DeprecationWarning: the sets module is deprecated from sets import Set ERROR - Exception details: Got 5 recs instead of 3 -------------------------------------------------------------------- RedirectionService - 01_enum_crs.py: FAIL 01_enum_crs.py:29: DeprecationWarning: the sets module is deprecated from sets import Set ERROR - TypeError : __call__() takes exactly 1 argument (2 given) Traceback (most recent call last): File "./lib/XenKvmLib/const.py", line 139, in do_try File "01_enum_crs.py", line 108, in main if res_val != exp_val: TypeError: __call__() takes exactly 1 argument (2 given) ERROR - None CIM_ERR_INVALID_CLASS: Linux_ComputerSystem -------------------------------------------------------------------- RedirectionService - 02_enum_crscap.py: PASS -------------------------------------------------------------------- RedirectionService - 03_RedirectionSAP_errs.py: PASS -------------------------------------------------------------------- ReferencedProfile - 01_verify_refprof.py: PASS -------------------------------------------------------------------- ReferencedProfile - 02_refprofile_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 03_forward_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 05_RAPF_err.py: PASS -------------------------------------------------------------------- ResourcePool - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePool - 02_rp_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 01_enum.py: PASS 
-------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 07_DeleteResourcePool.py: PASS -------------------------------------------------------------------- ServiceAccessBySAP - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAccessBySAP - 02_reverse.py: PASS -------------------------------------------------------------------- ServiceAffectsElement - 01_forward.py: PASS -------------------------------------------------------------------- ServiceAffectsElement - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 01_forward.py: PASS -------------------------------------------------------------------- SettingsDefine - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 03_sds_fwd_errs.py: PASS -------------------------------------------------------------------- SettingsDefine - 04_sds_rev_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 01_forward.py: FAIL ERROR - KVM_SettingsDefineCapabilities returned 8 ResourcePool objects instead of 4 -------------------------------------------------------------------- SettingsDefineCapabilities - 03_forward_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS -------------------------------------------------------------------- SystemDevice - 01_forward.py: FAIL 01_forward.py:29: DeprecationWarning: the sets module is deprecated from sets import Set ERROR - DeviceID mismatch ERROR - Exception Expected DeviceID: ['test_domain/0', 'test_domain/1', 'test_domain/2'] Got: [u'test_domain/0'] -------------------------------------------------------------------- SystemDevice - 02_reverse.py: PASS -------------------------------------------------------------------- SystemDevice - 03_fwderrs.py: PASS -------------------------------------------------------------------- VSSD - 01_enum.py: PASS -------------------------------------------------------------------- VSSD - 02_bootldr.py: SKIP -------------------------------------------------------------------- VSSD - 03_vssd_gi_errs.py: PASS -------------------------------------------------------------------- VSSD - 04_vssd_to_rasd.py: PASS -------------------------------------------------------------------- 
VirtualSystemManagementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 01_definesystem_name.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 02_destroysystem.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 03_definesystem_ess.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 04_definesystem_ers.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 05_destroysystem_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 06_addresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 07_addresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 08_modifyresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL -------------------------------------------------------------------- VirtualSystemManagementService - 10_hv_version.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 11_define_memrasdunits.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 12_referenced_config.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 13_refconfig_additional_devs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 14_define_sys_disk.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 15_mod_system_settings.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 16_removeresource.py: XFAIL ERROR - 0 RASD insts for domain/mouse:ps2 CIM_ERR_NOT_FOUND: No such instance (no device domain/mouse:ps2) Bug:<00014> -------------------------------------------------------------------- VirtualSystemManagementService - 17_removeresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationService - 01_migratable_host.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP -------------------------------------------------------------------- 
VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 02_vsmsd_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 01_forward.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 02_reverse.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 03_create_snapshot.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py: PASS -------------------------------------------------------------------- -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Thu May 28 23:52:57 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 28 May 2009 16:52:57 -0700 Subject: [Libvirt-cim] [PATCH] A few schema cleanups after migrating from 2.16 to 2.21 In-Reply-To: <4A1D72AE.6070300@linux.vnet.ibm.com> References: <4A1D72AE.6070300@linux.vnet.ibm.com> Message-ID: <4A1F23D9.7000500@linux.vnet.ibm.com> Richard Maciel wrote: > Kaitlin Rupert wrote: >> # HG changeset patch >> # User Kaitlin Rupert >> # Date 1242841306 25200 >> # Node ID aa8e071730d2ce20064f1c0295a8005e31ef2cea >> # Parent 10e45fca47f0d19eddcf7bb1559ba9e7397aea24 >> A few schema cleanups after migrating from 2.16 to 2.21 >> >> Fixes: >> -CIM_HostedResourcePool is needed in cimv2 to register >> Virt_HostedResourcePool >> as a cross-namespace provider >> >> -Implementation specific VirtualSystemSnapshotService should be a >> subclass of >> CIM_VirtualSystemSnapshotService, not >> Virt_VirtualSystemSnapshotService >> >> -CIM_VirtualSystemMigrationSettingData.mof and >> CIM_VirtualSystemMigrationService.mof are needed in interop to >> properly >> register ECTP asa cross-namespace provider > > > CIM_VirtualSystemMigrationSettingData is not accessible in the interop > namespace. > > To test I did a query for the aforementioned provider in the > root/interop space: > > wbemcli ein > 'http://localhost:5988/root/interop:CIM_VirtualSystemMigrationSettingData' Can you redirect the output of base_schema/install_base_schema.sh so that the output goes to stdout? cimmof should print an error if it is unable to add a class to repository. If cimmofl fails on a given class, can you call cimmofl by hand using the -trace option? 
So if the following fails: cimmofl -uc -aEV -R/var/lib/Pegasus -n root/virt cim_schema_2.21.0.mof can you try: cimmofl -uc -aEV -trace -R/var/lib/Pegasus -n root/virt cim_schema_2.21.0.mof -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 29 00:34:05 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Thu, 28 May 2009 17:34:05 -0700 Subject: [Libvirt-cim] Test Run Summary (May 28 2009): KVM on Fedora release 10.90 (Rawhide) with Pegasus In-Reply-To: <4A1F2277.6060208@linux.vnet.ibm.com> References: <4A1F2277.6060208@linux.vnet.ibm.com> Message-ID: <4A1F2D7D.2000904@linux.vnet.ibm.com> I'll be investigating the rest of these tomorrow. > -------------------------------------------------------------------- > ComputerSystemIndication - 01_created_indication.py: FAIL > ERROR - Waited too long for define indication > ERROR - Waited too long for start indication > ERROR - Waited too long for destroy indication This failed due to some stale subscriptions. Which means the test doesn't always clean up properly. > -------------------------------------------------------------------- > HostSystem - 03_hs_to_settdefcap.py: FAIL > ERROR - 'KVM_SettingsDefineCapabilities' returned 8 RASD objects > instead of 4 > CIM_ERR_INVALID_CLASS: Linux_ComputerSystem > -------------------------------------------------------------------- This is due to the recent DiskPoolRASD / NetPoolRASDs changes. I'm working on a fix for this. > -------------------------------------------------------------------- > RASD - 05_disk_rasd_emu_type.py: FAIL > ERROR - Exception: EmulatedType Mismatch: got 0,expected 1 > -------------------------------------------------------------------- > -------------------------------------------------------------------- > RASD - 07_parent_disk_pool.py: FAIL > 07_parent_disk_pool.py:47: DeprecationWarning: the sets module is > deprecated > from sets import Set > ERROR - Exception details: Got 5 recs instead of 3 This is due to a setup issue on the system. 
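As a side note on the RASD-count mismatches above, a minimal sketch (not from this thread), assuming the get_pool_rasds() helper as defined in the pool.py patch elsewhere in this thread, of how a test can count only the "Default" template RASDs a provider advertises for a given pool type; count_default_templates() is a hypothetical name used for illustration:

# Sketch only: count the "Default" template RASDs advertised for a pool type.
# get_pool_rasds() returns (status, list) and, with filter_default=True,
# keeps only the RASDs whose InstanceID is "Default".
from XenKvmLib.pool import get_pool_rasds
from CimTest.ReturnCodes import PASS

def count_default_templates(server, virt, pool_type="DiskPool"):
    status, rasds = get_pool_rasds(server, virt, pool_type=pool_type,
                                   filter_default=True)
    if status != PASS:
        return -1
    return len(rasds)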
> -------------------------------------------------------------------- > RedirectionService - 01_enum_crs.py: FAIL > 01_enum_crs.py:29: DeprecationWarning: the sets module is deprecated > from sets import Set > ERROR - TypeError : __call__() takes exactly 1 argument (2 given) > Traceback (most recent call last): > File "./lib/XenKvmLib/const.py", line 139, in do_try > File "01_enum_crs.py", line 108, in main > if res_val != exp_val: > TypeError: __call__() takes exactly 1 argument (2 given) > ERROR - None > CIM_ERR_INVALID_CLASS: Linux_ComputerSystem > -------------------------------------------------------------------- > -------------------------------------------------------------------- > SettingsDefineCapabilities - 01_forward.py: FAIL > ERROR - KVM_SettingsDefineCapabilities returned 8 ResourcePool > objects instead of 4 > -------------------------------------------------------------------- > -------------------------------------------------------------------- > SystemDevice - 01_forward.py: FAIL > 01_forward.py:29: DeprecationWarning: the sets module is deprecated > from sets import Set > ERROR - DeviceID mismatch > ERROR - Exception Expected DeviceID: ['test_domain/0', > 'test_domain/1', 'test_domain/2'] > Got: [u'test_domain/0'] > -------------------------------------------------------------------- -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Fri May 29 08:28:15 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Fri, 29 May 2009 13:58:15 +0530 Subject: [Libvirt-cim] [PATCH 1 of 4] [TEST] Move cleanup_guest_netpool() to vsmigrations.py In-Reply-To: References: Message-ID: <4A1F9C9F.7010607@linux.vnet.ibm.com> Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1242946256 25200 > # Node ID f26248c6fb4186ee6a942192f25eab8d4bcc5626 > # Parent 922d6e12a15460adbabb955c1c35e4d7ff86b4e0 > [TEST] Move cleanup_guest_netpool() to vsmigrations.py > > Migration test 06 - 08 duplicate this same code. > > Also, if the migration is a localhost one, set the hostname to localhost. > Otherwise, the providers will return an error saying the guest already exists > on the target (because the providers haven't detected a localhost migration). > > If the target system name is localhost, the migration will always be a local > migration. Be sure to set remote_migration accordingly. 
> > Signed-off-by: Kaitlin Rupert > > diff -r 922d6e12a154 -r f26248c6fb41 suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py > --- a/suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py Thu May 21 13:31:38 2009 -0700 > +++ b/suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py Thu May 21 15:50:56 2009 -0700 > @@ -30,10 +30,11 @@ > from CimTest.ReturnCodes import PASS, FAIL, SKIP > from XenKvmLib.enumclass import EnumInstances > from XenKvmLib.classes import get_typed_class, virt_types > -from XenKvmLib.xm_virt_util import domain_list > -from XenKvmLib.const import get_provider_version > +from XenKvmLib.xm_virt_util import domain_list, net_list > +from XenKvmLib.const import get_provider_version, default_network_name > from CimTest.Globals import logger, CIM_USER, CIM_PASS, CIM_NS, \ > CIM_ERROR_ENUMERATE > +from XenKvmLib.common_util import destroy_netpool > > # Migration constants > CIM_MIGRATE_OFFLINE=1 > @@ -94,9 +95,14 @@ > def check_mig_support(virt, options): > s_sysname = gethostbyaddr(options.ip)[0] > t_sysname = gethostbyaddr(options.t_url)[0] > - if virt == 'KVM' and (t_sysname == s_sysname or t_sysname in s_sysname): > - logger.info("Libvirt does not support local migration for KVM") > - return SKIP, s_sysname, t_sysname > + > + if t_sysname == s_sysname or t_sysname in s_sysname: > + if virt == 'KVM': > + logger.info("Libvirt does not support local migration for KVM") > + return SKIP, s_sysname, t_sysname > + > + #localhost migration is supported by Xen > + return PASS, s_sysname, "localhost" > > return PASS, s_sysname, t_sysname > > @@ -364,12 +370,15 @@ > logger.error("Guest to be migrated not specified.") > return FAIL > > + if t_sysname == "localhost": > + remote_migrate = 0 > + > try: > if remote_migrate == 1: > - status, req_image, backup_image = remote_copy_guest_image(virt, > - s_sysname, > - t_sysname, > - guest_name) > + status, req_image, bkup_image = remote_copy_guest_image(virt, > + s_sysname, > + t_sysname, > + guest_name) > if status != PASS: > raise Exception("Failure from remote_copy_guest_image()") > > @@ -397,7 +406,10 @@ > logger.info("Migrating '%s'.. this will take some time.", guest_name) > > # Migrate the guest to t_sysname > - status, ret = migrate_guest_to_host(vsmservice, guest_ref, t_sysname, msd) > + status, ret = migrate_guest_to_host(vsmservice, > + guest_ref, > + t_sysname, > + msd) > if status == FAIL: > raise Exception("Failed to Migrate guest '%s' from '%s' to '%s'" \ > % (guest_name, s_sysname, t_sysname)) > @@ -413,5 +425,50 @@ > logger.error("Exception details %s", details) > status = FAIL > > - cleanup_image(backup_image, req_image, t_sysname, remote_migrate=1) > + if remote_migrate == 1: > + cleanup_image(bkup_image, req_image, t_sysname, remote_migrate=1) > + > return status > + > +def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): > + # Clean the domain on target machine. > + # This is req when migration is successful, also when migration is not > + # completely successful VM might be created on the target machine > + # and hence need to clean. 
> + target_list = domain_list(t_sysname, virt) > + if target_list != None and test_dom in target_list: > + ret_value = cxml.destroy(t_sysname) > The ret_value from cxml.destroy() is False here, so even though the VM is destroyed we still get the following misleading log: Failed to destroy the migrated domain 'VM_frm_elm3b217.beaverton.ibm.com' on 'localhost' Thu, 28 May 2009 22:35:49:TEST LOG:INFO - Failed to destroy the migrated domain 'VM_frm_elm3b217.beaverton.ibm.com' on 'localhost' Use cxml.cim_destroy() instead. > + if not ret_value: > + logger.info("Failed to destroy the migrated domain '%s' on '%s'", > + test_dom, t_sysname) > + > + ret_value = cxml.undefine(t_sysname) > Same here: cxml.undefine() returns False, so the following statement gets printed: Thu, 28 May 2009 22:52:39:TEST LOG:INFO - Failed to undefine the migrated domain 'VM_frm_elm3b217.beaverton.ibm.com' on 'localhost' > + if not ret_value: > + logger.info("Failed to undefine the migrated domain '%s' on '%s'", > + test_dom, t_sysname) > + > + # Done cleaning environment > + if t_sysname == "localhost": > + return > + > + # Remote Migration not Successful, clean the domain on src machine > + src_list = domain_list(s_sysname, virt) > + if src_list != None and test_dom in src_list: > + ret_value = cxml.cim_destroy(s_sysname) > + if not ret_value: > + logger.info("Failed to destroy the domain '%s' on the source '%s'", > + test_dom, s_sysname) > + > + ret_value = cxml.undefine(s_sysname) > + if not ret_value: > + logger.info("Failed to undefine the domain '%s' on source '%s'", > + test_dom, s_sysname) > + > + # clean the networkpool created on the remote machine > + target_net_list = net_list(t_sysname, virt) > + if target_net_list != None and default_network_name in target_net_list: > + ret_value = destroy_netpool(t_sysname, virt, default_network_name) > + if ret_value != PASS: > + logger.info("Unable to destroy networkpool '%s' on '%s'", > + default_network_name, t_sysname) > + > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim > -- Thanks and Regards, Deepti B.
Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Fri May 29 08:48:42 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Fri, 29 May 2009 14:18:42 +0530 Subject: [Libvirt-cim] [PATCH 4 of 4] [TEST] 08 - Remove cleanup_guest_netpool() def and call it from vsmigration.py In-Reply-To: <6495a6ca36878b10e7bb.1242946803@localhost.localdomain> References: <6495a6ca36878b10e7bb.1242946803@localhost.localdomain> Message-ID: <4A1FA16A.2000202@linux.vnet.ibm.com> Kaitlin Rupert wrote: > # HG changeset patch > # User Kaitlin Rupert > # Date 1242946256 25200 > # Node ID 6495a6ca36878b10e7bb57eae53eb6a943d8b4d1 > # Parent 4e7716bd2775599cdafd858d7875924669c41766 > [TEST] 08 - Remove cleanup_guest_netpool() def and call it from vsmigration.py > > Signed-off-by: Kaitlin Rupert > > diff -r 4e7716bd2775 -r 6495a6ca3687 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py > --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 > +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 > @@ -35,9 +35,9 @@ > from XenKvmLib.const import do_main, default_network_name > from CimTest.ReturnCodes import PASS, FAIL, SKIP > from XenKvmLib.classes import get_typed_class > -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate > -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ > - destroy_netpool > +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ > + cleanup_guest_netpool > +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf > > sup_types = ['KVM', 'Xen'] > > @@ -168,13 +168,7 @@ > cleanup_guest(virt, cxml, test_dom, t_sysname, s_sysname) > status = FAIL > > - # clean the networkpool created on the remote machine > - target_net_list = net_list(t_sysname, virt) > - if target_net_list != None and net_pool_name in target_net_list: > - ret_value = destroy_netpool(t_sysname, virt, net_pool_name) > - if ret_value != PASS: > - logger.info("Unable to destroy networkpool '%s' on '%s'", > - net_pool_name, t_sysname) > + cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname) > This tc fails with the following: -------------------------------------------------------------------- VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: FAIL ERROR - JobStatus for dom 'VM_frm_elm3b217.beaverton.ibm.com' has 'Domain failed to shutdown in 120 seconds' instead of 'Completed' -------------------------------------------------------------------- I tried increasing the timeout and run the test but it still failed. > if status_restart != PASS or status_resume != PASS: > status = FAIL > > _______________________________________________ > Libvirt-cim mailing list > Libvirt-cim at redhat.com > https://www.redhat.com/mailman/listinfo/libvirt-cim > -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Fri May 29 09:02:23 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Fri, 29 May 2009 02:02:23 -0700 Subject: [Libvirt-cim] [PATCH 1 of 4] [TEST][Rebased] Modifying pool.py and vxml.py to accomodate changes for diskpool In-Reply-To: References: Message-ID: <72be8ddf94c096cae3e7.1243587743@elm3b151.beaverton.ibm.com> # HG changeset patch # User Deepti B. 
Kalakeri # Date 1243575989 25200 # Node ID 72be8ddf94c096cae3e795c6a7d4634b915922c1 # Parent 3c17b4d15e84469ed3d2307a7123c75d99415dee [TEST][Rebased] Modifying pool.py and vxml.py to accomodate changes for diskpool. Tested with KVM on F10 and with current sources. Signed-off-by: Deepti B. Kalakeri diff -r 3c17b4d15e84 -r 72be8ddf94c0 suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Fri May 22 01:41:08 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Thu May 28 22:46:29 2009 -0700 @@ -32,7 +32,7 @@ from XenKvmLib import rpcs_service import pywbem from CimTest.CimExt import CIMClassMOF -from XenKvmLib.vxml import NetXML +from XenKvmLib.vxml import NetXML, PoolXML from XenKvmLib.xm_virt_util import virsh_version cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED @@ -106,8 +106,7 @@ return volume -def get_pool_rasds(server, virt, - pool_type="NetworkPool", filter_default=True): +def get_pool_rasds(server, virt, pool_type="NetworkPool", filter_default=True): net_pool_rasd_rev = 867 disk_pool_rasd_rev = 863 @@ -124,7 +123,7 @@ logger.error("%s template RASDs not supported. %s.", pool_type, detail) return SKIP, None - net_pool_rasds = [] + n_d_pool_rasds = [] ac_cn = get_typed_class(virt, "AllocationCapabilities") an_cn = get_typed_class(virt, "SettingsDefineCapabilities") @@ -140,11 +139,11 @@ if filter_default == True: for item in rasd: if item['InstanceID'] == "Default": - net_pool_rasds.append(item) + n_d_pool_rasds.append(item) else: return PASS, rasd - return PASS, net_pool_rasds + return PASS, n_d_pool_rasds def net_undefine(network, server, virt="Xen"): """Function undefine a given virtual network""" @@ -165,16 +164,32 @@ return PASS -def create_netpool(server, virt, test_pool, pool_attr_list, mode_type=0): - status = PASS +def undefine_diskpool(server, virt, dp_name): + libvirt_version = virsh_version(server, virt) + if libvirt_version >= '0.4.1': + if dp_name == None: + return FAIL + + cmd = "virsh -c %s pool-undefine %s" % (virt2uri(virt), dp_name) + ret, out = run_remote(server, cmd) + if ret != 0: + logger.error("Failed to undefine pool '%s'", dp_name) + return FAIL + + return PASS + +def create_pool(server, virt, test_pool, pool_attr_list, + mode_type=0, pool_type="NetworkPool"): + rpcs = get_typed_class(virt, "ResourcePoolConfigurationService") rpcs_conn = eval("rpcs_service." 
+ rpcs)(server) curr_cim_rev, changeset = get_provider_version(virt, server) if curr_cim_rev < libvirt_cim_child_pool_rev: + try: rpcs_conn.CreateChildResourcePool() except pywbem.CIMError, (err_no, desc): - if err_no == cim_errno : + if err_no == cim_errno: logger.info("Got expected exception for '%s'service", cim_mname) logger.info("Errno is '%s' ", err_no) logger.info("Error string is '%s'", desc) @@ -183,78 +198,92 @@ logger.error("Unexpected rc code %s and description %s\n", err_no, desc) return FAIL + elif curr_cim_rev >= libvirt_cim_child_pool_rev: - n_list = net_list(server, virt) - for _net_name in n_list: - net_xml = NetXML(server=server, networkname=_net_name, - virt=virt, is_new_net=False) - pool_use_attr = net_xml.xml_get_netpool_attr_list() - if pool_attr_list['Address'] in pool_use_attr: - logger.error("IP address is in use by a different network") - return FAIL + + if pool_type == "NetworkPool" : + n_list = net_list(server, virt) + for _net_name in n_list: + net_xml = NetXML(server=server, networkname=_net_name, + virt=virt, is_new_net=False) + pool_use_attr = net_xml.xml_get_netpool_attr_list() + if pool_attr_list['Address'] in pool_use_attr: + logger.error("IP address is in use by a different network") + return FAIL - status, net_pool_rasds = get_pool_rasds(server, virt) + status, n_d_pool_rasds = get_pool_rasds(server, virt, pool_type) if status != PASS: return status - if len(net_pool_rasds) == 0: - logger.error("We can not get NetPoolRASDs") + if len(n_d_pool_rasds) == 0: + logger.error("Failed to get '%sRASD'", pool_type) return FAIL else: - for i in range(0, len(net_pool_rasds)): - if net_pool_rasds[i]['ForwardMode'] == mode_type: - net_pool_rasds[i]['PoolID'] = "NetworkPool/%s" % test_pool - for attr, val in pool_attr_list.iteritems(): - net_pool_rasds[i][attr] = val - break - - pool_settings = inst_to_mof(net_pool_rasds[i]) + for i in range(0, len(n_d_pool_rasds)): + pool_id = "%s/%s" %(pool_type, test_pool) + n_d_pool_rasds[i]['PoolID'] = pool_id + if pool_type == "NetworkPool": + key = 'ForwardMode' + elif pool_type == "DiskPool": + key = 'Type' + + if n_d_pool_rasds[i][key] == mode_type: + for attr, val in pool_attr_list.iteritems(): + n_d_pool_rasds[i][attr] = val + break + + pool_settings = inst_to_mof(n_d_pool_rasds[i]) try: rpcs_conn.CreateChildResourcePool(ElementName=test_pool, Settings=[pool_settings]) except Exception, details: - logger.error("Error in childpool creation") - logger.error(details) + logger.error("Exception in create_pool()") + logger.error("Exception details: %s", details) return FAIL - return status + return PASS - -def verify_pool(server, virt, pooltype, poolname, pool_attr_list, mode_type=0): +def verify_pool(server, virt, poolname, pool_attr_list, mode_type=0, + pool_type="NetworkPool"): status = FAIL - pool_list = EnumInstances(server, pooltype) + pool_cn = get_typed_class(virt, pool_type) + pool_list = EnumInstances(server, pool_cn) if len(pool_list) < 1: - logger.error("Return %i instances, expected at least one instance", - len(pool_list)) + logger.error("Got %i instances, expected at least one instance", + len(pool_list)) return FAIL - poolid = "NetworkPool/%s" % poolname + poolid = "%s/%s" % (pool_type, poolname) for i in range(0, len(pool_list)): ret_pool = pool_list[i].InstanceID if ret_pool != poolid: continue - net_xml = NetXML(server, virt=virt, networkname=poolname, - is_new_net=False) + if pool_type == "NetworkPool": + net_xml = NetXML(server, virt=virt, networkname=poolname, + is_new_net=False) - ret_mode = 
net_xml.xml_get_netpool_mode() - libvirt_version = virsh_version(server, virt) + ret_mode = net_xml.xml_get_netpool_mode() + libvirt_version = virsh_version(server, virt) + #Forward mode support was added in 0.4.2 + if libvirt_version >= '0.4.2': + if mode_type == 1 and ret_mode != "nat": + logger.error("Error when verifying 'nat' type network") + return FAIL + elif mode_type == 2 and ret_mode != "route": + logger.error("Error when verifying 'route' type network") + return FAIL + ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() - #Forward mode support was added in 0.4.2 - if libvirt_version >= '0.4.2': - if mode_type == 1 and ret_mode != "nat": - logger.error("Got error when verify nat type") - return FAIL - elif mode_type == 2 and ret_mode != "route": - logger.error("Got error when verify route type") - return FAIL - - ret_pool_attr_list = net_xml.xml_get_netpool_attr_list() + elif pool_type == "DiskPool": + disk_xml = PoolXML(server ,virt=virt, poolname=poolname, + is_new_pool=False) + ret_pool_attr_list = disk_xml.xml_get_pool_attr_list() for i in range(0, len(ret_pool_attr_list)): if ret_pool_attr_list[i] not in pool_attr_list.itervalues(): - logger.error("Got error when parsing %s", ret_pool_attr_list[i]) + logger.error("Failed to verify '%s'", ret_pool_attr_list[i]) return FAIL status = PASS diff -r 3c17b4d15e84 -r 72be8ddf94c0 suites/libvirt-cim/lib/XenKvmLib/vxml.py --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Fri May 22 01:41:08 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Thu May 28 22:46:29 2009 -0700 @@ -293,7 +293,7 @@ class PoolXML(Virsh, XMLClass): def __init__(self, server, poolname=const.default_pool_name, - virt='xen'): + virt='xen', is_new_pool=True): XMLClass.__init__(self) if virt == 'XenFV': @@ -302,6 +302,17 @@ self.pool_name = poolname self.server = server + if is_new_pool is False: + cmd = "virsh pool-dumpxml %s" % self.pool_name + s, disk_xml = utils.run_remote(server, cmd) + if s != 0: + logger.error("Encounter error dump netxml") + return None + else: + self.xml_string = disk_xml + self.xdoc = minidom.parseString(self.xml_string) + return + pool = self.add_sub_node(self.xdoc, 'pool', type='dir') self.add_sub_node(pool, 'name', self.pool_name) target = self.add_sub_node(pool, 'target') @@ -313,10 +324,19 @@ def destroy_vpool(self): return self.run(self.server, 'pool-destroy', self.pool_name) + def undefine_vpool(self): + return self.run(self.server, 'pool-undefine', self.pool_name) + def xml_get_diskpool_name(self): dpoolname = self.get_value_xpath('/pool/name') return dpoolname + def xml_get_pool_attr_list(self): + pool_attr_list = [] + poolpath = self.get_value_xpath('/pool/target/path') + pool_attr_list.append(poolpath) + + return pool_attr_list class VirtXML(Virsh, XMLClass): """Base class for all XML generation & operation""" From deeptik at linux.vnet.ibm.com Fri May 29 09:02:25 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Fri, 29 May 2009 02:02:25 -0700 Subject: [Libvirt-cim] [PATCH 3 of 4] [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool In-Reply-To: References: Message-ID: <1420ff7756e0d69ed1ed.1243587745@elm3b151.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1243576110 25200 # Node ID 1420ff7756e0d69ed1eda45c1065079c36d8e92c # Parent 10e25eb82585ea86195ff7047cfe32645065ff58 [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool. Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r 10e25eb82585 -r 1420ff7756e0 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py Thu May 28 22:48:30 2009 -0700 @@ -0,0 +1,133 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Guolian Yun +# +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case should test the DeleteResourcePool service +# supplied by the RPCS provider. +# The DeleteResourcePool is used to delete a resource pool. +# DeleteResourcePool() details: +# Input +# ----- +# IN -- Pool -- CIM_ResourcePool REF -- The resource pool to delete +# +# Output +# ------ +# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started +# OUT -- Error-- String -- Encoded error instance if the operation +# failed and did not return a job. +# +# Exception details before Revision 841 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 841, the service is implemented +# The test case verifies DeleteResourcePool is able to delete the +# dir type diskpool. +# -Date: 26.05.2009 + +import sys +import pywbem +from XenKvmLib import rpcs_service +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS +from XenKvmLib.const import do_main, platform_sup, get_provider_version +from XenKvmLib.enumclass import EnumInstances, EnumNames +from XenKvmLib.classes import get_typed_class +from XenKvmLib.pool import create_pool, verify_pool, undefine_diskpool +from XenKvmLib.common_util import destroy_diskpool + +cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED +cim_mname = "DeleteResourcePool" +libvirt_cim_child_pool_rev = 841 +test_pool = "dp_pool" +TYPE = 1 # Dir type diskpool + + at do_main(platform_sup) +def main(): + status = FAIL + options = main.options + server = options.ip + virt = options.virt + cn = get_typed_class(virt, "ResourcePoolConfigurationService") + rpcs_conn = eval("rpcs_service." 
+ cn)(server) + curr_cim_rev, changeset = get_provider_version(virt, server) + if curr_cim_rev < libvirt_cim_child_pool_rev: + + try: + rpcs_conn.DeleteResourcePool() + except pywbem.CIMError, (err_no, desc): + if err_no == cim_errno : + logger.info("Got expected exception for '%s' service", cim_mname) + logger.info("Errno is '%s' ", err_no) + logger.info("Error string is '%s'", desc) + return PASS + else: + logger.error("Unexpected rc code %s and description %s\n", + err_no, desc) + return status + + elif curr_cim_rev >= libvirt_cim_child_pool_rev: + + try: + pool_attr = { "Path" : "/tmp" } + status = create_pool(server, virt, test_pool, pool_attr, + pool_type="DiskPool", mode_type=TYPE) + if status != PASS: + logger.error("Failed to create diskpool '%s'", test_pool) + return FAIL + + status = verify_pool(server, virt, test_pool, + pool_attr, pool_type="DiskPool") + if status != PASS: + raise Exception("Failed to verify diskpool '%s'" % test_pool) + + dp = get_typed_class(virt, 'DiskPool') + dp_id = "DiskPool/%s" % test_pool + pool_settings = None + pool = EnumNames(server, dp) + for i in range(0, len(pool)): + ret_pool = pool[i].keybindings['InstanceID'] + if ret_pool == dp_id: + pool_settings = pool[i] + break + + if pool_settings == None: + logger.error("Failed to get poolsettings for '%s'", test_pool) + return FAIL + + rpcs_conn.DeleteResourcePool(Pool = pool_settings) + pool = EnumInstances(server, dp) + for i in range(0, len(pool)): + ret_pool = pool[i].InstanceID + if ret_pool == dp_id: + raise Exception("Failed to delete diskpool '%s'" %test_pool) + + status = PASS + except Exception, details: + logger.error("Exception details: %s", details) + destroy_diskpool(server, virt, test_pool) + undefine_diskpool(server, virt, test_pool) + return FAIL + + return status + +if __name__ == "__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Fri May 29 09:02:22 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Fri, 29 May 2009 02:02:22 -0700 Subject: [Libvirt-cim] [PATCH 0 of 4] Adding tc to verify creation/deletion of DiskPool. Message-ID: The patchset includes new tc to cover the creation and deletion of DiskPool. It also includes the changes to RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py to align with the create_pool() and verify_pool() changes. This test case should be applied on top of "(#2) Return SKIP if the provide version doesn't support template pool RASDs" Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. Kalakeri From deeptik at linux.vnet.ibm.com Fri May 29 09:02:24 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Fri, 29 May 2009 02:02:24 -0700 Subject: [Libvirt-cim] [PATCH 2 of 4] [TEST] Add new tc to verify dir type diskpool creations using CreateChildResourcePool() In-Reply-To: References: Message-ID: <10e25eb82585ea86195f.1243587744@elm3b151.beaverton.ibm.com> # HG changeset patch # User Deepti B. Kalakeri # Date 1243576050 25200 # Node ID 10e25eb82585ea86195ff7047cfe32645065ff58 # Parent 72be8ddf94c096cae3e795c6a7d4634b915922c1 [TEST] Add new tc to verify dir type diskpool creations using CreateChildResourcePool(). Tested with KVM on F10 with current sources. Signed-off-by: Deepti B. 
Kalakeri diff -r 72be8ddf94c0 -r 10e25eb82585 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py Thu May 28 22:47:30 2009 -0700 @@ -0,0 +1,102 @@ +#!/usr/bin/python +# +# Copyright 2009 IBM Corp. +# +# Authors: +# Deepti B. Kalakeri +# +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# This test case should test the CreateChildResourcePool service +# supplied by the RPCS provider. +# Input +# ----- +# IN -- ElementName -- String -- The desired name of the resource pool +# IN -- Settings -- String -- A string representation of a +# CIM_ResourceAllocationSettingData +# instance that represents the allocation +# assigned to this child pool +# IN -- ParentPool -- CIM_ResourcePool REF -- The parent pool from which +# to create this pool +# +# Output +# ------ +# OUT -- Pool -- CIM_ResourcePool REF -- The resulting resource pool +# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started +# OUT -- Error -- String -- Encoded error instance if the operation +# failed and did not return a job +# +# Exception details before Revision 846 +# ----- +# Error code: CIM_ERR_NOT_SUPPORTED +# +# After revision 846, the service is implemented +# +# -Date: 26.05.2009 + +import sys +from CimTest.Globals import logger +from CimTest.ReturnCodes import FAIL, PASS +from XenKvmLib.const import do_main, platform_sup +from XenKvmLib.classes import get_typed_class +from XenKvmLib.common_util import destroy_diskpool +from XenKvmLib.pool import create_pool, verify_pool, undefine_diskpool + +test_pool = "diskpool" +dp_types = { "DISK_POOL_DIR" : 1 } + + + at do_main(platform_sup) +def main(): + options = main.options + server = options.ip + virt = options.virt + pool_attr = { "Path" : "/tmp" } + + # For now the test case support only the creation of + # dir type disk pool, later change to fs and netfs etc + for key, value in dp_types.iteritems(): + status = create_pool(server, virt, test_pool, pool_attr, + mode_type=value, pool_type= "DiskPool") + if status != PASS: + logger.error("Failed to create '%s' type diskpool '%s'", + key, test_pool) + return FAIL + + status = verify_pool(server, virt, test_pool, pool_attr, + mode_type=value, pool_type="DiskPool") + if status != PASS: + logger.error("Error in diskpool verification") + destroy_diskpool(server, virt, test_pool) + undefine_diskpool(server, virt, test_pool) + return FAIL + + status = destroy_diskpool(server, virt, test_pool) + if status != PASS: + logger.error("Unable to destroy diskpool '%s'", test_pool) + return FAIL + + status = undefine_diskpool(server, virt, test_pool) + if status != PASS: + logger.error("Unable to undefine diskpool '%s'", test_pool) + return FAIL + + status = PASS + + return status + +if __name__ == 
"__main__": + sys.exit(main()) From deeptik at linux.vnet.ibm.com Fri May 29 09:02:26 2009 From: deeptik at linux.vnet.ibm.com (Deepti B. Kalakeri) Date: Fri, 29 May 2009 02:02:26 -0700 Subject: [Libvirt-cim] [PATCH 4 of 4] [TEST] Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py In-Reply-To: References: Message-ID: # HG changeset patch # User Deepti B. Kalakeri # Date 1243587710 25200 # Node ID fe2f7e27210c2437d410484bd8f16a0714994321 # Parent 1420ff7756e0d69ed1eda45c1065079c36d8e92c [TEST] Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py Modifying RPCS/04_CreateChildResourcePool.py and RPCS/07_DeleteChildResourcePool.py to align with the changes to create_pool() and verify_pool() changes of pool.py. Update: ------- Added the missing DCO. Signed-off-by: Deepti B. Kalakeri diff -r 1420ff7756e0 -r fe2f7e27210c suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Thu May 28 22:48:30 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Fri May 29 02:01:50 2009 -0700 @@ -54,7 +54,7 @@ from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class from XenKvmLib.common_util import destroy_netpool -from XenKvmLib.pool import create_netpool, verify_pool, undefine_netpool +from XenKvmLib.pool import create_pool, verify_pool, undefine_netpool test_pool = "testpool" @@ -76,15 +76,17 @@ "IPRangeStart" : range_addr_start, "IPRangeEnd" : range_addr_end } - for item in range(0, 3): - status = create_netpool(options.ip, options.virt, - test_pool, pool_attr, mode_type=item) + net_type = ["isolated", "nat", "route"] + for item in range(0, len(net_type)): + logger.info("Creating '%s' type network", net_type[item]) + status = create_pool(options.ip, options.virt, + test_pool, pool_attr, mode_type=item) if status != PASS: logger.error("Error in networkpool creation") return status - status = verify_pool(options.ip, options.virt, np, - test_pool, pool_attr, mode_type=item) + status = verify_pool(options.ip, options.virt, + test_pool, pool_attr, mode_type=item) if status != PASS: logger.error("Error in networkpool verification") destroy_netpool(options.ip, options.virt, test_pool) @@ -96,11 +98,6 @@ logger.error("Unable to destroy networkpool %s", test_pool) return status - status = undefine_netpool(options.ip, options.virt, test_pool) - if status != PASS: - logger.error("Unable to undefine networkpool %s", test_pool) - return status - status = PASS return status diff -r 1420ff7756e0 -r fe2f7e27210c suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Thu May 28 22:48:30 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/07_DeleteResourcePool.py Fri May 29 02:01:50 2009 -0700 @@ -52,7 +52,7 @@ from XenKvmLib.enumclass import EnumInstances, EnumNames from XenKvmLib.classes import get_typed_class from XenKvmLib.common_util import destroy_netpool -from XenKvmLib.pool import create_netpool, verify_pool +from XenKvmLib.pool import create_pool, verify_pool cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED cim_mname = "DeleteResourcePool" @@ -71,7 +71,8 @@ rpcs_conn.DeleteResourcePool() except pywbem.CIMError, (err_no, desc): if err_no == cim_errno : - logger.info("Got expected exception for '%s' 
service", cim_mname) + logger.info("Got expected exception for '%s' service", + cim_mname) logger.info("Errno is '%s' ", err_no) logger.info("Error string is '%s'", desc) return PASS @@ -87,21 +88,22 @@ "IPRangeEnd" : "192.168.0.15", "ForwardMode" : "nat" } - np = get_typed_class(options.virt, 'NetworkPool') - np_id = "NetworkPool/%s" % test_pool - status = create_netpool(options.ip, options.virt, test_pool, pool_attr) + status = create_pool(options.ip, options.virt, test_pool, pool_attr) if status != PASS: logger.error("Error in networkpool creation") return status - status = verify_pool(options.ip, options.virt, np, - test_pool, pool_attr) + status = verify_pool(options.ip, options.virt, + test_pool, pool_attr) + if status != PASS: logger.error("Error in networkpool verification") destroy_netpool(options.ip, options.virt, test_pool) return status + np = get_typed_class(options.virt, 'NetworkPool') + np_id = "NetworkPool/%s" % test_pool netpool = EnumNames(options.ip, np) for i in range(0, len(netpool)): ret_pool = netpool[i].keybindings['InstanceID'] diff -r 1420ff7756e0 -r fe2f7e27210c suites/libvirt-cim/lib/XenKvmLib/pool.py --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Thu May 28 22:48:30 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Fri May 29 02:01:50 2009 -0700 @@ -150,7 +150,7 @@ cmd = "virsh -c %s net-undefine %s" % (virt2uri(virt), network) ret, out = run_remote(server, cmd) - + return ret def undefine_netpool(server, virt, net_name): From deeptik at linux.vnet.ibm.com Fri May 29 10:10:21 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Fri, 29 May 2009 15:40:21 +0530 Subject: [Libvirt-cim] Test Run Summary (May 29 2009): KVM on SLES_11 with sfcb Message-ID: <4A1FB48D.7070501@linux.vnet.ibm.com> ================================================= Test Run Summary (May 29 2009): KVM on with sfcb ================================================= Distro: Kernel: 2.6.27.21-5.1-default libvirt: 0.6.3 Hypervisor: QEMU 0.10.4 CIMOM: sfcb sfcbd 1.3.2 Libvirt-cim revision: 0 Libvirt-cim changeset: SLES_11 Cimtest revision: 692 Cimtest changeset: 3c17b4d15e84 ================================================= FAIL : 23 XFAIL : 3 SKIP : 21 PASS : 109 ----------------- Total : 156 ================================================= FAIL Test Summary: AllocationCapabilities - 01_enum.py: FAIL ComputerSystem - 42_cs_gi_errs.py: FAIL ComputerSystemIndication - 01_created_indication.py: FAIL ElementCapabilities - 01_forward.py: FAIL ElementCapabilities - 03_forward_errs.py: FAIL ElementConforms - 01_forward.py: FAIL ElementConforms - 04_ectp_rev_errs.py: FAIL HostSystem - 03_hs_to_settdefcap.py: FAIL HostedDependency - 04_reverse_errs.py: FAIL HostedResourcePool - 03_forward_errs.py: FAIL HostedService - 03_forward_errs.py: FAIL ResourceAllocationFromPool - 01_forward.py: FAIL ResourceAllocationFromPool - 02_reverse.py: FAIL ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL ServiceAffectsElement - 01_forward.py: FAIL SettingsDefine - 01_forward.py: FAIL SettingsDefine - 02_reverse.py: FAIL SettingsDefineCapabilities - 01_forward.py: FAIL SettingsDefineCapabilities - 03_forward_errs.py: FAIL SystemDevice - 01_forward.py: FAIL VirtualSystemManagementService - 05_destroysystem_neg.py: FAIL VirtualSystemSnapshotService - 03_create_snapshot.py: FAIL ================================================= XFAIL Test Summary: ComputerSystem - 33_suspend_reboot.py: XFAIL 
VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL VirtualSystemManagementService - 16_removeresource.py: XFAIL ================================================= SKIP Test Summary: ComputerSystem - 02_nosystems.py: SKIP HostSystem - 05_hs_gi_errs.py: SKIP HostedAccessPoint - 01_forward.py: SKIP HostedAccessPoint - 02_reverse.py: SKIP KVMRedirectionSAP - 01_enum_KVMredSAP.py: SKIP LogicalDisk - 02_nodevs.py: SKIP RASD - 05_disk_rasd_emu_type.py: SKIP RASD - 06_parent_net_pool.py: SKIP RASD - 07_parent_disk_pool.py: SKIP RedirectionService - 01_enum_crs.py: SKIP RedirectionService - 02_enum_crscap.py: SKIP RedirectionService - 03_RedirectionSAP_errs.py: SKIP ServiceAccessBySAP - 01_forward.py: SKIP ServiceAccessBySAP - 02_reverse.py: SKIP VSSD - 02_bootldr.py: SKIP VirtualSystemMigrationService - 01_migratable_host.py: SKIP VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP ================================================= Full report: -------------------------------------------------------------------- AllocationCapabilities - 01_enum.py: FAIL ERROR - 12 KVM_AllocationCapabilities insts != 10 pool insts -------------------------------------------------------------------- AllocationCapabilities - 02_alloccap_gi_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 01_enum.py: PASS -------------------------------------------------------------------- ComputerSystem - 02_nosystems.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- ComputerSystem - 03_defineVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 04_defineStartVS.py: PASS -------------------------------------------------------------------- ComputerSystem - 05_activate_defined_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 06_paused_active_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 22_define_suspend.py: PASS -------------------------------------------------------------------- ComputerSystem - 23_pause_pause.py: PASS -------------------------------------------------------------------- ComputerSystem - 27_define_pause_errs.py: PASS -------------------------------------------------------------------- ComputerSystem - 32_start_reboot.py: PASS -------------------------------------------------------------------- ComputerSystem - 33_suspend_reboot.py: XFAIL ERROR - Got CIM error State not supported with return code 7 ERROR - Exception: Unable Suspend dom 'test_domain' InvokeMethod(RequestStateChange): State not supported Bug:<00012> -------------------------------------------------------------------- ComputerSystem - 35_start_reset.py: PASS -------------------------------------------------------------------- ComputerSystem - 40_RSC_start.py: PASS -------------------------------------------------------------------- ComputerSystem - 41_cs_to_settingdefinestate.py: PASS -------------------------------------------------------------------- ComputerSystem - 42_cs_gi_errs.py: FAIL ERROR - Unexpected errno 6, desc Referenced domain `invalid_name' does not exist: Domain not 
found: no domain with matching name 'invalid_name' ERROR - Expected No such instance (invalid_name) 6 ERROR - ------ FAILED: invalid_name ------ -------------------------------------------------------------------- ComputerSystemIndication - 01_created_indication.py: FAIL ERROR - Waited too long for start indication ERROR - Waited too long for destroy indication -------------------------------------------------------------------- ElementAllocatedFromPool - 01_forward.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 02_reverse.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 03_reverse_errs.py: PASS -------------------------------------------------------------------- ElementAllocatedFromPool - 04_forward_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 01_forward.py: FAIL ERROR - 'KVM_ElementCapabilities' association classname error -------------------------------------------------------------------- ElementCapabilities - 02_reverse.py: PASS -------------------------------------------------------------------- ElementCapabilities - 03_forward_errs.py: FAIL ERROR - Unexpected rc code 6 and description Referenced domain `wrong' does not exist: Domain not found: no domain with matching name 'wrong' -------------------------------------------------------------------- ElementCapabilities - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ElementCapabilities - 05_hostsystem_cap.py: PASS -------------------------------------------------------------------- ElementConforms - 01_forward.py: FAIL ERROR - verify_fields() exception: u'KVM_ConsoleRedirectionService' ERROR - Exception: Failed to verify instance -------------------------------------------------------------------- ElementConforms - 02_reverse.py: PASS -------------------------------------------------------------------- ElementConforms - 03_ectp_fwd_errs.py: PASS -------------------------------------------------------------------- ElementConforms - 04_ectp_rev_errs.py: FAIL ERROR - Unexpected rc code 6 and description Referenced domain `INVALID_Name_Keyvalue' does not exist: Domain not found: no domain with matching name 'INVALID_Name_Keyvalue' ERROR - ------ FAILED: INVALID_Name_Keyvalue------ -------------------------------------------------------------------- ElementSettingData - 01_forward.py: PASS -------------------------------------------------------------------- ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS -------------------------------------------------------------------- HostSystem - 01_enum.py: PASS -------------------------------------------------------------------- HostSystem - 02_hostsystem_to_rasd.py: PASS -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - 'KVM_SettingsDefineCapabilities' returned 8 RASD objects instead of 4 -------------------------------------------------------------------- HostSystem - 04_hs_to_EAPF.py: PASS -------------------------------------------------------------------- HostSystem - 05_hs_gi_errs.py: SKIP 
-------------------------------------------------------------------- HostSystem - 06_hs_to_vsms.py: PASS -------------------------------------------------------------------- HostedAccessPoint - 01_forward.py: SKIP -------------------------------------------------------------------- HostedAccessPoint - 02_reverse.py: SKIP -------------------------------------------------------------------- HostedDependency - 01_forward.py: PASS -------------------------------------------------------------------- HostedDependency - 02_reverse.py: PASS -------------------------------------------------------------------- HostedDependency - 03_enabledstate.py: PASS -------------------------------------------------------------------- HostedDependency - 04_reverse_errs.py: FAIL ERROR - Unexpected rc code 6 and description No such instance (Name) ERROR - --- FAILED: Invalid Name Key Name--- ERROR - Exception: KVMXML instance has no attribute 'cim_undefine' ERROR - Got CIM error Referenced domain `hd_domain1' does not exist: Domain not found: no domain with matching name 'hd_domain1' with return code 6 ERROR - AttributeError : KVMXML instance has no attribute 'cim_undefine' Traceback (most recent call last): File "./lib/XenKvmLib/const.py", line 139, in do_try File "04_reverse_errs.py", line 126, in main acn, msg, field, expr_values) File "04_reverse_errs.py", line 79, in verify_err_fields cxml.cim_undefine(server) AttributeError: KVMXML instance has no attribute 'cim_undefine' ERROR - None InvokeMethod(DestroySystem): Referenced domain `hd_domain1' does not exist: Domain not found: no domain with matching name 'hd_domain1' -------------------------------------------------------------------- HostedResourcePool - 01_forward.py: PASS -------------------------------------------------------------------- HostedResourcePool - 02_reverse.py: PASS -------------------------------------------------------------------- HostedResourcePool - 03_forward_errs.py: FAIL ERROR - Unexpected rc code 6 and description No such instance (CreationClassName) ERROR - ------FAILED: Invalid CreationClassName Key Value.------ -------------------------------------------------------------------- HostedResourcePool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- HostedService - 01_forward.py: PASS -------------------------------------------------------------------- HostedService - 02_reverse.py: PASS -------------------------------------------------------------------- HostedService - 03_forward_errs.py: FAIL ERROR - Unexpected rc code 6 and description No such instance (Name) ERROR - ------ FAILED: Invalid Name Key Name.------ -------------------------------------------------------------------- HostedService - 04_reverse_errs.py: PASS -------------------------------------------------------------------- KVMRedirectionSAP - 01_enum_KVMredSAP.py: SKIP -------------------------------------------------------------------- LogicalDisk - 01_disk.py: PASS -------------------------------------------------------------------- LogicalDisk - 02_nodevs.py: SKIP ERROR - System has defined domains; unable to run -------------------------------------------------------------------- LogicalDisk - 03_ld_gi_errs.py: PASS -------------------------------------------------------------------- Memory - 01_memory.py: PASS -------------------------------------------------------------------- Memory - 02_defgetmem.py: PASS -------------------------------------------------------------------- Memory - 03_mem_gi_errs.py: PASS 
-------------------------------------------------------------------- NetworkPort - 01_netport.py: PASS -------------------------------------------------------------------- NetworkPort - 02_np_gi_errors.py: PASS -------------------------------------------------------------------- NetworkPort - 03_user_netport.py: PASS -------------------------------------------------------------------- Processor - 01_processor.py: PASS -------------------------------------------------------------------- Processor - 02_definesys_get_procs.py: PASS -------------------------------------------------------------------- Processor - 03_proc_gi_errs.py: PASS -------------------------------------------------------------------- Profile - 01_enum.py: PASS -------------------------------------------------------------------- Profile - 02_profile_to_elec.py: PASS -------------------------------------------------------------------- Profile - 03_rprofile_gi_errs.py: PASS -------------------------------------------------------------------- RASD - 01_verify_rasd_fields.py: PASS -------------------------------------------------------------------- RASD - 02_enum.py: PASS -------------------------------------------------------------------- RASD - 03_rasd_errs.py: PASS -------------------------------------------------------------------- RASD - 04_disk_rasd_size.py: PASS -------------------------------------------------------------------- RASD - 05_disk_rasd_emu_type.py: SKIP -------------------------------------------------------------------- RASD - 06_parent_net_pool.py: SKIP 06_parent_net_pool.py:50: DeprecationWarning: the sets module is deprecated from sets import Set ERROR - NetworkPool template RASDs not supported. Supported in version 867. -------------------------------------------------------------------- RASD - 07_parent_disk_pool.py: SKIP 07_parent_disk_pool.py:47: DeprecationWarning: the sets module is deprecated from sets import Set ERROR - DiskPool template RASDs not supported. Supported in version 863. 
-------------------------------------------------------------------- RedirectionService - 01_enum_crs.py: SKIP 01_enum_crs.py:29: DeprecationWarning: the sets module is deprecated from sets import Set -------------------------------------------------------------------- RedirectionService - 02_enum_crscap.py: SKIP -------------------------------------------------------------------- RedirectionService - 03_RedirectionSAP_errs.py: SKIP -------------------------------------------------------------------- ReferencedProfile - 01_verify_refprof.py: PASS -------------------------------------------------------------------- ReferencedProfile - 02_refprofile_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 01_forward.py: FAIL ERROR - 6 RASD insts != 4 pool insts -------------------------------------------------------------------- ResourceAllocationFromPool - 02_reverse.py: FAIL ERROR - 6 RASD insts != 4 pool insts -------------------------------------------------------------------- ResourceAllocationFromPool - 03_forward_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 04_reverse_errs.py: PASS -------------------------------------------------------------------- ResourceAllocationFromPool - 05_RAPF_err.py: PASS -------------------------------------------------------------------- ResourcePool - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePool - 02_rp_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 01_enum.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description One or more parameter values passed to the method were invalid ERROR - Error in networkpool creation InvokeMethod(CreateChildResourcePool): One or more parameter values passed to the method were invalid -------------------------------------------------------------------- ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py: PASS -------------------------------------------------------------------- ResourcePoolConfigurationService - 07_DeleteResourcePool.py: FAIL ERROR - Unexpected rc code 4 and description One or more parameter values passed to the method were invalid InvokeMethod(DeleteResourcePool): One or more parameter values passed to the method were invalid -------------------------------------------------------------------- ServiceAccessBySAP - 01_forward.py: SKIP -------------------------------------------------------------------- ServiceAccessBySAP - 02_reverse.py: SKIP -------------------------------------------------------------------- ServiceAffectsElement - 01_forward.py: FAIL 
01_forward.py:51: DeprecationWarning: the sets module is deprecated from sets import Set ERROR - Exception in fn verify_assoc() ERROR - Exception details: Failed to get init_list -------------------------------------------------------------------- ServiceAffectsElement - 02_reverse.py: PASS -------------------------------------------------------------------- SettingsDefine - 01_forward.py: FAIL ERROR - 4 device insts != 6 RASD insts -------------------------------------------------------------------- SettingsDefine - 02_reverse.py: FAIL ERROR - u'KVM_DisplayController' -------------------------------------------------------------------- SettingsDefine - 03_sds_fwd_errs.py: PASS -------------------------------------------------------------------- SettingsDefine - 04_sds_rev_errs.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 01_forward.py: FAIL ERROR - KVM_SettingsDefineCapabilities returned 60 ResourcePool objects instead of 4 -------------------------------------------------------------------- SettingsDefineCapabilities - 03_forward_errs.py: FAIL ERROR - Unexpected rc code 6 and description No such instance (INVALID_InstID_KeyValue) - resource pool type mismatch ERROR - ------ FAILED: Invalid InstanceID Key Value.------ -------------------------------------------------------------------- SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS -------------------------------------------------------------------- SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS -------------------------------------------------------------------- SystemDevice - 01_forward.py: FAIL 01_forward.py:29: DeprecationWarning: the sets module is deprecated from sets import Set ERROR - Device Class mismatch ERROR - Exception Expected Device class list: ['KVM_LogicalDisk', 'KVM_Memory', 'KVM_NetworkPort', 'KVM_Processor'] Got: [u'KVM_DisplayController', u'KVM_LogicalDisk', u'KVM_Memory', u'KVM_NetworkPort', u'KVM_PointingDevice', u'KVM_Processor'] -------------------------------------------------------------------- SystemDevice - 02_reverse.py: PASS -------------------------------------------------------------------- SystemDevice - 03_fwderrs.py: PASS -------------------------------------------------------------------- VSSD - 01_enum.py: PASS -------------------------------------------------------------------- VSSD - 02_bootldr.py: SKIP -------------------------------------------------------------------- VSSD - 03_vssd_gi_errs.py: PASS -------------------------------------------------------------------- VSSD - 04_vssd_to_rasd.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 01_definesystem_name.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 02_destroysystem.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 03_definesystem_ess.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 04_definesystem_ers.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 05_destroysystem_neg.py: FAIL ERROR - Got error no 6, 
but expected no 1 ERROR - Got error desc: Unable to retrieve domain name: Error 0, but expected desc: Unable to retrieve domain name. ERROR - Got error no 6, but expected no 1 ERROR - Got error desc: Referenced domain `##@@!!cimtest_domain' does not exist: Domain not found: no domain with matching name '##@@!!cimtest_domain', but expected desc: Failed to find domain InvokeMethod(DestroySystem): Unable to retrieve domain name: Error 0 InvokeMethod(DestroySystem): Referenced domain `##@@!!cimtest_domain' does not exist: Domain not found: no domain with matching name '##@@!!cimtest_domain' -------------------------------------------------------------------- VirtualSystemManagementService - 06_addresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 07_addresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 08_modifyresource.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 09_procrasd_persist.py: XFAIL -------------------------------------------------------------------- VirtualSystemManagementService - 10_hv_version.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 11_define_memrasdunits.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 12_referenced_config.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 13_refconfig_additional_devs.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 14_define_sys_disk.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 15_mod_system_settings.py: PASS -------------------------------------------------------------------- VirtualSystemManagementService - 16_removeresource.py: XFAIL ERROR - 0 RASD insts for domain/mouse:ps2 No such instance (no device domain/mouse:ps2) Bug:<00014> -------------------------------------------------------------------- VirtualSystemManagementService - 17_removeresource_neg.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemMigrationService - 01_migratable_host.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 06_remote_live_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 07_remote_offline_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationService - 08_remote_restart_resume_migration.py: SKIP -------------------------------------------------------------------- VirtualSystemMigrationSettingData - 01_enum.py: PASS -------------------------------------------------------------------- 
VirtualSystemMigrationSettingData - 02_vsmsd_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 01_forward.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 02_reverse.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotService - 03_create_snapshot.py: FAIL ERROR - Exception: (1, u'*** Provider Virt_VirtualSystemSnapshotService(18530) exiting due to a SIGSEGV signal ') ERROR - Failed to remove snapshot file for snapshot_vm InvokeMethod(CreateSnapshot): *** Provider Virt_VirtualSystemSnapshotService(18530) exiting due to a SIGSEGV signal -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS -------------------------------------------------------------------- VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py: PASS -------------------------------------------------------------------- -- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From deeptik at linux.vnet.ibm.com Fri May 29 12:21:15 2009 From: deeptik at linux.vnet.ibm.com (Deepti B Kalakeri) Date: Fri, 29 May 2009 17:51:15 +0530 Subject: [Libvirt-cim] [PATCH 3 of 4] [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool In-Reply-To: <4A1ED62E.2080100@linux.vnet.ibm.com> References: <4A1ED62E.2080100@linux.vnet.ibm.com> Message-ID: <4A1FD33B.4070901@linux.vnet.ibm.com> Kaitlin Rupert wrote: > Deepti B. Kalakeri wrote: >> # HG changeset patch >> # User Deepti B. Kalakeri >> # Date 1243503501 25200 >> # Node ID ff0fb8fca1512987cb6a8210e7a3e95396dfa0a9 >> # Parent 13e68baa2f6de1737a7ad17e9d4909782d3fe192 >> [TEST] Adding new tc to verify DiskPool Deletion using >> DeleteResourcePool. >> >> Tested with KVM on F10 with current sources. >> Signed-off-by: Deepti B. Kalakeri >> >> diff -r 13e68baa2f6d -r ff0fb8fca151 >> suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py >> >> --- /dev/null Thu Jan 01 00:00:00 1970 +0000 >> +++ >> b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py >> Thu May 28 02:38:21 2009 -0700 > > No complaints on this patch.. just a question. =) Do you plan on > updating this test case to support deletion of other disk pool types? > All the pool types should be identical. So if you plan on using > different test cases, it would be a good idea to put parts of this > test case into a function (or functions) at some point later on. > Yes we should be supporting the deletion of the other disk / net types. When that happens we can see if we need to write different functions. For now the DeleteResourcePool() has atleast one test for verification. Do you think we should immediately implement the deletion of the other types as well ? -- Thanks and Regards, Deepti B. 
Kalakeri IBM Linux Technology Center deeptik at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 29 20:25:28 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 13:25:28 -0700 Subject: [Libvirt-cim] [PATCH 3 of 4] [TEST] Adding new tc to verify DiskPool Deletion using DeleteResourcePool In-Reply-To: <4A1FD33B.4070901@linux.vnet.ibm.com> References: <4A1ED62E.2080100@linux.vnet.ibm.com> <4A1FD33B.4070901@linux.vnet.ibm.com> Message-ID: <4A2044B8.7060008@linux.vnet.ibm.com> >> No complaints on this patch.. just a question. =) Do you plan on >> updating this test case to support deletion of other disk pool types? >> All the pool types should be identical. So if you plan on using >> different test cases, it would be a good idea to put parts of this >> test case into a function (or functions) at some point later on. >> > Yes we should be supporting the deletion of the other disk / net types. > When that happens we can see if we need to write different functions. > For now the DeleteResourcePool() has atleast one test for verification. > Do you think we should immediately implement the deletion of the other > types as well ? > No, this is fine for now. I was just curious how you were planning to approach it. But this sounds fine to me. Thanks! -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 29 21:09:49 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 14:09:49 -0700 Subject: [Libvirt-cim] [PATCH 1 of 4] [TEST] Move cleanup_guest_netpool() to vsmigrations.py In-Reply-To: <4A1F9C9F.7010607@linux.vnet.ibm.com> References: <4A1F9C9F.7010607@linux.vnet.ibm.com> Message-ID: <4A204F1D.8060700@linux.vnet.ibm.com> >> + # completely successful VM might be created on the target machine >> + # and hence need to clean. >> + target_list = domain_list(t_sysname, virt) >> + if target_list != None and test_dom in target_list: >> + ret_value = cxml.destroy(t_sysname) >> > The ret_value from cxml.destroy is False and hence even though the VM is > getting destroyed we are getting the following false log: > Failed to destroy the migrated domain > 'VM_frm_elm3b217.beaverton.ibm.com' on 'localhost' > Thu, 28 May 2009 22:35:49:TEST LOG:INFO - Failed to destroy the > migrated domain 'VM_frm_elm3b217.beaverton.ibm.com' on 'localhost' > > use cxml.cim_destroy()instead. Good point - this should be cim_destroy(). > >> + if not ret_value: >> + logger.info("Failed to destroy the migrated domain '%s' >> on '%s'", >> + test_dom, t_sysname) >> + >> + ret_value = cxml.undefine(t_sysname) >> > Same here cxml.undefine() returns False and hence the following > statement gets printed. > Thu, 28 May 2009 22:52:39:TEST LOG:INFO - Failed to undefine > the migrated domain 'VM_frm_elm3b217.beaverton.ibm.com' on 'localhost' I'm not sure I understand.. if the guest fails to undefine, we should print an error. When you tested, did undefine() return false even when the guest was removed properly? I was unable to reproduce this. 
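As an aside, one way to make this kind of cleanup robust regardless of what destroy()/undefine() report is to re-check the host's domain list afterwards instead of trusting the boolean return alone. A minimal sketch, assuming the usual cimtest helpers (domain_list from XenKvmLib.xm_virt_util, the vxml cxml object, and logger); the helper name is illustrative, not part of the suite:

    def force_remove_guest(cxml, virt, test_dom, sysname):
        # Ask the providers to tear the guest down, but don't rely on the
        # boolean return values alone (see the destroy() vs cim_destroy()
        # discussion above) -- confirm by re-listing the domains.
        cxml.cim_destroy(sysname)
        cxml.undefine(sysname)

        doms = domain_list(sysname, virt)
        if doms != None and test_dom in doms:
            logger.error("Guest '%s' still present on '%s'", test_dom, sysname)
            return False
        return True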
>> + if not ret_value: >> + logger.info("Failed to undefine the migrated domain '%s' >> on '%s'", >> + test_dom, t_sysname) >> + -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 29 21:19:59 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 14:19:59 -0700 Subject: [Libvirt-cim] [PATCH 3 of 4] [TEST] 07 - Remove cleanup_guest_netpool() def and call it from vsmigration.py In-Reply-To: References: Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1242946256 25200 # Node ID e21a60339e4be98d049db5ea146377e90a1f006b # Parent fdb53fac065cd041292fbc9b6dc0dcd712b99b0b [TEST] 07 - Remove cleanup_guest_netpool() def and call it from vsmigration.py Signed-off-by: Kaitlin Rupert diff -r fdb53fac065c -r e21a60339e4b suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py Thu May 21 15:50:56 2009 -0700 @@ -35,9 +35,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf sup_types = ['KVM', 'Xen'] @@ -60,35 +60,6 @@ return PASS, cxml -def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): - # Clean the domain on target machine. - # This is req when migration is successful, also when migration is not - # completely successful VM might be created on the target machine - # and hence need to clean. 
- target_list = domain_list(t_sysname, virt) - if target_list != None and test_dom in target_list: - ret_value = cxml.undefine(t_sysname) - if not ret_value: - logger.info("Failed to undefine the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and default_network_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, default_network_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - default_network_name, t_sysname) - - # Remote Migration not Successful, clean the domain on src machine - src_list = domain_list(s_sysname, virt) - if src_list != None and test_dom in src_list: - ret_value = cxml.undefine(s_sysname) - if not ret_value: - logger.info("Failed to undefine the domain '%s' on source '%s'", - test_dom, s_sysname) - - @do_main(sup_types) def main(): options = main.options From kaitlin at linux.vnet.ibm.com Fri May 29 21:19:58 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 14:19:58 -0700 Subject: [Libvirt-cim] [PATCH 2 of 4] [TEST] 06 - Remove cleanup_guest_netpool() def and call it from vsmigration.py In-Reply-To: References: Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1242946256 25200 # Node ID fdb53fac065cd041292fbc9b6dc0dcd712b99b0b # Parent 42a5cacd8bb2b96c2a9075d331da0c567661976d [TEST] 06 - Remove cleanup_guest_netpool() def and call it from vsmigration.py Signed-off-by: Kaitlin Rupert diff -r 42a5cacd8bb2 -r fdb53fac065c suites/libvirt-cim/cimtest/VirtualSystemMigrationService/06_remote_live_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/06_remote_live_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/06_remote_live_migration.py Thu May 21 15:50:56 2009 -0700 @@ -35,9 +35,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf sup_types = ['KVM', 'Xen'] @@ -67,45 +67,6 @@ return PASS, cxml -def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): - # Clean the domain on target machine. - # This is req when migration is successful, also when migration is not - # completely successful VM might be created on the target machine - # and hence need to clean. 
- target_list = domain_list(t_sysname, virt) - if target_list != None and test_dom in target_list: - ret_value = cxml.destroy(t_sysname) - if not ret_value: - logger.info("Failed to destroy the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - ret_value = cxml.undefine(t_sysname) - if not ret_value: - logger.info("Failed to undefine the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and default_network_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, default_network_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - default_network_name, t_sysname) - - # Remote Migration not Successful, clean the domain on src machine - src_list = domain_list(s_sysname, virt) - if src_list != None and test_dom in src_list: - ret_value = cxml.cim_destroy(s_sysname) - if not ret_value: - logger.info("Failed to destroy the domain '%s' on the source '%s'", - test_dom, s_sysname) - - ret_value = cxml.undefine(s_sysname) - if not ret_value: - logger.info("Failed to undefine the domain '%s' on source '%s'", - test_dom, s_sysname) - - @do_main(sup_types) def main(): options = main.options From kaitlin at linux.vnet.ibm.com Fri May 29 21:19:56 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 14:19:56 -0700 Subject: [Libvirt-cim] [PATCH 0 of 4] [TEST] Misc migration test fixes Message-ID: This tests enable localhost migration to work with Xen guests From kaitlin at linux.vnet.ibm.com Fri May 29 21:20:00 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 14:20:00 -0700 Subject: [Libvirt-cim] [PATCH 4 of 4] [TEST] #2 VSMS 08 Remove cleanup_guest_netpool() def and call it from vsmigration.py In-Reply-To: References: Message-ID: <401fcf44005919231ce6.1243632000@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242946256 25200 # Node ID 401fcf44005919231ce6ea590029aff42e910152 # Parent e21a60339e4be98d049db5ea146377e90a1f006b [TEST] #2 VSMS 08 Remove cleanup_guest_netpool() def and call it from vsmigration.py Updates: -Give guest time to fuly boot before rebooting -Indicate the pass / failure of both the restart and resume cases Note: This test might fail with older versions of Xen - restart migration can be unstable Signed-off-by: Kaitlin Rupert diff -r e21a60339e4b -r 401fcf440059 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 @@ -28,6 +28,7 @@ import sys import os +from time import sleep from socket import gethostname from XenKvmLib import vxml from XenKvmLib.xm_virt_util import domain_list, net_list @@ -35,9 +36,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf 
sup_types = ['KVM', 'Xen'] @@ -125,6 +126,9 @@ status_resume = status_restart = None cxml = None + status_restart = -1 + status_resume = -1 + try: for mig_type in mig_types: @@ -135,6 +139,10 @@ logger.error("Error setting up the guest") return status + # Generally, having a test sleep is a bad choice, but we need to + # give the guest some time to fully boot before we reboot it + sleep(15) + # create the networkpool used in the domain to be migrated # on the target machine. t_net_list = net_list(t_sysname, virt) @@ -168,18 +176,14 @@ cleanup_guest(virt, cxml, test_dom, t_sysname, s_sysname) status = FAIL - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and net_pool_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, net_pool_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - net_pool_name, t_sysname) + cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname) - if status_restart != PASS or status_resume != PASS: + if status_restart == PASS and status_resume == PASS: + status = PASS + else: + logger.error("Restart migration %d", status_restart) + logger.error("Resume migration %d", status_resume) status = FAIL - else: - status = PASS logger.info("Test case %s", str_status(status)) return status From kaitlin at linux.vnet.ibm.com Fri May 29 21:19:57 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 14:19:57 -0700 Subject: [Libvirt-cim] [PATCH 1 of 4] [TEST] #2 Move cleanup_guest_netpool() to vsmigrations.py In-Reply-To: References: Message-ID: <42a5cacd8bb2b96c2a90.1243631997@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1242946256 25200 # Node ID 42a5cacd8bb2b96c2a9075d331da0c567661976d # Parent 3c17b4d15e84469ed3d2307a7123c75d99415dee [TEST] #2 Move cleanup_guest_netpool() to vsmigrations.py Migration test 06 - 08 duplicate this same code. Also, if the migration is a localhost one, set the hostname to localhost. Otherwise, the providers will return an error saying the guest already exists on the target (because the providers haven't detected a localhost migration). If the target system name is localhost, the migration will always be a local migration. Be sure to set remote_migration accordingly. 
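For reference, a standalone sketch of the hostname check described above, mirroring what check_mig_support() does but with illustrative names and without the KVM-specific SKIP handling:

    from socket import gethostbyaddr

    def classify_migration(src_ip, target_url):
        # Resolve both endpoints the same way the migration tests do.
        s_sysname = gethostbyaddr(src_ip)[0]
        t_sysname = gethostbyaddr(target_url)[0]

        # If the target resolves to (or is contained in) the source name,
        # treat this as a localhost migration: report "localhost" as the
        # target and clear the remote flag, so the providers don't reject
        # the request claiming the guest already exists on the target.
        if t_sysname == s_sysname or t_sysname in s_sysname:
            return s_sysname, "localhost", 0

        return s_sysname, t_sysname, 1

The third element plays the role of remote_migrate in the tests: callers would skip the remote image copy and remote netpool setup when it is 0.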
Update from 1 to 2: -Change destroy() to cim_destroy() Signed-off-by: Kaitlin Rupert diff -r 3c17b4d15e84 -r 42a5cacd8bb2 suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py --- a/suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py Fri May 22 01:41:08 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py Thu May 21 15:50:56 2009 -0700 @@ -30,10 +30,11 @@ from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.enumclass import EnumInstances from XenKvmLib.classes import get_typed_class, virt_types -from XenKvmLib.xm_virt_util import domain_list -from XenKvmLib.const import get_provider_version +from XenKvmLib.xm_virt_util import domain_list, net_list +from XenKvmLib.const import get_provider_version, default_network_name from CimTest.Globals import logger, CIM_USER, CIM_PASS, CIM_NS, \ CIM_ERROR_ENUMERATE +from XenKvmLib.common_util import destroy_netpool # Migration constants CIM_MIGRATE_OFFLINE=1 @@ -94,9 +95,14 @@ def check_mig_support(virt, options): s_sysname = gethostbyaddr(options.ip)[0] t_sysname = gethostbyaddr(options.t_url)[0] - if virt == 'KVM' and (t_sysname == s_sysname or t_sysname in s_sysname): - logger.info("Libvirt does not support local migration for KVM") - return SKIP, s_sysname, t_sysname + + if t_sysname == s_sysname or t_sysname in s_sysname: + if virt == 'KVM': + logger.info("Libvirt does not support local migration for KVM") + return SKIP, s_sysname, t_sysname + + #localhost migration is supported by Xen + return PASS, s_sysname, "localhost" return PASS, s_sysname, t_sysname @@ -364,12 +370,15 @@ logger.error("Guest to be migrated not specified.") return FAIL + if t_sysname == "localhost": + remote_migrate = 0 + try: if remote_migrate == 1: - status, req_image, backup_image = remote_copy_guest_image(virt, - s_sysname, - t_sysname, - guest_name) + status, req_image, bkup_image = remote_copy_guest_image(virt, + s_sysname, + t_sysname, + guest_name) if status != PASS: raise Exception("Failure from remote_copy_guest_image()") @@ -397,7 +406,10 @@ logger.info("Migrating '%s'.. this will take some time.", guest_name) # Migrate the guest to t_sysname - status, ret = migrate_guest_to_host(vsmservice, guest_ref, t_sysname, msd) + status, ret = migrate_guest_to_host(vsmservice, + guest_ref, + t_sysname, + msd) if status == FAIL: raise Exception("Failed to Migrate guest '%s' from '%s' to '%s'" \ % (guest_name, s_sysname, t_sysname)) @@ -413,5 +425,50 @@ logger.error("Exception details %s", details) status = FAIL - cleanup_image(backup_image, req_image, t_sysname, remote_migrate=1) + if remote_migrate == 1: + cleanup_image(bkup_image, req_image, t_sysname, remote_migrate=1) + return status + +def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): + # Clean the domain on target machine. + # This is req when migration is successful, also when migration is not + # completely successful VM might be created on the target machine + # and hence need to clean. 
+ target_list = domain_list(t_sysname, virt) + if target_list != None and test_dom in target_list: + ret_value = cxml.cim_destroy(t_sysname) + if not ret_value: + logger.info("Failed to destroy the migrated domain '%s' on '%s'", + test_dom, t_sysname) + + ret_value = cxml.undefine(t_sysname) + if not ret_value: + logger.info("Failed to undefine the migrated domain '%s' on '%s'", + test_dom, t_sysname) + + # Done cleaning environment + if t_sysname == "localhost": + return + + # Remote Migration not Successful, clean the domain on src machine + src_list = domain_list(s_sysname, virt) + if src_list != None and test_dom in src_list: + ret_value = cxml.cim_destroy(s_sysname) + if not ret_value: + logger.info("Failed to destroy the domain '%s' on the source '%s'", + test_dom, s_sysname) + + ret_value = cxml.undefine(s_sysname) + if not ret_value: + logger.info("Failed to undefine the domain '%s' on source '%s'", + test_dom, s_sysname) + + # clean the networkpool created on the remote machine + target_net_list = net_list(t_sysname, virt) + if target_net_list != None and default_network_name in target_net_list: + ret_value = destroy_netpool(t_sysname, virt, default_network_name) + if ret_value != PASS: + logger.info("Unable to destroy networkpool '%s' on '%s'", + default_network_name, t_sysname) + From kaitlin at linux.vnet.ibm.com Fri May 29 21:26:59 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 14:26:59 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Modifying ResourcePool/01_enum.py to accomodate verifying Parent DiskPool and NetworkPool In-Reply-To: <30ce5799fb9abed46b52.1243430411@elm3a148.beaverton.ibm.com> References: <30ce5799fb9abed46b52.1243430411@elm3a148.beaverton.ibm.com> Message-ID: <4A205323.6060009@linux.vnet.ibm.com> Deepti B. Kalakeri wrote: > # HG changeset patch > # User Deepti B. Kalakeri > # Date 1243430368 25200 > # Node ID 30ce5799fb9abed46b528e70ad81b4242a801f05 > # Parent e5fd77170913c3819d667e240c9873efa3bf0d07 > [TEST] Modifying ResourcePool/01_enum.py to accomodate verifying Parent DiskPool and NetworkPool. > > Tested with KVM on F10 with current sources. > Signed-off-by: Deepti B. Kalakeri > > diff -r e5fd77170913 -r 30ce5799fb9a suites/libvirt-cim/cimtest/ResourcePool/01_enum.py > + if virt != 'LXC': > + dp = get_typed_class(virt, dp_cn) > + np = get_typed_class(virt, np_cn) > + cn_list.append(dp) > + cn_list.append(np) Even though containers don't have disks associated with them, you can create a diskpool with the lxc://system uri. And containers guests can have a network interface (and network pool) if the kernel supports it. Otherwise, this looks good! -- Kaitlin Rupert IBM Linux Technology Center kaitlin at linux.vnet.ibm.com From kaitlin at linux.vnet.ibm.com Fri May 29 21:39:53 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 14:39:53 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Detect whether the revision is a distro defined value Message-ID: <46664d3f9e880c69ed43.1243633193@localhost.localdomain> # HG changeset patch # User Kaitlin Rupert # Date 1243633176 25200 # Node ID 46664d3f9e880c69ed43a06f36d0d4f33bd29f67 # Parent 1f0e581729dc63c083a510703b47d4bd457bd178 [TEST] Detect whether the revision is a distro defined value Even if the distro is SLES or RHEL, the providers might be installed from source. In which case, the revision value will be the hg revision value, and not the distro defined value. 
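To make the intent concrete, a minimal sketch of the distinction being drawn here (function name is illustrative; the exact strings distro packages report vary, so this only separates a plain hg integer, optionally with a trailing '+', from anything else):

    def looks_like_hg_revision(revision):
        # Providers built from source report the hg revision, e.g. "886",
        # or "886+" for a locally modified tree; distro packages report
        # their own version string instead.
        rev = revision.strip().rstrip("+")
        return rev.isdigit()

Only when such a check fails does it make sense to fall back to a distro-specific changeset constant such as sles11_changeset, which is what the patch below does.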
Signed-off-by: Kaitlin Rupert diff -r 1f0e581729dc -r 46664d3f9e88 suites/libvirt-cim/lib/XenKvmLib/const.py --- a/suites/libvirt-cim/lib/XenKvmLib/const.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/const.py Fri May 29 14:39:36 2009 -0700 @@ -148,11 +148,6 @@ def get_provider_version(virt, ip): - cmd = "cat /etc/issue | grep 'SUSE Linux Enterprise Server 11'" - rc, out = run_remote(ip, cmd) - if rc == 0: - return 0, sles11_changeset - conn = WBEMConnection('http://%s' % ip, (os.getenv('CIM_USER'), os.getenv('CIM_PASS')), os.getenv('CIM_NS')) @@ -167,6 +162,18 @@ if revision is None or changeset is None: return 0, "Unknown" + # This is a sloppy mechanism for detecting a distro defined revision value + distro = None + + cmd = "cat /etc/issue | grep 'SUSE Linux Enterprise Server 11'" + rc, out = run_remote(ip, cmd) + if rc == 0: + distro = "sles11" + + if revision.find(".") == 0: + if distro == "sles11": + return 0, sles11_changeset + revision = revision.strip("+") if revision.isdigit(): revision = int(revision) From kaitlin at linux.vnet.ibm.com Fri May 29 21:41:44 2009 From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert) Date: Fri, 29 May 2009 14:41:44 -0700 Subject: [Libvirt-cim] [PATCH] [TEST] Add support for DiskPoolRASD / NetPoolRASD to get_exp_template_rasd_len() Message-ID: # HG changeset patch # User Kaitlin Rupert # Date 1243630742 25200 # Node ID a22bbefc05e5b294b254c58236b0f2abda65a23a # Parent 46664d3f9e880c69ed43a06f36d0d4f33bd29f67 [TEST] Add support for DiskPoolRASD / NetPoolRASD to get_exp_template_rasd_len() Also update HostSystem 03 to support DiskPoolRASD / NetPoolRASD Signed-off-by: Kaitlin Rupert diff -r 46664d3f9e88 -r a22bbefc05e5 suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py --- a/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py Fri May 29 14:39:36 2009 -0700 +++ b/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py Fri May 29 13:59:02 2009 -0700 @@ -204,8 +204,10 @@ else: rtype = { "%s_DiskResourceAllocationSettingData" % virt : 17, \ + "%s_DiskPoolResourceAllocationSettingData" % virt : 17, \ "%s_MemResourceAllocationSettingData" % virt : 4, \ "%s_NetResourceAllocationSettingData" % virt : 10, \ + "%s_NetPoolResourceAllocationSettingData" % virt : 10, \ "%s_ProcResourceAllocationSettingData" % virt : 3 } try: diff -r 46664d3f9e88 -r a22bbefc05e5 suites/libvirt-cim/lib/XenKvmLib/rasd.py --- a/suites/libvirt-cim/lib/XenKvmLib/rasd.py Fri May 29 14:39:36 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/rasd.py Fri May 29 13:59:02 2009 -0700 @@ -304,45 +304,76 @@ return rasd_insts, PASS -def get_exp_template_rasd_len(virt, ip, id): +def get_exp_disk_rasd_len(virt, ip, rev, id): libvirt_rasd_template_changes = 707 libvirt_rasd_new_changes = 805 libvirt_rasd_dpool_changes = 839 - curr_cim_rev, changeset = get_provider_version(virt, ip) - # For Diskpool, we have info 1 for each of Min, Max, Default, and Incr exp_base_num = 4 exp_cdrom = 4 - exp_len = exp_base_num + exp_len = exp_base_num - if 'DiskPool' in id: - if virt == 'Xen' or virt == 'XenFV': - # For Xen and XenFV, there is a template for PV and FV, so you - # end up with double the number of templates - xen_multi = 2 + if id == "DiskPool/0": + pool_types = 3 + return exp_base_num * pool_types + + if virt == 'Xen' or virt == 'XenFV': + # For Xen and XenFV, there is a template for PV and FV, so you + # end up with double the number of templates + xen_multi = 2 - if curr_cim_rev >= libvirt_rasd_template_changes and \ - curr_cim_rev < 
-                exp_len = exp_base_num + exp_cdrom
+        if rev >= libvirt_rasd_template_changes and \
+           rev < libvirt_rasd_new_changes:
+            exp_len = exp_base_num + exp_cdrom

-            elif curr_cim_rev >= libvirt_rasd_new_changes and \
-                 curr_cim_rev < libvirt_rasd_dpool_changes:
-                exp_len = (exp_base_num + exp_cdrom) * xen_multi
+        elif rev >= libvirt_rasd_new_changes and \
+             rev < libvirt_rasd_dpool_changes:
+            exp_len = (exp_base_num + exp_cdrom) * xen_multi

-            elif curr_cim_rev >= libvirt_rasd_dpool_changes:
-                volumes = enum_volumes(virt, ip)
-                exp_len = ((volumes * exp_base_num) + exp_cdrom) * xen_multi
+        elif rev >= libvirt_rasd_dpool_changes:
+            volumes = enum_volumes(virt, ip)
+            exp_len = ((volumes * exp_base_num) + exp_cdrom) * xen_multi

-        elif virt == 'KVM':
-            if curr_cim_rev >= libvirt_rasd_new_changes and \
-               curr_cim_rev < libvirt_rasd_dpool_changes:
-                exp_len = exp_base_num + exp_cdrom
+    elif virt == 'KVM':
+        if rev >= libvirt_rasd_new_changes and \
+           rev < libvirt_rasd_dpool_changes:
+            exp_len = exp_base_num + exp_cdrom

-            elif curr_cim_rev >= libvirt_rasd_dpool_changes:
-                volumes = enum_volumes(virt, ip)
-                exp_len = (volumes * exp_base_num) + exp_cdrom
+        elif rev >= libvirt_rasd_dpool_changes:
+            volumes = enum_volumes(virt, ip)
+            exp_len = (volumes * exp_base_num) + exp_cdrom

     return exp_len

+def get_exp_net_rasd_len(virt, rev, id):
+    net_rasd_template_changes = 861
+
+    exp_base_num = 4
+
+    if id == "NetworkPool/0":
+        pool_types = 3
+        forward_modes = 2
+
+        return (exp_base_num * pool_types) + (exp_base_num * forward_modes)
+
+    if rev >= net_rasd_template_changes:
+        dev_types = 2
+
+        return exp_base_num * dev_types
+
+def get_exp_template_rasd_len(virt, ip, id):
+    curr_cim_rev, changeset = get_provider_version(virt, ip)
+
+    exp_len = 4
+
+    if 'DiskPool' in id:
+        exp_len = get_exp_disk_rasd_len(virt, ip, curr_cim_rev, id)
+
+    elif 'NetworkPool' in id:
+        exp_len = get_exp_net_rasd_len(virt, curr_cim_rev, id)
+
+    return exp_len
+
+

From kaitlin at linux.vnet.ibm.com Fri May 29 22:12:16 2009
From: kaitlin at linux.vnet.ibm.com (Kaitlin Rupert)
Date: Fri, 29 May 2009 15:12:16 -0700
Subject: [Libvirt-cim] [PATCH] [TEST] Update RedirectionService 01 to work with recent schema changes
Message-ID: <7dd85abd05e44f4a1c31.1243635136@localhost.localdomain>

# HG changeset patch
# User Kaitlin Rupert
# Date 1243635120 25200
# Node ID 7dd85abd05e44f4a1c319e7284205a8365f56f5e
# Parent a22bbefc05e5b294b254c58236b0f2abda65a23a
[TEST] Update RedirectionService 01 to work with recent schema changes

Signed-off-by: Kaitlin Rupert

diff -r a22bbefc05e5 -r 7dd85abd05e4 suites/libvirt-cim/cimtest/RedirectionService/01_enum_crs.py
--- a/suites/libvirt-cim/cimtest/RedirectionService/01_enum_crs.py Fri May 29 13:59:02 2009 -0700
+++ b/suites/libvirt-cim/cimtest/RedirectionService/01_enum_crs.py Fri May 29 15:12:00 2009 -0700
@@ -40,6 +40,7 @@
 REDIRECTION_SER_TYPE = 3
 CRS_MAX_SAP_REV = 724
 libvirtcim_hr_crs_changes = 688
+schema_upgrade_change = 881

 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']

 @do_main(sup_types)
@@ -88,9 +89,13 @@
                 'EnabledState' : 2,
                 'EnabledDefault' : 2,
                 'RequestedState' : 12,
-                'MaxConcurrentEnabledSAPs': max_sap_sessions
               }

+    if curr_cim_rev < schema_upgrade_change:
+        crs_list['MaxConcurrentEnabledSAPs'] = max_sap_sessions
+    else:
+        crs_list['MaxCurrentEnabledSAPs'] = max_sap_sessions
+
     try:
         crs = EnumInstances(server, classname)
     except Exception, detail:

From rmaciel at linux.vnet.ibm.com Sat May 30 17:30:44 2009
From: rmaciel at linux.vnet.ibm.com (Richard Maciel)
Date: Sat, 30 May 2009
14:30:44 -0300 Subject: [Libvirt-cim] [PATCH] [CU][RFC](#2) Added lexer/parser support for string array properties Message-ID: <0b79dd93e4ede362b4da.1243704644@localhost.localdomain> # HG changeset patch # User Richard Maciel # Date 1243702984 10800 # Node ID 0b79dd93e4ede362b4da5bb83dba8c6bca0ab783 # Parent 1cb3975921d590d4dda4de197a4dc687e45d1840 Fixes bug #53409 To accomplish this, the lexer and the parser were changed to accept array of strings values for properties Signed-off-by: Richard Maciel diff -r 1cb3975921d5 -r 0b79dd93e4ed eo_util_lexer.l --- a/eo_util_lexer.l Mon Apr 27 16:11:32 2009 -0700 +++ b/eo_util_lexer.l Sat May 30 14:03:04 2009 -0300 @@ -4,6 +4,7 @@ * Authors: * Gareth Bestor * Dan Smith + * Richard Maciel * */ /*** WARNING - COMMENTS IN LEX MUST BE TAB INDENTED ***/ @@ -91,7 +92,18 @@ return(STRING); } - /* Classname */ +\{ { + return(OPENBRACKET); + } + +\} { + return(CLOSEBRACKET); + } + +\, { + return(COMMA); + } + /* NOTE - this rule only applies after a 'INSTANCE OF' has been read in */ [A-Za-z][A-Za-z0-9_]* { BEGIN INITIAL; /* Go back to normal parsing rules now */ diff -r 1cb3975921d5 -r 0b79dd93e4ed eo_util_parser.y --- a/eo_util_parser.y Mon Apr 27 16:11:32 2009 -0700 +++ b/eo_util_parser.y Sat May 30 14:03:04 2009 -0300 @@ -4,6 +4,7 @@ * Authors: * Gareth Bestor * Dan Smith + * Richard Maciel */ /* DEFINITIONS SECTION */ @@ -20,15 +21,24 @@ /* specify prototypes to get rid of warnings */ int eo_parse_lex (void); void eo_parse_error(char *); +inline void ins_chars_into_cmstr_arr(const CMPIBroker *broker, + CMPIArray *arr, + CMPICount index, + char *str); #define RC_OK 0 #define RC_EOF EOF #define RC_INVALID_CLASS -1000 +#define EODEBUG 1 /* DEFINE ANY GLOBAL VARS HERE */ static const CMPIBroker * _BROKER; static CMPIInstance ** _INSTANCE; static const char * _NAMESPACE; +static CMPICount stringarraysize; +static char **stringarray; +static char *stringarraypropname; + #ifdef EODEBUG #define EOTRACE(fmt, arg...) fprintf(stderr, fmt, ##arg) @@ -36,6 +46,7 @@ #define EOTRACE(fmt, arg...) 
#endif + int eo_parse_parseinstance(const CMPIBroker *broker, CMPIInstance **instance, const char *ns); @@ -52,7 +63,7 @@ } /* Define simple (untyped) lexical tokens */ -%token INSTANCE OF ENDOFFILE +%token INSTANCE OF ENDOFFILE OPENBRACKET CLOSEBRACKET COMMA /* Define lexical tokens that return a value and their return type */ %token CLASS @@ -67,7 +78,7 @@ /* Rules section */ instance: /* empty */ - | INSTANCE OF CLASS '{' + | INSTANCE OF CLASS OPENBRACKET { EOTRACE("classname = %s\n",$3); CMPIObjectPath *op; @@ -82,7 +93,7 @@ return RC_INVALID_CLASS; free($3); } - properties '}' ';' + properties CLOSEBRACKET ';' { /* Return after reading in each instance */ return RC_OK; @@ -127,7 +138,17 @@ CMSetProperty(*_INSTANCE, $1, &($3), CMPI_boolean); free($1); } + | PROPERTYNAME '=' OPENBRACKET + { + EOTRACE("propertyname = %s\n" + "\ttype = CMPI_charsA\n", + $1); + stringarraysize = 0; + stringarraypropname = $1; + } + arrayofstrings CLOSEBRACKET ';' + | PROPERTYNAME '=' CIMNULL ';' { EOTRACE("propertyname = %s\n" @@ -136,11 +157,86 @@ } ; +arrayofstrings: STRING + { + EOTRACE("BootDevices[%u]=%s\n",stringarraysize, $1); + + stringarraysize++; + stringarray = (char **)realloc(stringarray, + sizeof(char *) * + stringarraysize); + stringarray[stringarraysize-1] = $1; + } + COMMA arrayofstrings + + + | STRING + { + CMPIArray *arr; + CMPICount i; + CMPIStatus s; + + EOTRACE("\tBootDevices[%u]=%s\n",stringarraysize, $1); + + stringarraysize++; + + arr = CMNewArray(_BROKER, + stringarraysize, + CMPI_string, + &s); + if (s.rc != CMPI_RC_OK || CMIsNullObject(arr)) + EOTRACE("Error creating array\n"); + + // Values to stringarraysize - 2 are in the + // temporary array + for (i = 0; i < stringarraysize - 1; i++) { + ins_chars_into_cmstr_arr(_BROKER, + arr, + i, + stringarray[i]); + + free(stringarray[i]); + } + + ins_chars_into_cmstr_arr(_BROKER, + arr, + stringarraysize - 1, + $1); + + free($1); + + CMSetProperty(*_INSTANCE, + stringarraypropname, + &arr, + CMPI_stringA); + + free(stringarraypropname); + } + ; + /* END OF RULES SECTION */ %% /* USER SUBROUTINE SECTION */ +inline void ins_chars_into_cmstr_arr(const CMPIBroker *broker, + CMPIArray *arr, + CMPICount index, + char *str) +{ + CMPIString *cm_str; + CMPIStatus s; + + cm_str = CMNewString(_BROKER, str, &s); + if (s.rc != CMPI_RC_OK || CMIsNullObject(cm_str)) + EOTRACE("Error creating CMPIString"); + + s = CMSetArrayElementAt(arr, index, &cm_str, CMPI_string); + if (s.rc != CMPI_RC_OK) + EOTRACE("Error setting array element %u\n" + "Error code: %d\n", index, s.rc); +} + int eo_parse_parseinstance(const CMPIBroker *broker, CMPIInstance **instance, const char *ns)
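
For context, the input format this grammar extension handles now allows a
property to carry an array of strings in addition to the existing scalar
forms. A rough, illustrative example of such an instance declaration follows;
the class name is made up here, and BootDevices is simply the property name
used in the patch's EOTRACE messages:

    instance of KVM_VirtualSystemSettingData {
        BootDevices = {"hd", "cdrom", "network"};
    };

Each quoted element is matched by the STRING token, the braces by
OPENBRACKET/CLOSEBRACKET, and the separators by COMMA; the accumulated
elements are then copied into a CMPIArray and attached to the instance via
CMSetProperty() with type CMPI_stringA.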