[libvirt] [PATCH 1/3] numad: Convert node list to cpumap before setting affinity

Osier Yang jyang at redhat.com
Wed Apr 11 14:40:32 UTC 2012


Instead of returning a CPU list, numad returns a NUMA node
list, so this patch converts the node list to a cpumap
before setting the affinity. Otherwise, the domain
processes will be pinned only to CPU[$numa_cell_num],
which will cause a significant performance loss.

Also, because numad balances the affinity dynamically,
reflecting the cpuset from numad back into the XML doesn't
make much sense, and it may just confuse users. Thus the
better approach is not to reflect it back to the XML, and
in that case it's better to ignore the cpuset when parsing
the XML.

The code to update the cpuset is removed in this patch
as well; a follow-up patch will ignore the manually
specified "cpuset" if "placement" is "auto", and the
documentation will be updated too.

---
The patch is tested on a NUMA box with 4 cells, 24 CPUs.
---
 src/qemu/qemu_process.c |   33 ++++++++++++++++++++-------------
 1 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 481b4f3..0bf743b 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1819,36 +1819,43 @@ qemuProcessInitCpuAffinity(struct qemud_driver *driver,
     }
 
     if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
-        char *tmp_cpumask = NULL;
         char *nodeset = NULL;
+        char *nodemask = NULL;
 
         nodeset = qemuGetNumadAdvice(vm->def);
         if (!nodeset)
             return -1;
 
-        if (VIR_ALLOC_N(tmp_cpumask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
+        if (VIR_ALLOC_N(nodemask, VIR_DOMAIN_CPUMASK_LEN) < 0) {
             virReportOOMError();
             return -1;
         }
 
-        if (virDomainCpuSetParse(nodeset, 0, tmp_cpumask,
+        if (virDomainCpuSetParse(nodeset, 0, nodemask,
                                  VIR_DOMAIN_CPUMASK_LEN) < 0) {
-            VIR_FREE(tmp_cpumask);
+            VIR_FREE(nodemask);
             VIR_FREE(nodeset);
             return -1;
         }
 
-        for (i = 0; i < maxcpu && i < VIR_DOMAIN_CPUMASK_LEN; i++) {
-            if (tmp_cpumask[i])
-                VIR_USE_CPU(cpumap, i);
+        /* numad returns the NUMA node list, convert it to cpumap */
+        int prev_total_ncpus = 0;
+        for (i = 0; i < driver->caps->host.nnumaCell; i++) {
+            int j;
+            int cur_ncpus = driver->caps->host.numaCell[i]->ncpus;
+            if (nodemask[i]) {
+                for (j = prev_total_ncpus;
+                     j < cur_ncpus + prev_total_ncpus &&
+                     j < maxcpu &&
+                     j < VIR_DOMAIN_CPUMASK_LEN;
+                     j++) {
+                    VIR_USE_CPU(cpumap, j);
+                }
+            }
+            prev_total_ncpus += cur_ncpus;
         }
 
-        VIR_FREE(vm->def->cpumask);
-        vm->def->cpumask = tmp_cpumask;
-        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
-            VIR_WARN("Unable to save status on vm %s after state change",
-                     vm->def->name);
-        }
+        VIR_FREE(nodemask);
         VIR_FREE(nodeset);
     } else {
         if (vm->def->cpumask) {
-- 
1.7.1




More information about the libvir-list mailing list