[libvirt] [PATCH 3/5] Support CPU placement in LXC driver

Daniel P. Berrange berrange at redhat.com
Thu Nov 10 14:30:54 UTC 2011


From: "Daniel P. Berrange" <berrange at redhat.com>

While LXC does not have the concept of VCPUs, and so we cannot do
per-VCPU pCPU placement, we can support VM-level CPU
placement. To do this, simply set the CPU affinity of the LXC
controller at startup. All child processes will inherit this
affinity.

* src/lxc/lxc_controller.c: Set process affinity
---
 src/lxc/lxc_controller.c |   62 ++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 62 insertions(+), 0 deletions(-)

diff --git a/src/lxc/lxc_controller.c b/src/lxc/lxc_controller.c
index 4f2326b..2c387a5 100644
--- a/src/lxc/lxc_controller.c
+++ b/src/lxc/lxc_controller.c
@@ -65,6 +65,8 @@
 #include "virfile.h"
 #include "virpidfile.h"
 #include "command.h"
+#include "processinfo.h"
+#include "nodeinfo.h"
 
 #define VIR_FROM_THIS VIR_FROM_LXC
 
@@ -323,6 +325,63 @@ static int lxcSetContainerNUMAPolicy(virDomainDefPtr def)
 #endif
 
 
+/* To be run while the LXC controller is still single threaded, so
+ * that every subsequently spawned child process inherits the affinity.
+ */
+static int lxcSetContainerCpuAffinity(virDomainDefPtr def)
+{
+    int i, hostcpus, maxcpu = CPU_SETSIZE;
+    virNodeInfo nodeinfo;
+    unsigned char *cpumap;
+    int cpumaplen;
+
+    VIR_DEBUG("Setting CPU affinity");
+
+    if (nodeGetInfo(NULL, &nodeinfo) < 0)
+        return -1;
+
+    /* setaffinity fails if you set bits for CPUs which
+     * aren't present, so we have to limit ourselves */
+    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
+    if (maxcpu > hostcpus)
+        maxcpu = hostcpus;
+
+    cpumaplen = VIR_CPU_MAPLEN(maxcpu);
+    if (VIR_ALLOC_N(cpumap, cpumaplen) < 0) {
+        virReportOOMError();
+        return -1;
+    }
+
+    if (def->cpumask) {
+        /* XXX ideally def->cpumask would already be stored in the
+         * libvirt cpumap format, avoiding this conversion loop */
+        for (i = 0 ; i < maxcpu && i < def->cpumasklen ; i++)
+            if (def->cpumask[i])
+                VIR_USE_CPU(cpumap, i);
+    } else {
+        /* This may look redundant, but we cannot assume libvirtd
+         * itself is running on all pCPUs, so we must explicitly widen
+         * the container's affinity to every pCPU when no mask is
+         * given in its config */
+        for (i = 0 ; i < maxcpu ; i++)
+            VIR_USE_CPU(cpumap, i);
+    }
+
+    /* We are presuming we are running in the LXC controller before
+     * the container has been started, so use '0' to indicate our own
+     * process ID. No other threads are running at this point
+     */
+    if (virProcessInfoSetAffinity(0, /* Self */
+                                  cpumap, cpumaplen, maxcpu) < 0) {
+        VIR_FREE(cpumap);
+        return -1;
+    }
+    VIR_FREE(cpumap);
+
+    return 0;
+}
+
+
 static int lxcSetContainerCpuTune(virCgroupPtr cgroup, virDomainDefPtr def)
 {
     int ret = -1;
@@ -541,6 +600,9 @@ static int lxcSetContainerResources(virDomainDefPtr def)
      * the cgroup, otherwise  the 'cpuset' controller
      * will reset the values we've just defined!
      */
+    if (lxcSetContainerCpuAffinity(def) < 0)
+        goto cleanup;
+
     if (lxcSetContainerNUMAPolicy(def) < 0)
         goto cleanup;
 
-- 
1.7.6.4




More information about the libvir-list mailing list