[libvirt] [PATCH v2 24/27] qemu: Replace checking for vcpu<->pid mapping availability with a helper

Peter Krempa pkrempa at redhat.com
Fri Nov 27 16:16:49 UTC 2015


Add qemuDomainHasVcpuPids to do the checking and replace the in-place
checks with it.

We no longer need to check whether the array contains fake data
(vcpupids[0] == vm->pid), since that was removed in commits
b07f3d821dfb11a118ee75ea275fd6ab737d9500 and
65686e5a81d654d834d338fceeaf0229b2ca4f0d.
---
 src/qemu/qemu_cgroup.c  |  7 ++-----
 src/qemu/qemu_domain.c  | 15 +++++++++++++++
 src/qemu/qemu_domain.h  |  2 ++
 src/qemu/qemu_driver.c  | 29 +++++++++++++----------------
 src/qemu/qemu_process.c |  7 ++++---
 5 files changed, 36 insertions(+), 24 deletions(-)

diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index fa0b97b..a9cf9e8 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -1000,12 +1000,9 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
         !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
         return 0;

-    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
-        /* If we don't know VCPU<->PID mapping or all vcpu runs in the same
-         * thread, we cannot control each vcpu.
-         */
+    /* If vCPU<->pid mapping is missing we can't do vCPU pinning */
+    if (!qemuDomainHasVcpuPids(vm))
         return 0;
-    }

     if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
         mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index c7687b7..32ee5de 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -4098,3 +4098,18 @@ qemuDomainRequiresMlock(virDomainDefPtr def)

     return false;
 }
+
+
+/**
+ * qemuDomainHasVcpuPids:
+ * @vm: Domain object
+ *
+ * Returns true if we were able to successfully detect vCPU pids for the VM.
+ */
+bool
+qemuDomainHasVcpuPids(virDomainObjPtr vm)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+
+    return priv->nvcpupids > 0;
+}
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 31c7d33..5e2b699 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -505,4 +505,6 @@ int qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
                                        virQEMUCapsPtr qemuCaps,
                                        const virDomainMemoryDef *mem);

+bool qemuDomainHasVcpuPids(virDomainObjPtr vm);
+
 #endif /* __QEMU_DOMAIN_H__ */
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 5c3703f..3b3761a 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1428,7 +1428,7 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
     size_t i, v;
     qemuDomainObjPrivatePtr priv = vm->privateData;

-    if (priv->vcpupids == NULL) {
+    if (!qemuDomainHasVcpuPids(vm)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("cpu affinity is not supported"));
         return -1;
@@ -5118,7 +5118,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
     }

     if (def) {
-        if (priv->vcpupids == NULL) {
+        if (!qemuDomainHasVcpuPids(vm)) {
             virReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("cpu affinity is not supported"));
             goto endjob;
@@ -10287,21 +10287,18 @@ qemuSetVcpusBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
     if (period == 0 && quota == 0)
         return 0;

-    /* If we does not know VCPU<->PID mapping or all vcpu runs in the same
-     * thread, we cannot control each vcpu. So we only modify cpu bandwidth
-     * when each vcpu has a separated thread.
-     */
-    if (priv->nvcpupids != 0 && priv->vcpupids[0] != vm->pid) {
-        for (i = 0; i < priv->nvcpupids; i++) {
-            if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_VCPU, i,
-                                   false, &cgroup_vcpu) < 0)
-                goto cleanup;
+    if (!qemuDomainHasVcpuPids(vm))
+        return 0;

-            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
-                goto cleanup;
+    for (i = 0; i < priv->nvcpupids; i++) {
+        if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_VCPU, i,
+                               false, &cgroup_vcpu) < 0)
+            goto cleanup;

-            virCgroupFree(&cgroup_vcpu);
-        }
+        if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
+            goto cleanup;
+
+        virCgroupFree(&cgroup_vcpu);
     }

     return 0;
@@ -10604,7 +10601,7 @@ qemuGetVcpusBWLive(virDomainObjPtr vm,
     int ret = -1;

     priv = vm->privateData;
-    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+    if (!qemuDomainHasVcpuPids(vm)) {
         /* We do not create sub dir for each vcpu */
         rc = qemuGetVcpuBWLive(priv->cgroup, period, quota);
         if (rc < 0)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index a3ddb4a..2de2248 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2240,12 +2240,13 @@ qemuProcessSetVcpuAffinities(virDomainObjPtr vm)
     virDomainPinDefPtr pininfo;
     int n;
     int ret = -1;
-    VIR_DEBUG("Setting affinity on CPUs nvcpupin=%zu nvcpus=%d nvcpupids=%d",
-              def->cputune.nvcpupin, virDomainDefGetVcpus(def), priv->nvcpupids);
+    VIR_DEBUG("Setting affinity on CPUs nvcpupin=%zu nvcpus=%d hasVcpupids=%d",
+              def->cputune.nvcpupin, virDomainDefGetVcpus(def),
+              qemuDomainHasVcpuPids(vm));
     if (!def->cputune.nvcpupin)
         return 0;

-    if (priv->vcpupids == NULL) {
+    if (!qemuDomainHasVcpuPids(vm)) {
         /* If any CPU has custom affinity that differs from the
          * VM default affinity, we must reject it
          */
-- 
2.6.2




More information about the libvir-list mailing list