[libvirt] [PATCH 6/7] qemu: Add support to set IOThreads count

John Ferlan jferlan at redhat.com
Thu Mar 19 17:08:27 UTC 2015


Add qemuDomainSetIOThreads in order to adjust the live and config IOThread
values for the domain similar to the qemuDomainSetVcpus implementation.

The qemuDomainHotplugIOThread will more or less mirror qemuDomainHotplugVcpus,
including usage of the recently extracted APIs to manage the cgroup and to
add pinning if the domain has a cpuset.

Unlike the Vcpu code, IOThreads are implemented as QMP objects. This forces the
HotplugIOThread code to add/delete one object at a time to/from the end of the
list, rather than attempting to batch add/delete more than one and then having
to determine which operation failed, which could leave holes of unsequenced id
numbers.

Signed-off-by: John Ferlan <jferlan at redhat.com>
---
 src/conf/domain_audit.c  |   9 ++
 src/conf/domain_audit.h  |   6 +
 src/libvirt_private.syms |   1 +
 src/qemu/qemu_driver.c   | 331 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 347 insertions(+)

diff --git a/src/conf/domain_audit.c b/src/conf/domain_audit.c
index 159ebf5..92fcdd3 100644
--- a/src/conf/domain_audit.c
+++ b/src/conf/domain_audit.c
@@ -790,6 +790,15 @@ virDomainAuditVcpu(virDomainObjPtr vm,
     return virDomainAuditResource(vm, "vcpu", oldvcpu, newvcpu, reason, success);
 }
 
+void
+virDomainAuditIOThread(virDomainObjPtr vm,
+                       unsigned int oldiothread, unsigned int newiothread,
+                       const char *reason, bool success)
+{
+    return virDomainAuditResource(vm, "iothread", oldiothread, newiothread,
+                                  reason, success);
+}
+
 static void
 virDomainAuditLifecycle(virDomainObjPtr vm, const char *op,
                         const char *reason, bool success)
diff --git a/src/conf/domain_audit.h b/src/conf/domain_audit.h
index 4c1ef90..97dadca 100644
--- a/src/conf/domain_audit.h
+++ b/src/conf/domain_audit.h
@@ -102,6 +102,12 @@ void virDomainAuditVcpu(virDomainObjPtr vm,
                         const char *reason,
                         bool success)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(4);
+void virDomainAuditIOThread(virDomainObjPtr vm,
+                            unsigned int oldiothread,
+                            unsigned int newiothread,
+                            const char *reason,
+                            bool success)
+    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(4);
 void virDomainAuditSecurityLabel(virDomainObjPtr vm,
                                  bool success)
     ATTRIBUTE_NONNULL(1);
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index ca3520d..0d09c23 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -119,6 +119,7 @@ virDomainAuditDisk;
 virDomainAuditFS;
 virDomainAuditHostdev;
 virDomainAuditInit;
+virDomainAuditIOThread;
 virDomainAuditMemory;
 virDomainAuditNet;
 virDomainAuditNetDevice;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 1fca43c..dbd7ff5 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -6182,6 +6182,336 @@ qemuDomainPinIOThread(virDomainPtr dom,
     return ret;
 }
 
+static int
+qemuDomainHotplugIOThread(virQEMUDriverPtr driver,
+                          virDomainObjPtr vm,
+                          unsigned int req_niothreads)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    char *alias = NULL;
+    size_t i;
+    int rc = -1;
+    int ret = -1;
+    unsigned int old_niothreads = vm->def->iothreads;
+    int new_niothreads = 0;
+    qemuMonitorIOThreadsInfoPtr *new_iothreads = NULL;
+    virCgroupPtr cgroup_iothread = NULL;
+    char *mem_mask = NULL;
+
+    qemuDomainObjEnterMonitor(driver, vm);
+
+    if (!virDomainObjIsActive(vm)) {
+        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                       _("cannot change IOThreads for an inactive domain"));
+        goto exit_monitor;
+    }
+
+    /* Only allow to hot add or remove one IOThread at a time forcing
+     * the caller to "do the right thing" especially with respect to
+     * deletion since we're dealing with unsigned int math. Getting
+     * this error probably proves the caller used some bad math.
+     */
+    if (req_niothreads > old_niothreads + 1 ||
+        (old_niothreads && req_niothreads < old_niothreads - 1)) {
+        virReportError(VIR_ERR_INVALID_ARG,
+                       _("only allowed to hot add or remove one IOThread "
+                         "at a time for the domain, current count is: %d"),
+                       old_niothreads);
+        goto exit_monitor;
+    }
+
+    if (req_niothreads > old_niothreads) {
+        /* Adding to end - new alias is req_niothreads */
+        if (virAsprintf(&alias, "iothread%u", req_niothreads) < 0)
+            goto exit_monitor;
+
+        rc = qemuMonitorAddObject(priv->mon, "iothread", alias, NULL);
+    } else {
+        /* Removing from end - alias is current old_niothreads */
+        if (virAsprintf(&alias, "iothread%u", old_niothreads) < 0)
+            goto exit_monitor;
+
+        rc = qemuMonitorDelObject(priv->mon, alias);
+    }
+
+    if (rc < 0)
+        goto exit_monitor;
+
+    /* Although conceptually this could be done later - at this point,
+     * the running VM either has a new IOThread or has one removed.
+     * Since decisions depend upon knowing how many threads are in the
+     * running vm, adjusting this now that we've theoretically added or
+     * removed a thread.
+     */
+    if (virDomainObjIsActive(vm))
+        vm->def->iothreads = req_niothreads;
+
+    /* After hotplugging the IOThreads we need to re-detect the
+     * IOThreads thread_id's, adjust the cgroups, thread affinity,
+     * and the priv->iothreadpids list.
+     */
+    if ((new_niothreads = qemuMonitorGetIOThreads(priv->mon,
+                                                  &new_iothreads)) < 0) {
+        virResetLastError();
+        goto exit_monitor;
+    }
+
+    /* ohhh something went wrong */
+    if (new_niothreads != req_niothreads) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("got wrong number of IOThread ids from QEMU monitor. "
+                         "got %d, wanted %d"),
+                       new_niothreads, req_niothreads);
+
+        /* Let's set this too - just to be correct */
+        vm->def->iothreads = new_niothreads;
+        goto exit_monitor;
+    }
+
+    if (req_niothreads > old_niothreads) {
+        /* Add our thread_id to the list of thread id's */
+        if (VIR_REALLOC_N(priv->iothreadpids, priv->niothreadpids + 1) < 0)
+            goto exit_monitor;
+        priv->iothreadpids[priv->niothreadpids] =
+           new_iothreads[new_niothreads - 1]->thread_id;
+        priv->niothreadpids++;
+    } else {
+        /* Remove our thread_id from the list of thread id's */
+        priv->iothreadpids[priv->niothreadpids - 1] = 0;
+        if (VIR_REALLOC_N(priv->iothreadpids, priv->niothreadpids - 1) < 0)
+            goto exit_monitor;
+        priv->niothreadpids--;
+    }
+
+    if (qemuDomainObjExitMonitor(driver, vm) < 0)
+        goto cleanup;
+
+    if (virDomainNumatuneGetMode(vm->def->numa, -1) ==
+        VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
+        virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
+                                            priv->autoNodeset,
+                                            &mem_mask, -1) < 0)
+        goto cleanup;
+
+    /* Add */
+    if (req_niothreads > old_niothreads) {
+        unsigned int thread_id = new_iothreads[new_niothreads - 1]->thread_id;
+
+        /* Add IOThread to cgroup if present */
+        if (priv->cgroup) {
+            cgroup_iothread = qemuDomainHotplugAddCgroup(priv->cgroup,
+                                                         virCgroupNewIOThread,
+                                                         new_niothreads,
+                                                         mem_mask,
+                                                         thread_id);
+            if (!cgroup_iothread)
+                goto cleanup;
+        }
+
+        /* Inherit def->cpuset */
+        if (vm->def->cpumask) {
+            if (qemuDomainHotplugAddPin(vm->def->cpumask,
+                                        new_niothreads,
+                                        thread_id,
+                                        &vm->def->cputune.iothreadspin,
+                                        &vm->def->cputune.niothreadspin,
+                                        qemuSetupCgroupIOThreadsPin,
+                                        cgroup_iothread) < 0)
+                goto cleanup;
+        }
+    } else { /* Delete */
+        /* Remove the cgroup and pin related links */
+        if (qemuDomainHotplugDelCgroupPin(priv->cgroup,
+                                          virCgroupNewIOThread,
+                                          old_niothreads,
+                                          &vm->def->cputune.iothreadspin,
+                                          &vm->def->cputune.niothreadspin) < 0)
+            goto cleanup;
+    }
+
+    ret = 0;
+
+ cleanup:
+    if (new_iothreads) {
+        for (i = 0; i < new_niothreads; i++)
+            qemuMonitorIOThreadsInfoFree(new_iothreads[i]);
+        VIR_FREE(new_iothreads);
+    }
+    VIR_FREE(mem_mask);
+    virDomainAuditIOThread(vm, old_niothreads, req_niothreads,
+                           "update", rc == 0);
+    if (cgroup_iothread)
+        virCgroupFree(&cgroup_iothread);
+    VIR_FREE(alias);
+    return ret;
+
+ exit_monitor:
+    ignore_value(qemuDomainObjExitMonitor(driver, vm));
+    goto cleanup;
+}
+
+static int
+qemuDomainSetIOThreads(virDomainPtr dom,
+                       unsigned int niothreads,
+                       unsigned int flags)
+{
+    virQEMUDriverPtr driver = dom->conn->privateData;
+    virDomainObjPtr vm = NULL;
+    virQEMUDriverConfigPtr cfg = NULL;
+    virCapsPtr caps = NULL;
+    qemuDomainObjPrivatePtr priv;
+    virCgroupPtr cgroup_temp = NULL;
+    virBitmapPtr all_nodes = NULL;
+    char *all_nodes_str = NULL;
+    char *mem_mask = NULL;
+    virDomainDefPtr persistentDef;
+    size_t i, j;
+    int ret = -1;
+
+    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+                  VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+    if ((unsigned short) niothreads != niothreads) {
+        virReportError(VIR_ERR_INVALID_ARG,
+                       _("argument out of range: %d"), niothreads);
+        return -1;
+    }
+
+    if (!(vm = qemuDomObjFromDomain(dom)))
+        goto cleanup;
+
+    cfg = virQEMUDriverGetConfig(driver);
+
+    if (virDomainSetIOThreadsEnsureACL(dom->conn, vm->def, flags) < 0)
+        goto cleanup;
+
+    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
+        goto cleanup;
+
+    /* Same value? Nothing to do */
+    if (niothreads == vm->def->iothreads) {
+        ret = 0;
+        goto cleanup;
+    }
+
+    priv = vm->privateData;
+
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+        goto cleanup;
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        if (virCgroupNewEmulator(priv->cgroup, false, &cgroup_temp) < 0)
+            goto endjob;
+
+        if (!(all_nodes = virNumaGetHostNodeset()))
+            goto endjob;
+
+        if (!(all_nodes_str = virBitmapFormat(all_nodes)))
+            goto endjob;
+
+        if (virCgroupGetCpusetMems(cgroup_temp, &mem_mask) < 0 ||
+            virCgroupSetCpusetMems(cgroup_temp, all_nodes_str) < 0)
+            goto endjob;
+    }
+
+    if (virDomainLiveConfigHelperMethod(caps, driver->xmlopt, vm, &flags,
+                                        &persistentDef) < 0)
+        goto endjob;
+
+    /* For a live change - let's make sure the binary supports this */
+    if (flags & VIR_DOMAIN_AFFECT_LIVE &&
+        !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
+        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                       _("IOThreads not supported with this binary"));
+        goto endjob;
+    }
+
+    /* If removing and there is a disk using the IOThread(s) to be
+     * removed, then fail (both live and config). We need the dual
+     * loop since more than one IOThread can be removed at a time and
+     * we need to check all that are being removed. Although this is
+     * unsigned arithmetic 'i' cannot get less than zero since the code
+     * is comparing the 'id' being used which is defined as 1..n
+     */
+    if (niothreads < vm->def->iothreads) {
+        for (i = vm->def->iothreads; i > niothreads; i--) {
+            for (j = 0; j < vm->def->ndisks; j++) {
+                if (vm->def->disks[j]->iothread == i) {
+                    virReportError(VIR_ERR_INVALID_ARG,
+                                   _("cannot remove IOThread %zu since it "
+                                     "is being used by disk path '%s'"),
+                                   i, NULLSTR(vm->def->disks[j]->src->path));
+                    goto endjob;
+                }
+            }
+        }
+    }
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        /* Hot plug/unplug one at a time - incrementally when adding
+         * and decrementally when removing - this avoids "gaps" in
+         * the sequencing of the list if there's an error especially
+         * during the removal processing
+         */
+        if (niothreads > vm->def->iothreads) {
+            for (i = vm->def->iothreads + 1; i <= niothreads; i++)
+                if (qemuDomainHotplugIOThread(driver, vm, i) < 0)
+                    goto endjob;
+        } else {
+            /* NB: vm->def->iothreads is unsigned - so if we're going to
+             * zero IOThreads, then we cannot use "just" - 1 math for 'i'
+             */
+            for (i = vm->def->iothreads - 1;
+                 i >= niothreads && vm->def->iothreads; i--)
+                if (qemuDomainHotplugIOThread(driver, vm, i) < 0)
+                    goto endjob;
+        }
+
+        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
+            goto endjob;
+    }
+
+    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+        /* Remove iothreadpin entries for iothreads that were unplugged.
+         * We take from the end just in case there's an error "in the middle"
+         * which could leave id's such as 1, 2, 4.  If we added a new iothread
+         * it would also get id 4, which wouldn't be good
+         */
+        if (niothreads < persistentDef->iothreads) {
+            for (i = persistentDef->iothreads; i > niothreads; i--)
+                virDomainPinDel(&persistentDef->cputune.iothreadspin,
+                                &persistentDef->cputune.niothreadspin,
+                                i);
+        }
+
+        persistentDef->iothreads = niothreads;
+        if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0)
+            goto endjob;
+    }
+
+    ret = 0;
+
+ endjob:
+    if (mem_mask) {
+        virErrorPtr err = virSaveLastError();
+        virCgroupSetCpusetMems(cgroup_temp, mem_mask);
+        virSetError(err);
+        virFreeError(err);
+    }
+
+    qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+    qemuDomObjEndAPI(&vm);
+    VIR_FREE(mem_mask);
+    VIR_FREE(all_nodes_str);
+    virBitmapFree(all_nodes);
+    virCgroupFree(&cgroup_temp);
+    virObjectUnref(caps);
+    virObjectUnref(cfg);
+    return ret;
+}
+
 static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel)
 {
     virQEMUDriverPtr driver = dom->conn->privateData;
@@ -19868,6 +20198,7 @@ static virHypervisorDriver qemuHypervisorDriver = {
     .domainGetMaxVcpus = qemuDomainGetMaxVcpus, /* 0.4.4 */
     .domainGetIOThreadsInfo = qemuDomainGetIOThreadsInfo, /* 1.2.14 */
     .domainPinIOThread = qemuDomainPinIOThread, /* 1.2.14 */
+    .domainSetIOThreads = qemuDomainSetIOThreads, /* 1.2.14 */
     .domainGetSecurityLabel = qemuDomainGetSecurityLabel, /* 0.6.1 */
     .domainGetSecurityLabelList = qemuDomainGetSecurityLabelList, /* 0.10.0 */
     .nodeGetSecurityModel = qemuNodeGetSecurityModel, /* 0.6.1 */
-- 
2.1.0




More information about the libvir-list mailing list