[libvirt] [PATCH v3 3/9] qemu: Implement the qemu driver fetch for IOThreads

John Ferlan jferlan at redhat.com
Tue Feb 17 21:03:52 UTC 2015


Depending on the flags passed, attempt to return either the active/live
IOThread data for the domain or the config data.

The active/live path will call into the monitor to get the IOThread
data and then correlate the thread_id values returned by the monitor
with the currently running system threads in order to ascertain the
affinity of each iothread_id.

The config path will map each of the configured IOThreads and return
any configured iothreadspin data.

Both paths will scan the 'targetDef' disk list looking for disks that
have been assigned to a specific IOThread; an IOThread may have no
resources associated with it.
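
For reference, a minimal caller sketch (hypothetical client code, not
part of this patch; it assumes the public virDomainGetIOThreadsInfo API
and virDomainIOThreadsInfo struct added earlier in this series, and an
existing virDomainPtr 'dom'):

    virDomainIOThreadsInfoPtr *info = NULL;
    int niothreads = virDomainGetIOThreadsInfo(dom, &info,
                                               VIR_DOMAIN_AFFECT_LIVE);
    int i;
    size_t j;

    if (niothreads < 0)
        return;   /* error already reported by libvirt */

    for (i = 0; i < niothreads; i++) {
        printf("iothread_id %u\n", info[i]->iothread_id);
        for (j = 0; j < info[i]->nresources; j++)
            printf("  disk: %s\n", info[i]->resources[j]);
        virDomainIOThreadsInfoFree(info[i]);
    }
    free(info);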

Signed-off-by: John Ferlan <jferlan at redhat.com>
---
 src/qemu/qemu_driver.c | 286 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 286 insertions(+)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 1bbbe9b..2c9d08c 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5541,6 +5541,291 @@ qemuDomainGetMaxVcpus(virDomainPtr dom)
                                          VIR_DOMAIN_VCPU_MAXIMUM));
 }
 
+static int
+qemuDomainGetIOThreadsResources(virDomainDefPtr targetDef,
+                                int niothreads,
+                                virDomainIOThreadsInfoPtr **info)
+{
+    virDomainIOThreadsInfoPtr *iothrp = *info;
+    size_t i, j;
+    int ret = -1;
+
+    for (i = 0; i < targetDef->ndisks; i++) {
+        virDomainDiskDefPtr disk = targetDef->disks[i];
+
+        if (!disk->iothread)
+            continue;
+
+        /* Find the info entry for this thread_id */
+        for (j = 0; j < niothreads; j++) {
+            if (disk->iothread == iothrp[j]->iothread_id)
+                break;
+        }
+        /* Shouldn't happen, but let's not take any chances */
+        if (j == niothreads) {
+            virReportError(VIR_ERR_INTERNAL_ERROR,
+                           _("disk source '%s' assigned to iothread_id '%u' "
+                             "which doesn't exist"),
+                           disk->src->path, disk->iothread);
+            goto cleanup;
+        }
+
+        if (VIR_EXPAND_N(iothrp[j]->resources, iothrp[j]->nresources, 1) < 0)
+            goto cleanup;
+
+        if (VIR_STRDUP(iothrp[j]->resources[iothrp[j]->nresources - 1],
+                       disk->src->path) < 0)
+            goto cleanup;
+    }
+
+    ret = 0;
+
+ cleanup:
+    if (ret < 0) {
+        for (i = 0; i < niothreads; i++)
+            virDomainIOThreadsInfoFree(iothrp[i]);
+        VIR_FREE(*info);
+    }
+    return ret;
+}
+
+static int
+qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
+                           virDomainObjPtr vm,
+                           virDomainIOThreadsInfoPtr **info)
+{
+    qemuDomainObjPrivatePtr priv;
+    qemuMonitorIOThreadsInfoPtr *iothreads = NULL;
+    virDomainIOThreadsInfoPtr *info_ret = NULL;
+    int niothreads = 0;
+    int maxcpu, hostcpus, maplen;
+    size_t i;
+    int ret = -1;
+
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+        goto cleanup;
+
+    if (!virDomainObjIsActive(vm)) {
+        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                       _("cannot list IOThreads for an inactive domain"));
+        goto endjob;
+    }
+
+    priv = vm->privateData;
+    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
+        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                       _("IOThreads not supported with this binary"));
+        goto endjob;
+    }
+
+    if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+        goto endjob;
+    niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads);
+    if (qemuDomainObjExitMonitor(driver, vm) < 0)
+        goto endjob;
+    if (niothreads < 0)
+        goto endjob;
+
+    /* Nothing to do */
+    if (niothreads == 0) {
+        ret = 0;
+        goto endjob;
+    }
+
+    if ((hostcpus = nodeGetCPUCount()) < 0)
+        goto endjob;
+
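+    /* Build a cpumap sized for the whole host: VIR_CPU_MAPLEN yields the
+     * number of bytes needed for one bit per CPU, and maxcpu is clamped
+     * back down so no bits beyond the real host CPU count are examined. */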
+    maplen = VIR_CPU_MAPLEN(hostcpus);
+    maxcpu = maplen * 8;
+    if (maxcpu > hostcpus)
+        maxcpu = hostcpus;
+
+    if (VIR_ALLOC_N(info_ret, niothreads) < 0)
+        goto endjob;
+
+    for (i = 0; i < niothreads; i++) {
+        virBitmapPtr map = NULL;
+        unsigned char *tmpmap = NULL;
+        int tmpmaplen = 0;
+
+        if (VIR_ALLOC(info_ret[i]) < 0)
+            goto endjob;
+
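+        /* Monitor IOThread objects are named "iothread<N>"; parse the
+         * numeric suffix to recover the iothread_id. */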
+        if (virStrToLong_ui(iothreads[i]->name + strlen("iothread"), NULL, 10,
+                            &info_ret[i]->iothread_id) < 0)
+            goto endjob;
+
+        if (VIR_ALLOC_N(info_ret[i]->cpumap, maplen) < 0)
+            goto endjob;
+
+        if (virProcessGetAffinity(iothreads[i]->thread_id, &map, maxcpu) < 0)
+            goto endjob;
+
+        virBitmapToData(map, &tmpmap, &tmpmaplen);
+        if (tmpmaplen > maplen)
+            tmpmaplen = maplen;
+        memcpy(info_ret[i]->cpumap, tmpmap, tmpmaplen);
+        info_ret[i]->cpumaplen = tmpmaplen;
+
+        VIR_FREE(tmpmap);
+        virBitmapFree(map);
+    }
+
+    *info = info_ret;
+    info_ret = NULL;
+    ret = niothreads;
+
+ endjob:
+    qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+    if (info_ret) {
+        for (i = 0; i < niothreads; i++)
+            virDomainIOThreadsInfoFree(info_ret[i]);
+        VIR_FREE(info_ret);
+    }
+    if (iothreads) {
+        for (i = 0; i < niothreads; i++)
+            qemuMonitorIOThreadsInfoFree(iothreads[i]);
+        VIR_FREE(iothreads);
+    }
+
+    return ret;
+}
+
+static int
+qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
+                             virDomainIOThreadsInfoPtr **info)
+{
+    virDomainIOThreadsInfoPtr *info_ret = NULL;
+    virDomainVcpuPinDefPtr *iothreadspin_list;
+    virBitmapPtr cpumask = NULL;
+    unsigned char *cpumap;
+    int maxcpu, hostcpus, maplen;
+    size_t i, pcpu;
+    bool pinned;
+    int ret = -1;
+
+    if (targetDef->iothreads == 0)
+        return 0;
+
+    if ((hostcpus = nodeGetCPUCount()) < 0)
+        goto cleanup;
+
+    maplen = VIR_CPU_MAPLEN(hostcpus);
+    maxcpu = maplen * 8;
+    if (maxcpu > hostcpus)
+        maxcpu = hostcpus;
+
+    if (VIR_ALLOC_N(info_ret, targetDef->iothreads) < 0)
+        goto cleanup;
+
+    for (i = 0; i < targetDef->iothreads; i++) {
+        if (VIR_ALLOC(info_ret[i]) < 0)
+            goto cleanup;
+
+        /* IOThread IDs start at 1 */
+        info_ret[i]->iothread_id = i + 1;
+
+        if (VIR_ALLOC_N(info_ret[i]->cpumap, maplen) < 0)
+            goto cleanup;
+
+        /* Initialize the cpumap: all host CPUs set, bits beyond maxcpu cleared */
+        info_ret[i]->cpumaplen = maplen;
+        memset(info_ret[i]->cpumap, 0xff, maplen);
+        if (maxcpu % 8)
+            info_ret[i]->cpumap[maplen - 1] &= (1 << maxcpu % 8) - 1;
+    }
+
+    /* If iothreadspin settings exist, clear the cpumap bits for the unpinned physical cpus */
+    iothreadspin_list = targetDef->cputune.iothreadspin;
+    for (i = 0; i < targetDef->cputune.niothreadspin; i++) {
+        /* The iothreadspin data reuses the vcpupin 'vcpuid' field to store
+         * the iothread_id; since iothread_id is the info_ret index + 1,
+         * the entry we want is at vcpuid - 1.
+         */
+        cpumap = info_ret[iothreadspin_list[i]->vcpuid - 1]->cpumap;
+        cpumask = iothreadspin_list[i]->cpumask;
+
+        for (pcpu = 0; pcpu < maxcpu; pcpu++) {
+            if (virBitmapGetBit(cpumask, pcpu, &pinned) < 0)
+                goto cleanup;
+            if (!pinned)
+                VIR_UNUSE_CPU(cpumap, pcpu);
+        }
+    }
+
+    *info = info_ret;
+    info_ret = NULL;
+    ret = targetDef->iothreads;
+
+ cleanup:
+    if (info_ret) {
+        for (i = 0; i < targetDef->iothreads; i++)
+            virDomainIOThreadsInfoFree(info_ret[i]);
+        VIR_FREE(info_ret);
+    }
+
+    return ret;
+}
+
+static int
+qemuDomainGetIOThreadsInfo(virDomainPtr dom,
+                           virDomainIOThreadsInfoPtr **info,
+                           unsigned int flags)
+{
+    virQEMUDriverPtr driver = dom->conn->privateData;
+    virDomainObjPtr vm;
+    virCapsPtr caps = NULL;
+    virDomainDefPtr targetDef = NULL;
+    int ret = -1;
+
+    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+                  VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+    if (!(vm = qemuDomObjFromDomain(dom)))
+        goto cleanup;
+
+    if (virDomainGetIOThreadsInfoEnsureACL(dom->conn, vm->def) < 0)
+        goto cleanup;
+
+    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
+        goto cleanup;
+
+    if (virDomainLiveConfigHelperMethod(caps, driver->xmlopt, vm, &flags,
+                                        &targetDef) < 0)
+        goto cleanup;
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE)
+        targetDef = vm->def;
+
+    /* Coverity didn't realize that targetDef must be set if we got here.  */
+    sa_assert(targetDef);
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE)
+        ret = qemuDomainGetIOThreadsLive(driver, vm, info);
+    else
+        ret = qemuDomainGetIOThreadsConfig(targetDef, info);
+
+    /* If we have IOThreads, then associate the disk resources with each
+     * IOThread.  If this fails, the info array is freed and we return -1.
+     */
+    if (ret > 0) {
+        if (qemuDomainGetIOThreadsResources(targetDef, ret, info) < 0)
+            ret = -1;
+    }
+
+ cleanup:
+    qemuDomObjEndAPI(&vm);
+    virObjectUnref(caps);
+    return ret;
+}
+
 static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel)
 {
     virQEMUDriverPtr driver = dom->conn->privateData;
@@ -19141,6 +19426,7 @@ static virHypervisorDriver qemuHypervisorDriver = {
     .domainGetEmulatorPinInfo = qemuDomainGetEmulatorPinInfo, /* 0.10.0 */
     .domainGetVcpus = qemuDomainGetVcpus, /* 0.4.4 */
     .domainGetMaxVcpus = qemuDomainGetMaxVcpus, /* 0.4.4 */
+    .domainGetIOThreadsInfo = qemuDomainGetIOThreadsInfo, /* 1.2.13 */
     .domainGetSecurityLabel = qemuDomainGetSecurityLabel, /* 0.6.1 */
     .domainGetSecurityLabelList = qemuDomainGetSecurityLabelList, /* 0.10.0 */
     .nodeGetSecurityModel = qemuNodeGetSecurityModel, /* 0.6.1 */
-- 
2.1.0



