[libvirt] [PATCH 2/2] qemu: Check for thread <=> memory alignment
John Ferlan
jferlan at redhat.com
Thu Jun 23 19:21:04 UTC 2016
On 06/22/2016 12:37 PM, Martin Kletzander wrote:
> Some settings may be confusing and in case users use numad placement in
> combination with static placement we could warn them as it might not be
> wanted (but it's not forbidden).
>
> Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1254402
>
> Signed-off-by: Martin Kletzander <mkletzan at redhat.com>
> ---
> src/qemu/qemu_driver.c | 4 +-
> src/qemu/qemu_process.c | 107 +++++++++++++++++++++++++++++++++++++++++-------
> src/qemu/qemu_process.h | 6 ++-
> 3 files changed, 99 insertions(+), 18 deletions(-)
>
Perhaps this could have been two patches: one to add the driver argument
and a second to make use of it...
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 931efae27dee..4cf9f0560092 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -4702,7 +4702,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver,
> goto cleanup;
> }
>
> - if (qemuProcessSetupVcpu(vm, vcpu) < 0)
> + if (qemuProcessSetupVcpu(driver, vm, vcpu) < 0)
> goto cleanup;
>
> ret = 0;
> @@ -5828,7 +5828,7 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver,
>
> iothrid->thread_id = new_iothreads[idx]->thread_id;
>
> - if (qemuProcessSetupIOThread(vm, iothrid) < 0)
> + if (qemuProcessSetupIOThread(driver, vm, iothrid) < 0)
> goto cleanup;
>
> ret = 0;
> diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
> index d1247d2fd0f9..51709f8c9d58 100644
> --- a/src/qemu/qemu_process.c
> +++ b/src/qemu/qemu_process.c
> @@ -2306,6 +2306,75 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver,
> }
>
>
> +static int
> +qemuProcessCheckCpusMemsAlignment(virQEMUDriverPtr driver,
> + virDomainObjPtr vm,
> + virBitmapPtr cpumask,
> + const char *mem_mask)
> +{
> + int ret = -1;
> + int hostnodes = 0;
> + char *cpumask_str = NULL;
> + char *tmpmask_str = NULL;
> + char *mem_cpus_str = NULL;
> + virCapsPtr caps = NULL;
> + virBitmapPtr tmpmask = NULL;
> + virBitmapPtr mem_cpus = NULL;
> + virBitmapPtr mem_nodes = NULL;
> + virDomainNumatuneMemMode mem_mode;
> +
> + if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) != 0)
> + return 0;
> +
> + if (mem_mode != VIR_DOMAIN_NUMATUNE_MEM_STRICT)
> + return 0;
> +
> + if (!mem_mask || !cpumask)
> + return 0;
> +
> + if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
> + goto cleanup;
> +
> + if (!(tmpmask = virBitmapNewCopy(cpumask)))
> + goto cleanup;
> +
> + if ((hostnodes = virNumaGetMaxNode()) < 0)
> + goto cleanup;
> +
> + if (virBitmapParse(mem_mask, &mem_nodes, hostnodes) < 0)
> + goto cleanup;
> +
> + if (!(mem_cpus = virCapabilitiesGetCpusForNodemask(caps, mem_nodes)))
> + goto cleanup;
> +
> + virBitmapSubtract(tmpmask, mem_cpus);
> + if (!virBitmapIsAllClear(tmpmask)) {
> + if (!(cpumask_str = virBitmapFormat(cpumask)))
> + goto cleanup;
> +
> + if (!(tmpmask_str = virBitmapFormat(tmpmask)))
> + goto cleanup;
> +
> + if (!(mem_cpus_str = virBitmapFormat(mem_cpus)))
> + goto cleanup;
> +
> + VIR_WARN("CPUs '%s' in cpumask '%s' might not have access to any NUMA "
> + "node in memory's nodeset '%s' which consists of CPUs: '%s'.",
> + tmpmask_str, cpumask_str, mem_mask, mem_cpus_str);
Hopefully enough details ;-)
> + }
> +
> + ret = 0;
> + cleanup:
> + VIR_FREE(mem_cpus_str);
> + VIR_FREE(tmpmask_str);
> + VIR_FREE(cpumask_str);
> + virBitmapFree(mem_cpus);
Coverity complains that mem_nodes is leaked.
> + virBitmapFree(tmpmask);
> + virObjectUnref(caps);
> + return ret;
> +}
> +
> +
> /**
> * qemuProcessSetupPid:
> *
> @@ -2317,7 +2386,8 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver,
> * Returns 0 on success, -1 on error.
> */
> static int
> -qemuProcessSetupPid(virDomainObjPtr vm,
> +qemuProcessSetupPid(virQEMUDriverPtr driver,
> + virDomainObjPtr vm,
> pid_t pid,
> virCgroupThreadName nameval,
> int id,
> @@ -2390,6 +2460,10 @@ qemuProcessSetupPid(virDomainObjPtr vm,
> if ((period || quota) &&
> qemuSetupCgroupVcpuBW(cgroup, period, quota) < 0)
> goto cleanup;
> +
> + if (qemuProcessCheckCpusMemsAlignment(driver, vm,
> + use_cpumask, mem_mask) < 0)
> + goto cleanup;
It doesn't seem to matter that, for the emulator, virCgroupSetCpusetMems
has not been called yet at this point... but I figured I'd ask to double-check!
ACK with the Coverity error fixed, as this seems reasonable to me.
John
> }
>
> /* Setup legacy affinity. */
> @@ -2415,9 +2489,10 @@ qemuProcessSetupPid(virDomainObjPtr vm,
>
>
> static int
> -qemuProcessSetupEmulator(virDomainObjPtr vm)
> +qemuProcessSetupEmulator(virQEMUDriverPtr driver,
> + virDomainObjPtr vm)
> {
> - return qemuProcessSetupPid(vm, vm->pid, VIR_CGROUP_THREAD_EMULATOR,
> + return qemuProcessSetupPid(driver, vm, vm->pid, VIR_CGROUP_THREAD_EMULATOR,
> 0, vm->def->cputune.emulatorpin,
> VIR_PROC_POLICY_NONE, 0);
> }
> @@ -4616,20 +4691,22 @@ qemuProcessNetworkPrepareDevices(virDomainDefPtr def)
> * Returns 0 on success, -1 on error.
> */
> int
> -qemuProcessSetupVcpu(virDomainObjPtr vm,
> +qemuProcessSetupVcpu(virQEMUDriverPtr driver,
> + virDomainObjPtr vm,
> unsigned int vcpuid)
> {
> pid_t vcpupid = qemuDomainGetVcpuPid(vm, vcpuid);
> virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid);
>
> - return qemuProcessSetupPid(vm, vcpupid, VIR_CGROUP_THREAD_VCPU,
> + return qemuProcessSetupPid(driver, vm, vcpupid, VIR_CGROUP_THREAD_VCPU,
> vcpuid, vcpu->cpumask,
> vcpu->sched.policy, vcpu->sched.priority);
> }
>
>
> static int
> -qemuProcessSetupVcpus(virDomainObjPtr vm)
> +qemuProcessSetupVcpus(virQEMUDriverPtr driver,
> + virDomainObjPtr vm)
> {
> virDomainVcpuInfoPtr vcpu;
> unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
> @@ -4669,7 +4746,7 @@ qemuProcessSetupVcpus(virDomainObjPtr vm)
> if (!vcpu->online)
> continue;
>
> - if (qemuProcessSetupVcpu(vm, i) < 0)
> + if (qemuProcessSetupVcpu(driver, vm, i) < 0)
> return -1;
> }
>
> @@ -4678,11 +4755,12 @@ qemuProcessSetupVcpus(virDomainObjPtr vm)
>
>
> int
> -qemuProcessSetupIOThread(virDomainObjPtr vm,
> +qemuProcessSetupIOThread(virQEMUDriverPtr driver,
> + virDomainObjPtr vm,
> virDomainIOThreadIDDefPtr iothread)
> {
>
> - return qemuProcessSetupPid(vm, iothread->thread_id,
> + return qemuProcessSetupPid(driver, vm, iothread->thread_id,
> VIR_CGROUP_THREAD_IOTHREAD,
> iothread->iothread_id,
> iothread->cpumask,
> @@ -4692,14 +4770,15 @@ qemuProcessSetupIOThread(virDomainObjPtr vm,
>
>
> static int
> -qemuProcessSetupIOThreads(virDomainObjPtr vm)
> +qemuProcessSetupIOThreads(virQEMUDriverPtr driver,
> + virDomainObjPtr vm)
> {
> size_t i;
>
> for (i = 0; i < vm->def->niothreadids; i++) {
> virDomainIOThreadIDDefPtr info = vm->def->iothreadids[i];
>
> - if (qemuProcessSetupIOThread(vm, info) < 0)
> + if (qemuProcessSetupIOThread(driver, vm, info) < 0)
> return -1;
> }
>
> @@ -5124,7 +5203,7 @@ qemuProcessLaunch(virConnectPtr conn,
> goto cleanup;
>
> VIR_DEBUG("Setting emulator tuning/settings");
> - if (qemuProcessSetupEmulator(vm) < 0)
> + if (qemuProcessSetupEmulator(driver, vm) < 0)
> goto cleanup;
>
> VIR_DEBUG("Setting domain security labels");
> @@ -5205,11 +5284,11 @@ qemuProcessLaunch(virConnectPtr conn,
> goto cleanup;
>
> VIR_DEBUG("Setting vCPU tuning/settings");
> - if (qemuProcessSetupVcpus(vm) < 0)
> + if (qemuProcessSetupVcpus(driver, vm) < 0)
> goto cleanup;
>
> VIR_DEBUG("Setting IOThread tuning/settings");
> - if (qemuProcessSetupIOThreads(vm) < 0)
> + if (qemuProcessSetupIOThreads(driver, vm) < 0)
> goto cleanup;
>
> VIR_DEBUG("Setting any required VM passwords");
> diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
> index 37081ad4c177..ea830f628f40 100644
> --- a/src/qemu/qemu_process.h
> +++ b/src/qemu/qemu_process.h
> @@ -174,9 +174,11 @@ virDomainDiskDefPtr qemuProcessFindDomainDiskByAlias(virDomainObjPtr vm,
> int qemuConnectAgent(virQEMUDriverPtr driver, virDomainObjPtr vm);
>
>
> -int qemuProcessSetupVcpu(virDomainObjPtr vm,
> +int qemuProcessSetupVcpu(virQEMUDriverPtr driver,
> + virDomainObjPtr vm,
> unsigned int vcpuid);
> -int qemuProcessSetupIOThread(virDomainObjPtr vm,
> +int qemuProcessSetupIOThread(virQEMUDriverPtr driver,
> + virDomainObjPtr vm,
> virDomainIOThreadIDDefPtr iothread);
>
> int qemuRefreshVirtioChannelState(virQEMUDriverPtr driver,
>
More information about the libvir-list
mailing list