[PATCH 4/5] qemu: Prefer -numa cpu over -numa node,cpus=

Igor Mammedov imammedo at redhat.com
Fri May 22 16:07:56 UTC 2020


On Fri, 22 May 2020 16:14:14 +0200
Michal Privoznik <mprivozn at redhat.com> wrote:

> QEMU is trying to obsolete -numa node,cpus= because it uses an
> ambiguous vCPU ID to [socket, die, core, thread] mapping. The new
> form is:
> 
>   -numa cpu,node-id=N,socket-id=S,die-id=D,core-id=C,thread-id=T
> 
> which is repeated for every vCPU and places it at [S, D, C, T]
> into guest NUMA node N.
> 
> While in general this is a magic mapping, we can deal with it.
> Firstly, with QEMU 2.7 or newer, libvirt ensures that if topology
> is given then maxvcpus must be sockets * dies * cores * threads
> (i.e. there are no 'holes').
> Secondly, if no topology is given then libvirt itself places each
> vCPU into a different socket (basically, it fakes a topology of
> [maxvcpus, 1, 1, 1]).
> Thirdly, we can copy whatever QEMU does when mapping vCPUs onto the
> topology, to make sure vCPUs don't start moving around.
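
(For a concrete before/after, taken from the test updates below: a
2-vCPU guest with -smp 2,sockets=2,dies=1,cores=1,threads=1 goes from

  -numa node,nodeid=0,cpus=0-1,memdev=ram-node0

to

  -numa node,nodeid=0,memdev=ram-node0 \
  -numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
  -numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0

i.e. with the faked flat topology each vCPU ends up in its own socket.)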
> 
> Note: migration from the old to the new command line works and
> therefore doesn't need any special handling.
> 
> Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1678085
> 
> Signed-off-by: Michal Privoznik <mprivozn at redhat.com>
> ---
>  src/qemu/qemu_command.c                       | 108 +++++++++++++++++-
>  .../hugepages-nvdimm.x86_64-latest.args       |   4 +-
>  ...memory-default-hugepage.x86_64-latest.args |  10 +-
>  .../memfd-memory-numa.x86_64-latest.args      |  10 +-
>  ...y-hotplug-nvdimm-access.x86_64-latest.args |   4 +-
>  ...ry-hotplug-nvdimm-align.x86_64-latest.args |   4 +-
>  ...ry-hotplug-nvdimm-label.x86_64-latest.args |   4 +-
>  ...ory-hotplug-nvdimm-pmem.x86_64-latest.args |   4 +-
>  ...ory-hotplug-nvdimm-ppc64.ppc64-latest.args |   4 +-
>  ...hotplug-nvdimm-readonly.x86_64-latest.args |   4 +-
>  .../memory-hotplug-nvdimm.x86_64-latest.args  |   4 +-
>  ...vhost-user-fs-fd-memory.x86_64-latest.args |   4 +-
>  ...vhost-user-fs-hugepages.x86_64-latest.args |   4 +-
>  ...host-user-gpu-secondary.x86_64-latest.args |   3 +-
>  .../vhost-user-vga.x86_64-latest.args         |   3 +-
>  15 files changed, 158 insertions(+), 16 deletions(-)
> 
> diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
> index 7d84fd8b5e..0de4fe4905 100644
> --- a/src/qemu/qemu_command.c
> +++ b/src/qemu/qemu_command.c
> @@ -7079,6 +7079,91 @@ qemuBuildNumaOldCPUs(virBufferPtr buf,
>  }
>  
>  
> +/**
> + * qemuTranslatevCPUID:
> + *
> + * For a given vCPU @id and vCPU topology (@cpu), compute the
> + * corresponding @socket, @die, @core and @thread. This assumes a
> + * linear topology, that is, every [socket, die, core, thread]
> + * combination is a valid vCPU ID and there are no 'holes'. This is
> + * ensured by qemuValidateDomainDef() if
> + * QEMU_CAPS_QUERY_HOTPLUGGABLE_CPUS is set.
I wouldn't make this assumption; each machine can have (and has) its own layout,
and now it's not hard to change that per machine version if necessary.

I'd suppose one could pull the list of possible CPUs from QEMU started
in preconfig mode with the desired -smp x,y,z using query-hotpluggable-cpus,
and then continue to configure NUMA with QMP commands using the provided
CPU layout.

How to present it to the libvirt user I'm not sure (perhaps give them
that list and let them select from it?).
But that's irrelevant to the patch: magical IDs for socket/core/...whatever
should not be generated by libvirt anymore, but rather taken from QEMU for a
given machine + -smp combination.

CCing Peter,
 as I vaguely recall him working on this issue (preconfig + NUMA over QMP).
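
Roughly, the flow I have in mind (the command names are real; the exact
field layout is from memory, so treat this as a sketch):

  $ qemu-system-x86_64 --preconfig -smp 8,sockets=2,dies=1,cores=2,threads=2 ...

  -> { "execute": "query-hotpluggable-cpus" }
  <- { "return": [ { "type": "qemu64-x86_64-cpu", "vcpus-count": 1,
                     "props": { "socket-id": 0, "core-id": 0, "thread-id": 0 } },
                   ... ] }
  -> { "execute": "set-numa-node",
       "arguments": { "type": "cpu", "node-id": 0,
                      "socket-id": 0, "core-id": 0, "thread-id": 0 } }
  -> { "execute": "x-exit-preconfig" }

i.e. libvirt would feed back the "props" QEMU reports instead of
computing them itself.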


> + * Moreover, if @diesSupported is false (QEMU lacks
> + * QEMU_CAPS_SMP_DIES) then @die is set to zero and @socket is
> + * computed without taking the number of dies into account.
> + *
> + * The algorithm is shamelessly copied over from QEMU's
> + * x86_topo_ids_from_idx() and its history (before introducing dies).
> + */
> +static void
> +qemuTranslatevCPUID(unsigned int id,
> +                    bool diesSupported,
> +                    virCPUDefPtr cpu,
> +                    unsigned int *socket,
> +                    unsigned int *die,
> +                    unsigned int *core,
> +                    unsigned int *thread)
> +{
> +    if (cpu && cpu->sockets) {
> +        *thread = id % cpu->threads;
> +        *core = id / cpu->threads % cpu->cores;
> +        if (diesSupported) {
> +            *die = id / (cpu->cores * cpu->threads) % cpu->dies;
> +            *socket = id / (cpu->dies * cpu->cores * cpu->threads);
> +        } else {
> +            *die = 0;
> +            *socket = id / (cpu->cores * cpu->threads) % cpu->sockets;
> +        }
> +    } else {
> +        /* If no topology was provided, then qemuBuildSmpCommandLine()
> +         * puts each vCPU into a separate socket. */
> +        *thread = 0;
> +        *core = 0;
> +        *die = 0;
> +        *socket = id;
> +    }
> +}
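
(To make the arithmetic concrete: with -smp 8,sockets=2,dies=1,cores=2,
threads=2 and dies supported, vCPU id 5 yields thread = 5 % 2 = 1,
core = 5 / 2 % 2 = 0, die = 5 / 4 % 1 = 0, socket = 5 / 4 = 1, i.e.
[socket 1, die 0, core 0, thread 1].)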
> +
> +
> +static void
> +qemuBuildNumaNewCPUs(virCommandPtr cmd,
> +                     virCPUDefPtr cpu,
> +                     virBitmapPtr cpumask,
> +                     size_t nodeid,
> +                     virQEMUCapsPtr qemuCaps)
> +{
> +    const bool diesSupported = virQEMUCapsGet(qemuCaps, QEMU_CAPS_SMP_DIES);
> +    ssize_t vcpuid = -1;
> +
> +    while ((vcpuid = virBitmapNextSetBit(cpumask, vcpuid)) >= 0) {
> +        unsigned int socket;
> +        unsigned int die;
> +        unsigned int core;
> +        unsigned int thread;
> +
> +        qemuTranslatevCPUID(vcpuid, diesSupported, cpu,
> +                            &socket, &die, &core, &thread);
> +
> +        virCommandAddArg(cmd, "-numa");
> +
> +        /* The simple fact that dies are supported by QEMU doesn't mean we
> +         * can put die-id onto the command line. QEMU accepts die-id only
> +         * if -smp dies was set to a value greater than 1. On the other
> +         * hand, this allows us to generate a shorter command line. */
> +        if (diesSupported && cpu && cpu->dies > 1) {
> +            virCommandAddArgFormat(cmd,
> +                                   "cpu,node-id=%zu,socket-id=%u,die-id=%u,core-id=%u,thread-id=%u",
> +                                   nodeid, socket, die, core, thread);
> +        } else {
> +            virCommandAddArgFormat(cmd,
> +                                   "cpu,node-id=%zu,socket-id=%u,core-id=%u,thread-id=%u",
> +                                   nodeid, socket, core, thread);
> +        }
> +    }
> +}
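
(None of the test files below exercise dies > 1, so for illustration only:
with -smp 4,sockets=1,dies=2,cores=2,threads=1 and all four vCPUs in node 0,
the longer form would come out as something like

  -numa cpu,node-id=0,socket-id=0,die-id=0,core-id=0,thread-id=0 \
  -numa cpu,node-id=0,socket-id=0,die-id=0,core-id=1,thread-id=0 \
  -numa cpu,node-id=0,socket-id=0,die-id=1,core-id=0,thread-id=0 \
  -numa cpu,node-id=0,socket-id=0,die-id=1,core-id=1,thread-id=0 )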
> +
> +
>  static int
>  qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
>                      virDomainDefPtr def,
> @@ -7090,6 +7175,7 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
>      g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
>      g_autofree virBufferPtr nodeBackends = NULL;
>      bool needBackend = false;
> +    bool newCpus = false;
>      int ret = -1;
>      size_t ncells = virDomainNumaGetNodeCount(def->numa);
>  
> @@ -7130,7 +7216,21 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
>          qemuBuildMemPathStr(def, cmd, priv) < 0)
>          goto cleanup;
>  
> +    /* Use the modern style of specifying vCPU topology only if:
> +     * -numa cpu is available; it was introduced at the same time as
> +     *           -numa dist, hence the slightly misleading capability
> +     *           test, and
> +     * query-hotpluggable-cpus is available, because then
> +     *                         qemuValidateDomainDef() ensures that if
> +     *                         a topology is specified it matches the
> +     *                         max vCPU count and we can take some
> +     *                         shortcuts in qemuTranslatevCPUID().
> +     */
> +    newCpus = virQEMUCapsGet(qemuCaps, QEMU_CAPS_NUMA_DIST) &&
> +              virQEMUCapsGet(qemuCaps, QEMU_CAPS_QUERY_HOTPLUGGABLE_CPUS);
> +
>      for (i = 0; i < ncells; i++) {
> +        virBitmapPtr cpu = virDomainNumaGetNodeCpumask(def->numa, i);
> +
>          if (needBackend) {
>              virCommandAddArg(cmd, "-object");
>              virCommandAddArgBuffer(cmd, &nodeBackends[i]);
> @@ -7139,8 +7239,9 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
>          virCommandAddArg(cmd, "-numa");
>          virBufferAsprintf(&buf, "node,nodeid=%zu", i);
>  
> -        if (qemuBuildNumaOldCPUs(&buf,
> -                                 virDomainNumaGetNodeCpumask(def->numa, i)) < 0)
> +        /* -numa cpu is supported from the same release as -numa dist */
> +        if (!newCpus &&
> +            qemuBuildNumaOldCPUs(&buf, cpu) < 0)
>              goto cleanup;
>  
>          if (needBackend)
> @@ -7150,6 +7251,9 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
>                                virDomainNumaGetNodeMemorySize(def->numa, i) / 1024);
>  
>          virCommandAddArgBuffer(cmd, &buf);
> +
> +        if (newCpus)
> +            qemuBuildNumaNewCPUs(cmd, def->cpu, cpu, i, qemuCaps);
>      }
>  
>      /* If NUMA node distance is specified for at least one pair
> diff --git a/tests/qemuxml2argvdata/hugepages-nvdimm.x86_64-latest.args b/tests/qemuxml2argvdata/hugepages-nvdimm.x86_64-latest.args
> index e80a95c84b..804fc59d74 100644
> --- a/tests/qemuxml2argvdata/hugepages-nvdimm.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/hugepages-nvdimm.x86_64-latest.args
> @@ -19,7 +19,9 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -smp 2,sockets=2,dies=1,cores=1,threads=1 \
>  -object memory-backend-file,id=ram-node0,prealloc=yes,\
>  mem-path=/dev/hugepages2M/libvirt/qemu/-1-QEMUGuest1,share=yes,size=1073741824 \
> --numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
> +-numa node,nodeid=0,memdev=ram-node0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm,\
>  share=yes,size=536870912 \
>  -device nvdimm,node=0,memdev=memnvdimm0,id=nvdimm0,slot=0 \
> diff --git a/tests/qemuxml2argvdata/memfd-memory-default-hugepage.x86_64-latest.args b/tests/qemuxml2argvdata/memfd-memory-default-hugepage.x86_64-latest.args
> index 5d256c42bc..2e8d933fc2 100644
> --- a/tests/qemuxml2argvdata/memfd-memory-default-hugepage.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/memfd-memory-default-hugepage.x86_64-latest.args
> @@ -20,7 +20,15 @@ file=/tmp/lib/domain--1-instance-00000092/master-key.aes \
>  -smp 8,sockets=1,dies=1,cores=8,threads=1 \
>  -object memory-backend-memfd,id=ram-node0,hugetlb=yes,hugetlbsize=2097152,\
>  share=yes,size=15032385536,host-nodes=3,policy=preferred \
> --numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
> +-numa node,nodeid=0,memdev=ram-node0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=1,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=2,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=3,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=4,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=5,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=6,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=7,thread-id=0 \
>  -uuid 126f2720-6f8e-45ab-a886-ec9277079a67 \
>  -display none \
>  -no-user-config \
> diff --git a/tests/qemuxml2argvdata/memfd-memory-numa.x86_64-latest.args b/tests/qemuxml2argvdata/memfd-memory-numa.x86_64-latest.args
> index 5d256c42bc..2e8d933fc2 100644
> --- a/tests/qemuxml2argvdata/memfd-memory-numa.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/memfd-memory-numa.x86_64-latest.args
> @@ -20,7 +20,15 @@ file=/tmp/lib/domain--1-instance-00000092/master-key.aes \
>  -smp 8,sockets=1,dies=1,cores=8,threads=1 \
>  -object memory-backend-memfd,id=ram-node0,hugetlb=yes,hugetlbsize=2097152,\
>  share=yes,size=15032385536,host-nodes=3,policy=preferred \
> --numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
> +-numa node,nodeid=0,memdev=ram-node0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=1,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=2,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=3,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=4,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=5,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=6,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=7,thread-id=0 \
>  -uuid 126f2720-6f8e-45ab-a886-ec9277079a67 \
>  -display none \
>  -no-user-config \
> diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.x86_64-latest.args
> index 89138f46c4..6bf575d486 100644
> --- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.x86_64-latest.args
> @@ -17,7 +17,9 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -m size=219136k,slots=16,maxmem=1099511627776k \
>  -overcommit mem-lock=off \
>  -smp 2,sockets=2,dies=1,cores=1,threads=1 \
> --numa node,nodeid=0,cpus=0-1,mem=214 \
> +-numa node,nodeid=0,mem=214 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm,\
>  share=no,size=536870912 \
>  -device nvdimm,node=0,memdev=memnvdimm0,id=nvdimm0,slot=0 \
> diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.x86_64-latest.args
> index 1a8e7932dc..84571a056a 100644
> --- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.x86_64-latest.args
> @@ -17,7 +17,9 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -m size=219136k,slots=16,maxmem=1099511627776k \
>  -overcommit mem-lock=off \
>  -smp 2,sockets=2,dies=1,cores=1,threads=1 \
> --numa node,nodeid=0,cpus=0-1,mem=214 \
> +-numa node,nodeid=0,mem=214 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm,\
>  share=no,size=536870912,align=2097152 \
>  -device nvdimm,node=0,memdev=memnvdimm0,id=nvdimm0,slot=0 \
> diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.x86_64-latest.args
> index ef32c663de..d684d15423 100644
> --- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.x86_64-latest.args
> @@ -17,7 +17,9 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -m size=219136k,slots=16,maxmem=1099511627776k \
>  -overcommit mem-lock=off \
>  -smp 2,sockets=2,dies=1,cores=1,threads=1 \
> --numa node,nodeid=0,cpus=0-1,mem=214 \
> +-numa node,nodeid=0,mem=214 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm,\
>  share=no,size=536870912 \
>  -device nvdimm,node=0,label-size=131072,memdev=memnvdimm0,id=nvdimm0,slot=0 \
> diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.x86_64-latest.args
> index 5dfba9b50a..d1374772da 100644
> --- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.x86_64-latest.args
> @@ -17,7 +17,9 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -m size=219136k,slots=16,maxmem=1099511627776k \
>  -overcommit mem-lock=off \
>  -smp 2,sockets=2,dies=1,cores=1,threads=1 \
> --numa node,nodeid=0,cpus=0-1,mem=214 \
> +-numa node,nodeid=0,mem=214 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm,\
>  share=no,size=536870912,pmem=on \
>  -device nvdimm,node=0,memdev=memnvdimm0,id=nvdimm0,slot=0 \
> diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.args
> index eff80dcf80..02d2faa054 100644
> --- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.args
> +++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.args
> @@ -17,7 +17,9 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -m size=1048576k,slots=16,maxmem=1099511627776k \
>  -overcommit mem-lock=off \
>  -smp 2,sockets=2,dies=1,cores=1,threads=1 \
> --numa node,nodeid=0,cpus=0-1,mem=1024 \
> +-numa node,nodeid=0,mem=1024 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm,\
>  size=537001984 \
>  -device nvdimm,node=0,label-size=131072,\
> diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.x86_64-latest.args
> index 7088a4f054..a757007690 100644
> --- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.x86_64-latest.args
> @@ -17,7 +17,9 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -m size=219136k,slots=16,maxmem=1099511627776k \
>  -overcommit mem-lock=off \
>  -smp 2,sockets=2,dies=1,cores=1,threads=1 \
> --numa node,nodeid=0,cpus=0-1,mem=214 \
> +-numa node,nodeid=0,mem=214 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm,\
>  share=no,size=536870912 \
>  -device nvdimm,node=0,unarmed=on,memdev=memnvdimm0,id=nvdimm0,slot=0 \
> diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm.x86_64-latest.args
> index 60d6d207c5..e673a4acad 100644
> --- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm.x86_64-latest.args
> @@ -17,7 +17,9 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -m size=1048576k,slots=16,maxmem=1099511627776k \
>  -overcommit mem-lock=off \
>  -smp 2,sockets=2,dies=1,cores=1,threads=1 \
> --numa node,nodeid=0,cpus=0-1,mem=1024 \
> +-numa node,nodeid=0,mem=1024 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm,\
>  size=536870912 \
>  -device nvdimm,node=0,memdev=memnvdimm0,id=nvdimm0,slot=0 \
> diff --git a/tests/qemuxml2argvdata/vhost-user-fs-fd-memory.x86_64-latest.args b/tests/qemuxml2argvdata/vhost-user-fs-fd-memory.x86_64-latest.args
> index dd5f68abc5..bce4e07a5d 100644
> --- a/tests/qemuxml2argvdata/vhost-user-fs-fd-memory.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/vhost-user-fs-fd-memory.x86_64-latest.args
> @@ -20,7 +20,9 @@ file=/tmp/lib/domain--1-guest/master-key.aes \
>  -object memory-backend-file,id=ram-node0,\
>  mem-path=/var/lib/libvirt/qemu/ram/-1-guest/ram-node0,share=yes,\
>  size=15032385536 \
> --numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
> +-numa node,nodeid=0,memdev=ram-node0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -uuid 126f2720-6f8e-45ab-a886-ec9277079a67 \
>  -display none \
>  -no-user-config \
> diff --git a/tests/qemuxml2argvdata/vhost-user-fs-hugepages.x86_64-latest.args b/tests/qemuxml2argvdata/vhost-user-fs-hugepages.x86_64-latest.args
> index 258fa7813f..f21c08037d 100644
> --- a/tests/qemuxml2argvdata/vhost-user-fs-hugepages.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/vhost-user-fs-hugepages.x86_64-latest.args
> @@ -19,7 +19,9 @@ file=/tmp/lib/domain--1-guest/master-key.aes \
>  -smp 2,sockets=2,cores=1,threads=1 \
>  -object memory-backend-file,id=ram-node0,prealloc=yes,\
>  mem-path=/dev/hugepages2M/libvirt/qemu/-1-guest,share=yes,size=2147483648 \
> --numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
> +-numa node,nodeid=0,memdev=ram-node0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
> +-numa cpu,node-id=0,socket-id=1,core-id=0,thread-id=0 \
>  -uuid 1ccfd97d-5eb4-478a-bbe6-88d254c16db7 \
>  -display none \
>  -no-user-config \
> diff --git a/tests/qemuxml2argvdata/vhost-user-gpu-secondary.x86_64-latest.args b/tests/qemuxml2argvdata/vhost-user-gpu-secondary.x86_64-latest.args
> index e99a5342dc..c935a8942c 100644
> --- a/tests/qemuxml2argvdata/vhost-user-gpu-secondary.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/vhost-user-gpu-secondary.x86_64-latest.args
> @@ -18,7 +18,8 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -overcommit mem-lock=off \
>  -smp 1,sockets=1,cores=1,threads=1 \
>  -object memory-backend-memfd,id=ram-node0,share=yes,size=224395264 \
> --numa node,nodeid=0,cpus=0,memdev=ram-node0 \
> +-numa node,nodeid=0,memdev=ram-node0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
>  -uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
>  -display none \
>  -no-user-config \
> diff --git a/tests/qemuxml2argvdata/vhost-user-vga.x86_64-latest.args b/tests/qemuxml2argvdata/vhost-user-vga.x86_64-latest.args
> index 277bf8c646..7f90f0dfa3 100644
> --- a/tests/qemuxml2argvdata/vhost-user-vga.x86_64-latest.args
> +++ b/tests/qemuxml2argvdata/vhost-user-vga.x86_64-latest.args
> @@ -18,7 +18,8 @@ file=/tmp/lib/domain--1-QEMUGuest1/master-key.aes \
>  -overcommit mem-lock=off \
>  -smp 1,sockets=1,cores=1,threads=1 \
>  -object memory-backend-memfd,id=ram-node0,share=yes,size=224395264 \
> --numa node,nodeid=0,cpus=0,memdev=ram-node0 \
> +-numa node,nodeid=0,memdev=ram-node0 \
> +-numa cpu,node-id=0,socket-id=0,core-id=0,thread-id=0 \
>  -uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
>  -display none \
>  -no-user-config \