[libvirt] [PATCH 29/34] conf: Don't store vcpusched orthogonally to other vcpu info
John Ferlan
jferlan at redhat.com
Mon Jan 18 23:06:22 UTC 2016
On 01/14/2016 11:27 AM, Peter Krempa wrote:
> Due to bad design the vcpu sched element is orthogonal to the way how
> the data belongs to the corresponding objects. Now that vcpus are a
> struct that allow to store other info too, let's convert the data to the
> sane structure.
>
> The helpers for the conversion are made universal so that they can be
> reused for iothreads too.
>
> This patch also resolves https://bugzilla.redhat.com/show_bug.cgi?id=1235180
> since with the correct storage approach you can't have dangling data.
> ---
> src/conf/domain_conf.c | 231 +++++++++++++++++++++++++++++++++++++++---------
> src/conf/domain_conf.h | 8 +-
> src/qemu/qemu_driver.c | 6 +-
> src/qemu/qemu_process.c | 8 +-
> 4 files changed, 202 insertions(+), 51 deletions(-)
>
This is related to patch 26 at least w/r/t virBitmapSubtract usage.
There's also multiple things going on - between code motion and code
addition. In particular virDomainFormatSchedDef does quite a lot...
Hopefully someone else (perhaps Martin) who's worked in the sched code
before can take a look!
> diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
> index b4f6fe9..e2dda9a 100644
> --- a/src/conf/domain_conf.c
> +++ b/src/conf/domain_conf.c
> @@ -1415,6 +1415,19 @@ virDomainDefGetVcpu(virDomainDefPtr def,
> }
>
>
> +virDomainThreadSchedParamPtr
s/^/static / ??
> +virDomainDefGetVcpuSched(virDomainDefPtr def,
> + unsigned int vcpu)
> +{
> + virDomainVcpuInfoPtr vcpuinfo;
> +
> + if (!(vcpuinfo = virDomainDefGetVcpu(def, vcpu)))
> + return NULL;
Does there need to be a vcpuinfo->online check? (aka OnlineVcpuMap)
Will the caller need to differentiate? I know this is the parsing
code... just thinking while typing especially for non-static function.
Later thoughts made me think this should be static for parse...
> +
> + return &vcpuinfo->sched;
> +}
> +
> +
> /**
> * virDomainDefHasVcpusPin:
> * @def: domain definition
> @@ -2546,10 +2559,6 @@ void virDomainDefFree(virDomainDefPtr def)
>
> virBitmapFree(def->cputune.emulatorpin);
>
> - for (i = 0; i < def->cputune.nvcpusched; i++)
> - virBitmapFree(def->cputune.vcpusched[i].ids);
> - VIR_FREE(def->cputune.vcpusched);
> -
> for (i = 0; i < def->cputune.niothreadsched; i++)
> virBitmapFree(def->cputune.iothreadsched[i].ids);
> VIR_FREE(def->cputune.iothreadsched);
> @@ -14592,6 +14601,55 @@ virDomainSchedulerParse(xmlNodePtr node,
>
>
> static int
> +virDomainThreadSchedParseHelper(xmlNodePtr node,
> + const char *name,
> + virDomainThreadSchedParamPtr (*func)(virDomainDefPtr, unsigned int),
> + virDomainDefPtr def)
> +{
> + ssize_t next = -1;
> + virBitmapPtr map = NULL;
> + virDomainThreadSchedParamPtr sched;
> + virProcessSchedPolicy policy;
> + int priority;
> + int ret = -1;
> +
> + if (!(map = virDomainSchedulerParse(node, name, &policy, &priority)))
> + goto cleanup;
> +
Replacing the virBitmapOverlaps...
> + while ((next = virBitmapNextSetBit(map, next)) > -1) {
> + if (!(sched = func(def, next)))
> + goto cleanup;
Could this be 'continue;' ? That is, is the data required? I'm
thinking primarily of the vcpu->online == false case...
> +
> + if (sched->policy != VIR_PROC_POLICY_NONE) {
> + virReportError(VIR_ERR_XML_DETAIL,
> + _("%ssched attributes 'vcpus' must not overlap"),
Since the code will be shared in patch 30, change message to:
"%sched attribute '%s' must not overlap",
using 'name' for both %s params. Similar to virDomainFormatSchedDef has
done things.
> + name);
> + goto cleanup;
> + }
> +
> + sched->policy = policy;
> + sched->priority = priority;
> + }
> +
> + ret = 0;
> +
> + cleanup:
> + virBitmapFree(map);
> + return ret;
> +}
> +
> +
> +static int
> +virDomainVcpuThreadSchedParse(xmlNodePtr node,
> + virDomainDefPtr def)
> +{
> + return virDomainThreadSchedParseHelper(node, "vcpus",
> + virDomainDefGetVcpuSched,
> + def);
> +}
> +
> +
> +static int
> virDomainThreadSchedParse(xmlNodePtr node,
> unsigned int minid,
> unsigned int maxid,
> @@ -15145,29 +15203,10 @@ virDomainDefParseXML(xmlDocPtr xml,
> _("cannot extract vcpusched nodes"));
> goto error;
> }
> - if (n) {
> - if (VIR_ALLOC_N(def->cputune.vcpusched, n) < 0)
> - goto error;
> - def->cputune.nvcpusched = n;
>
> - for (i = 0; i < def->cputune.nvcpusched; i++) {
> - if (virDomainThreadSchedParse(nodes[i],
> - 0,
> - virDomainDefGetVcpusMax(def) - 1,
> - "vcpus",
> - &def->cputune.vcpusched[i]) < 0)
> - goto error;
> -
> - for (j = 0; j < i; j++) {
> - if (virBitmapOverlaps(def->cputune.vcpusched[i].ids,
> - def->cputune.vcpusched[j].ids)) {
> - virReportError(VIR_ERR_XML_DETAIL, "%s",
> - _("vcpusched attributes 'vcpus' "
> - "must not overlap"));
> - goto error;
> - }
> - }
> - }
> + for (i = 0; i < n; i++) {
> + if (virDomainVcpuThreadSchedParse(nodes[i], def) < 0)
> + goto error;
> }
> VIR_FREE(nodes);
>
> @@ -21504,6 +21543,128 @@ virDomainDefHasCapabilitiesFeatures(virDomainDefPtr def)
>
>
Probably a good place to note the arguments, but specifically that
"name" is used to generate the XML "<vcpusched vcpus='..." or
"<iothreadsched iothreads='..."
> static int
> +virDomainFormatSchedDef(virDomainDefPtr def,
> + virBufferPtr buf,
> + const char *name,
> + virDomainThreadSchedParamPtr (*func)(virDomainDefPtr, unsigned int),
> + virBitmapPtr resourceMap)
> +{
> + virBitmapPtr schedMap = NULL;
> + virBitmapPtr prioMap = NULL;
> + virDomainThreadSchedParamPtr sched;
> + char *tmp = NULL;
> + ssize_t next;
> + size_t i;
> + int ret = -1;
> +
> + if (!(schedMap = virBitmapNew(VIR_DOMAIN_CPUMASK_LEN)) ||
> + !(prioMap = virBitmapNew(VIR_DOMAIN_CPUMASK_LEN)))
> + goto cleanup;
> +
> + for (i = VIR_PROC_POLICY_NONE + 1; i < VIR_PROC_POLICY_LAST; i++) {
> + virBitmapClearAll(schedMap);
> +
> + /* find vcpus using a particular scheduler */
> + next = -1;
> + while ((next = virBitmapNextSetBit(resourceMap, next)) > -1) {
> + sched = func(def, next);
> +
> + if (sched->policy == i)
> + ignore_value(virBitmapSetBit(schedMap, next));
> + }
> +
> + /* it's necessary to discriminate priority levels for schedulers that
> + * have them */
> + while (!virBitmapIsAllClear(schedMap)) {
> + virBitmapPtr currentMap = NULL;
> + ssize_t nextprio;
> + bool hasPriority = false;
> + int priority;
> +
> + switch ((virProcessSchedPolicy) i) {
> + case VIR_PROC_POLICY_NONE:
> + case VIR_PROC_POLICY_BATCH:
> + case VIR_PROC_POLICY_IDLE:
> + case VIR_PROC_POLICY_LAST:
> + currentMap = schedMap;
> + break;
> +
> + case VIR_PROC_POLICY_FIFO:
> + case VIR_PROC_POLICY_RR:
> + virBitmapClearAll(prioMap);
> + hasPriority = true;
> +
> + /* we need to find a subset of vCPUs with the given scheduler
> + * that share the priority */
> + nextprio = virBitmapNextSetBit(schedMap, -1);
> + sched = func(def, nextprio);
> + priority = sched->priority;
> +
> + ignore_value(virBitmapSetBit(prioMap, nextprio));
> +
> + while ((nextprio = virBitmapNextSetBit(schedMap, nextprio)) > -1) {
> + sched = func(def, nextprio);
> + if (sched->priority == priority)
> + ignore_value(virBitmapSetBit(prioMap, nextprio));
> + }
> +
> + currentMap = prioMap;
> + break;
> + }
> +
> + /* now we have the complete group */
> + if (!(tmp = virBitmapFormat(currentMap)))
> + goto cleanup;
> +
> + virBufferAsprintf(buf,
> + "<%ssched %ss='%s' scheduler='%s'",
> + name, name, tmp,
> + virProcessSchedPolicyTypeToString(i));
This format works right because the passed name is "vcpu" or "iothread"
> + VIR_FREE(tmp);
> +
> + if (hasPriority)
> + virBufferAsprintf(buf, " priority='%d'", priority);
> +
> + virBufferAddLit(buf, "/>\n");
> +
> + /* subtract all vCPUs that were already found */
> + virBitmapSubtract(schedMap, currentMap);
> + }
> + }
This is one heckuva loop - I hope others can look as well because my
eyes and brain decided to run in the other direction ;-)
> +
> + ret = 0;
> +
> + cleanup:
> + virBitmapFree(schedMap);
> + virBitmapFree(prioMap);
> + return ret;
> +}
> +
> +
> +static int
> +virDomainFormatVcpuSchedDef(virDomainDefPtr def,
> + virBufferPtr buf)
> +{
> + virBitmapPtr allcpumap;
> + int ret;
> +
> + if (virDomainDefGetVcpusMax(def) == 0)
> + return 0;
Hmm... a zero for maxvcpus... In patch 2 we disallow machines with 0
vcpus online... Just a strange check.
> +
> + if (!(allcpumap = virBitmapNew(virDomainDefGetVcpusMax(def))))
use of same function - although I assume the compiler will optimize that
for you anyway...
> + return -1;
> +
> + virBitmapSetAll(allcpumap);
> +
> + ret = virDomainFormatSchedDef(def, buf, "vcpu", virDomainDefGetVcpuSched,
> + allcpumap);
This differs slightly from Parse which uses "vcpus" - I'm wondering if
it should be consistent. At the very least documented...
> +
> + virBitmapFree(allcpumap);
> + return ret;
> +}
> +
> +
> +static int
> virDomainCputuneDefFormat(virBufferPtr buf,
> virDomainDefPtr def)
> {
> @@ -21577,22 +21738,8 @@ virDomainCputuneDefFormat(virBufferPtr buf,
> VIR_FREE(cpumask);
> }
>
> - for (i = 0; i < def->cputune.nvcpusched; i++) {
> - virDomainThreadSchedParamPtr sp = &def->cputune.vcpusched[i];
> - char *ids = NULL;
> -
> - if (!(ids = virBitmapFormat(sp->ids)))
> - goto cleanup;
> -
> - virBufferAsprintf(&childrenBuf, "<vcpusched vcpus='%s' scheduler='%s'",
> - ids, virProcessSchedPolicyTypeToString(sp->policy));
> - VIR_FREE(ids);
> -
> - if (sp->policy == VIR_PROC_POLICY_FIFO ||
> - sp->policy == VIR_PROC_POLICY_RR)
> - virBufferAsprintf(&childrenBuf, " priority='%d'", sp->priority);
> - virBufferAddLit(&childrenBuf, "/>\n");
> - }
> + if (virDomainFormatVcpuSchedDef(def, &childrenBuf) < 0)
> + goto cleanup;
>
> for (i = 0; i < def->cputune.niothreadsched; i++) {
> virDomainThreadSchedParamPtr sp = &def->cputune.iothreadsched[i];
> diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
> index a2c8eac..85740bc 100644
> --- a/src/conf/domain_conf.h
> +++ b/src/conf/domain_conf.h
> @@ -2112,8 +2112,6 @@ struct _virDomainCputune {
> long long emulator_quota;
> virBitmapPtr emulatorpin;
>
> - size_t nvcpusched;
> - virDomainThreadSchedParamPtr vcpusched;
> size_t niothreadsched;
> virDomainThreadSchedParamPtr iothreadsched;
> };
> @@ -2125,6 +2123,9 @@ typedef virDomainVcpuInfo *virDomainVcpuInfoPtr;
> struct _virDomainVcpuInfo {
> bool online;
> virBitmapPtr cpumask;
> +
> + /* note: the sched.ids bitmap is unused so it doens't have to be cleared */
s/doens't/doesn't/
> + virDomainThreadSchedParam sched;
> };
>
> typedef struct _virDomainBlkiotune virDomainBlkiotune;
> @@ -2333,6 +2334,9 @@ unsigned int virDomainDefGetVcpus(const virDomainDef *def);
> virBitmapPtr virDomainDefGetVcpumap(const virDomainDef *def);
> virDomainVcpuInfoPtr virDomainDefGetVcpu(virDomainDefPtr def, unsigned int vcpu)
> ATTRIBUTE_RETURN_CHECK;
> +virDomainThreadSchedParamPtr virDomainDefGetVcpuSched(virDomainDefPtr def,
> + unsigned int vcpu)
> + ATTRIBUTE_RETURN_CHECK;
>
Not in libvirt_private.syms... So far this isn't external to
domain_conf.c - so probably should be static to there. Concern is
calling from driver/qemu code could result in returning an 'offline'
definition.
> unsigned long long virDomainDefGetMemoryInitial(const virDomainDef *def);
> void virDomainDefSetMemoryTotal(virDomainDefPtr def, unsigned long long size);
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index f253248..dfed936 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -4713,9 +4713,9 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver,
> }
> }
>
> - if (qemuProcessSetSchedParams(vcpu, vcpupid,
> - vm->def->cputune.nvcpusched,
> - vm->def->cputune.vcpusched) < 0)
> + if (vcpuinfo->sched.policy != VIR_PROC_POLICY_NONE &&
> + virProcessSetScheduler(vcpupid, vcpuinfo->sched.policy,
> + vcpuinfo->sched.priority) < 0)
> goto cleanup;
>
> ret = 0;
> diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
> index c0043c9..3c1c6d8 100644
> --- a/src/qemu/qemu_process.c
> +++ b/src/qemu/qemu_process.c
> @@ -2287,12 +2287,12 @@ qemuProcessSetSchedulers(virDomainObjPtr vm)
> for (i = 0; i < virDomainDefGetVcpusMax(vm->def); i++) {
> virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(vm->def, i);
>
> - if (!vcpu->online)
> + if (!vcpu->online ||
> + vcpu->sched.policy == VIR_PROC_POLICY_NONE)
> continue;
>
> - if (qemuProcessSetSchedParams(i, qemuDomainGetVcpuPid(vm, i),
> - vm->def->cputune.nvcpusched,
> - vm->def->cputune.vcpusched) < 0)
> + if (virProcessSetScheduler(qemuDomainGetVcpuPid(vm, i),
> + vcpu->sched.policy, vcpu->sched.priority) < 0)
> return -1;
> }
>
More information about the libvir-list
mailing list