On Fri, Aug 5, 2016 at 7:26 PM, Peter Krempa <pkrempa@redhat.com> wrote:
> For hotplug purposes it's necessary to retrieve data using
> query-hotpluggable-cpus, while the old query-cpus API reports thread IDs
> and the order of hotplug.
>
> This patch adds code that merges the data using a rather non-trivial
> algorithm and fills the data into the qemuMonitorCPUInfo structure for
> adding to the appropriate place in the domain definition.
> ---
>  src/qemu/qemu_domain.c  |   2 +-
>  src/qemu/qemu_monitor.c | 197 ++++++++++++++++++++++++++++++++++++++++++++++--
>  src/qemu/qemu_monitor.h |  23 +++-
>  3 files changed, 212 insertions(+), 10 deletions(-)
>
> diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
> index 0201a4a..1be3ede 100644
> --- a/src/qemu/qemu_domain.c
> +++ b/src/qemu/qemu_domain.c
> @@ -5776,7 +5776,7 @@ qemuDomainRefreshVcpuInfo(virQEMUDriverPtr driver,
>      if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
>          return -1;
>
> -    rc = qemuMonitorGetCPUInfo(qemuDomainGetMonitor(vm), &info, maxvcpus);
> +    rc = qemuMonitorGetCPUInfo(qemuDomainGetMonitor(vm), &info, maxvcpus, false);
>
>      if (qemuDomainObjExitMonitor(driver, vm) < 0)
>          goto cleanup;
> diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
> index f87f431..12a7fa6 100644
> --- a/src/qemu/qemu_monitor.c
> +++ b/src/qemu/qemu_monitor.c
> @@ -1656,13 +1656,36 @@ qemuMonitorSystemReset(qemuMonitorPtr mon)
>  }
>
>
> +static void
> +qemuMonitorCPUInfoClear(qemuMonitorCPUInfoPtr cpus,
> +                        size_t ncpus)
> +{
> +    size_t i;
> +
> +    for (i = 0; i < ncpus; i++) {
> +        cpus[i].id = 0;
> +        cpus[i].socket_id = -1;
> +        cpus[i].core_id = -1;
> +        cpus[i].thread_id = -1;
> +        cpus[i].vcpus = 0;
> +        cpus[i].tid = 0;
> +
> +        VIR_FREE(cpus[i].qom_path);
> +        VIR_FREE(cpus[i].alias);
> +        VIR_FREE(cpus[i].type);
> +    }
> +}
> +
> +
>  void
>  qemuMonitorCPUInfoFree(qemuMonitorCPUInfoPtr cpus,
> -                       size_t ncpus ATTRIBUTE_UNUSED)
> +                       size_t ncpus)
>  {
>      if (!cpus)
>          return;
>
> +    qemuMonitorCPUInfoClear(cpus, ncpus);
> +
>      VIR_FREE(cpus);
>  }
>
> @@ -1683,10 +1706,148 @@ qemuMonitorQueryCpusFree(struct qemuMonitorQueryCpusEntry *entries,
>
>
>  /**
> + * The legacy approach doesn't allow out-of-order cpus, thus no complex
> + * matching algorithm is necessary */
> +static void
> +qemuMonitorGetCPUInfoLegacy(struct qemuMonitorQueryCpusEntry *cpuentries,
> +                            size_t ncpuentries,
> +                            qemuMonitorCPUInfoPtr vcpus,
> +                            size_t maxvcpus)
> +{
> +    size_t i;
> +
> +    for (i = 0; i < ncpuentries && i < maxvcpus; i++) {
> +        vcpus[i].tid = cpuentries[i].tid;
> +
> +        /* for legacy hotplug to work we need to fake the vcpu count added by
> +         * enabling a given vcpu */
> +        vcpus[i].vcpus = 1;
> +    }
> +}
> +
> +
> +/**
> + * qemuMonitorGetCPUInfoHotplug:
> + *
> + * This function stitches together data retrieved via query-hotpluggable-cpus,
> + * which returns entities at the hotpluggable device level (each possibly
> + * describing more than one guest logical vcpu), with the output of
> + * query-cpus, which has an entry per enabled guest logical vcpu.
> + *
> + * query-hotpluggable-cpus conveys the following information:
> + * - topology information and the number of logical vcpus this entry creates
> + * - device type name of the entry that needs to be used when hotplugging
> + * - qom path in qemu which can be used to map the entry against query-cpus
> + *
> + * query-cpus conveys the following information:
> + * - thread id of a given guest logical vcpu
> + * - order in which the vcpus were inserted
> + * - qom path to allow mapping the two together
> + *
> + * Libvirt's internal structure has an entry for each possible (even
> + * disabled) guest vcpu. The purpose is to map the data together so that we
> + * are certain of the thread id mapping and the information required for vcpu
> + * hotplug.
> + *
> + * This function returns 0 on success and -1 on error, but does not report
> + * libvirt errors so that the fallback approach can be used.
> + */
> +static int
> +qemuMonitorGetCPUInfoHotplug(struct qemuMonitorQueryHotpluggableCpusEntry *hotplugvcpus,
> +                             size_t nhotplugvcpus,
> +                             struct qemuMonitorQueryCpusEntry *cpuentries,
> +                             size_t ncpuentries,
> +                             qemuMonitorCPUInfoPtr vcpus,
> +                             size_t maxvcpus)
> +{
> +    int order = 1;
> +    size_t totalvcpus = 0;
> +    size_t i;
> +    size_t j;
> +
> +    /* ensure that the total vcpu count reported by query-hotpluggable-cpus
> +     * equals the libvirt maximum cpu count */
> +    for (i = 0; i < nhotplugvcpus; i++)
> +        totalvcpus += hotplugvcpus[i].vcpus;
> +
> +    if (totalvcpus != maxvcpus) {
> +        VIR_DEBUG("expected '%zu' total vcpus got '%zu'", maxvcpus, totalvcpus);
> +        return -1;
> +    }
> +
> +    /* Note the order in which the hotpluggable entities are inserted by
> +     * matching them to the query-cpus entries */
> +    for (i = 0; i < ncpuentries; i++) {
> +        for (j = 0; j < nhotplugvcpus; j++) {
> +            if (!cpuentries[i].qom_path ||
> +                !hotplugvcpus[j].qom_path ||
> +                !STRPREFIX(cpuentries[i].qom_path, hotplugvcpus[j].qom_path))
> +                continue;
> +
> +            /* add ordering info for hotpluggable entries */
> +            if (hotplugvcpus[j].enable_id == 0)
> +                hotplugvcpus[j].enable_id = order++;
> +
> +            break;
> +        }
> +    }
> +
> +    /* transfer appropriate data from the hotpluggable list to the
> +     * corresponding entries. The entries returned by qemu may in fact
> +     * describe multiple logical vcpus in the guest */
> +    j = 0;
> +    for (i = 0; i < nhotplugvcpus; i++) {
> +        vcpus[j].socket_id = hotplugvcpus[i].socket_id;
> +        vcpus[j].core_id = hotplugvcpus[i].core_id;
> +        vcpus[j].thread_id = hotplugvcpus[i].thread_id;
> +        vcpus[j].vcpus = hotplugvcpus[i].vcpus;
> +        VIR_STEAL_PTR(vcpus[j].qom_path, hotplugvcpus[i].qom_path);
> +        VIR_STEAL_PTR(vcpus[j].alias, hotplugvcpus[i].alias);
> +        VIR_STEAL_PTR(vcpus[j].type, hotplugvcpus[i].type);
> +        vcpus[j].id = hotplugvcpus[i].enable_id;
> +
> +        /* skip over vcpu entries covered by this hotpluggable entry */
> +        j += hotplugvcpus[i].vcpus;
> +    }
> +
> +    /* match entries from query-cpus to the output array taking into account
> +     * multi-vcpu objects */
> +    for (j = 0; j < ncpuentries; j++) {
> +        /* find the correct entry or beginning of group of entries */
> +        for (i = 0; i < maxvcpus; i++) {
> +            if (cpuentries[j].qom_path && vcpus[i].qom_path &&
> +                STRPREFIX(cpuentries[j].qom_path, vcpus[i].qom_path))
> +                break;
> +        }
> +
> +        if (i == maxvcpus) {
> +            VIR_DEBUG("too many query-vcpus entries for a given "

s/query-vcpus/query-cpus

> +                      "query-hotpluggable-cpus entry");
> +            return -1;
> +        }
> +
> +        if (vcpus[i].vcpus != 1) {
> +            /* find a possibly empty vcpu thread for core granularity systems */
> +            for (; i < maxvcpus; i++) {
> +                if (vcpus[i].tid == 0)
> +                    break;
> +            }
> +        }
> +
> +        vcpus[i].tid = cpuentries[j].tid;
> +    }
> +
> +    return 0;
> +}
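
Not a problem with the patch, just to make the qom-path pairing concrete for
other reviewers: below is a standalone toy of what I believe the STRPREFIX
matching does. The paths are invented, not captured from qemu, and STRPREFIX
is inlined so the snippet builds on its own:

    #include <stdio.h>
    #include <string.h>

    /* same semantics as libvirt's STRPREFIX: does a start with b? */
    #define STRPREFIX(a, b) (strncmp(a, b, strlen(b)) == 0)

    int main(void)
    {
        /* query-hotpluggable-cpus: one entry per hotpluggable device */
        const char *devices[] = { "/machine/peripheral/cpu-1",
                                  "/machine/peripheral/cpu-2" };
        /* query-cpus: one entry per enabled guest logical vcpu */
        const char *threads[] = { "/machine/peripheral/cpu-1/thread[0]",
                                  "/machine/peripheral/cpu-1/thread[1]",
                                  "/machine/peripheral/cpu-2/thread[0]" };
        size_t i;
        size_t j;

        for (i = 0; i < 3; i++) {
            for (j = 0; j < 2; j++) {
                if (STRPREFIX(threads[i], devices[j]))
                    printf("%s -> %s\n", threads[i], devices[j]);
            }
        }
        return 0;
    }

One thought while writing it: a plain prefix match would also pair a thread
under a hypothetical ".../cpu-10" with the ".../cpu-1" device. That's probably
not reachable with the paths qemu actually generates, but might deserve a
comment.
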
> +
> +
> +/**
>   * qemuMonitorGetCPUInfo:
>   * @mon: monitor
>   * @cpus: pointer filled by array of qemuMonitorCPUInfo structures
>   * @maxvcpus: total possible number of vcpus
> + * @hotplug: query data relevant for hotplug support
>   *
>   * Detects VCPU information. If qemu doesn't support or fails reporting
>   * information this function will return success as other parts of libvirt
> @@ -1698,20 +1859,32 @@ qemuMonitorQueryCpusFree(struct qemuMonitorQueryCpusEntry *entries,
>  int
>  qemuMonitorGetCPUInfo(qemuMonitorPtr mon,
>                        qemuMonitorCPUInfoPtr *vcpus,
> -                      size_t maxvcpus)
> +                      size_t maxvcpus,
> +                      bool hotplug)
>  {
> -    qemuMonitorCPUInfoPtr info = NULL;
> +    struct qemuMonitorQueryHotpluggableCpusEntry *hotplugcpus = NULL;
> +    size_t nhotplugcpus = 0;
>      struct qemuMonitorQueryCpusEntry *cpuentries = NULL;
>      size_t ncpuentries = 0;
> -    size_t i;
>      int ret = -1;
>      int rc;
> +    qemuMonitorCPUInfoPtr info = NULL;
>
> -    QEMU_CHECK_MONITOR(mon);
> +    if (hotplug)
> +        QEMU_CHECK_MONITOR_JSON(mon);
> +    else
> +        QEMU_CHECK_MONITOR(mon);
>
>      if (VIR_ALLOC_N(info, maxvcpus) < 0)
>          return -1;
>
> +    /* initialize a few non-zero defaults */
> +    qemuMonitorCPUInfoClear(info, maxvcpus);
> +
> +    if (hotplug &&
> +        (qemuMonitorJSONGetHotpluggableCPUs(mon, &hotplugcpus, &nhotplugcpus)) < 0)
> +        goto cleanup;
> +
>      if (mon->json)
>          rc = qemuMonitorJSONQueryCPUs(mon, &cpuentries, &ncpuentries);
>      else
> @@ -1726,15 +1899,23 @@ qemuMonitorGetCPUInfo(qemuMonitorPtr mon,
>          goto cleanup;
>      }
>
> -    for (i = 0; i < ncpuentries; i++)
> -        info[i].tid = cpuentries[i].tid;
> +    if (!hotplugcpus ||
> +        qemuMonitorGetCPUInfoHotplug(hotplugcpus, nhotplugcpus,
> +                                     cpuentries, ncpuentries,
> +                                     info, maxvcpus) < 0) {
> +        /* Fall back to the legacy algorithm. Hotplug paths will make sure
> +         * that the appropriate data is present */
> +        qemuMonitorCPUInfoClear(info, maxvcpus);
> +        qemuMonitorGetCPUInfoLegacy(cpuentries, ncpuentries, info, maxvcpus);
> +    }
>
>      VIR_STEAL_PTR(*vcpus, info);
>      ret = 0;
>
>  cleanup:
> -    qemuMonitorCPUInfoFree(info, maxvcpus);
> +    qemuMonitorQueryHotpluggableCpusFree(hotplugcpus, nhotplugcpus);
>      qemuMonitorQueryCpusFree(cpuentries, ncpuentries);
> +    qemuMonitorCPUInfoFree(info, maxvcpus);
>      return ret;
>  }
>
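
Also, to convince myself the multi-vcpu handling is right, I traced an
invented core-granularity (ppc64-like) case through the merge: maxvcpus=4,
two core devices contributing 2 threads each, the second one hotplugged. The
stand-in struct and every value below are made up for illustration; this only
prints the array layout I expect the merge to produce:

    #include <stdio.h>

    /* trimmed-down stand-in for qemuMonitorCPUInfo, illustration only */
    struct vcpuinfo {
        int tid;            /* host thread id from query-cpus */
        int id;             /* enable order of the hotpluggable entry */
        int core_id;        /* -1 in slots covered by a preceding entry */
        unsigned int vcpus; /* logical vcpus the entry contributes */
        const char *type;   /* device type to use for hotplug */
        const char *alias;  /* only hotplugged entries carry one */
    };

    int main(void)
    {
        /* entries 0 and 2 describe whole core devices; entries 1 and 3
         * keep the cleared defaults and only receive the second thread's
         * tid via the "possibly empty vcpu thread" loop */
        struct vcpuinfo info[4] = {
            { 3001, 1,  0, 2, "host-spapr-cpu-core", NULL },
            { 3002, 0, -1, 0, NULL, NULL },
            { 3005, 2,  8, 2, "host-spapr-cpu-core", "vcpu2" },
            { 3006, 0, -1, 0, NULL, NULL },
        };
        size_t i;

        for (i = 0; i < 4; i++)
            printf("vcpu %zu: tid=%d id=%d core=%d vcpus=%u type=%s alias=%s\n",
                   i, info[i].tid, info[i].id, info[i].core_id, info[i].vcpus,
                   info[i].type ? info[i].type : "-",
                   info[i].alias ? info[i].alias : "-");
        return 0;
    }

If that's not the intended layout, a comment spelling it out near the
function would help.
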
> diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
> index 58f8327..b838725 100644
> --- a/src/qemu/qemu_monitor.h
> +++ b/src/qemu/qemu_monitor.h
> @@ -409,6 +409,9 @@ struct qemuMonitorQueryHotpluggableCpusEntry {
>      int socket_id;
>      int core_id;
>      int thread_id;
> +
> +    /* internal data */
> +    int enable_id;
>  };
>  void qemuMonitorQueryHotpluggableCpusFree(struct qemuMonitorQueryHotpluggableCpusEntry *entries,
>                                            size_t nentries);
> @@ -416,6 +419,23 @@ void qemuMonitorQueryHotpluggableCpusFree(struct qemuMonitorQueryHotpluggableCpu
>
>  struct _qemuMonitorCPUInfo {
>      pid_t tid;
> +    int id; /* order of enabling of the given cpu */
> +
> +    /* topology info for hotplug purposes. Hotplug of a given vcpu is
> +     * impossible if all entries are -1 */
> +    int socket_id;
> +    int core_id;
> +    int thread_id;
> +    unsigned int vcpus; /* number of vcpus added if the given entry is hotplugged */
> +
> +    /* name of the qemu type to add in case of hotplug */
> +    char *type;
> +
> +    /* alias of a hotpluggable entry. Entries with an alias can be hot-unplugged */
> +    char *alias;
> +
> +    /* internal for use in the matching code */
> +    char *qom_path;
>  };
>  typedef struct _qemuMonitorCPUInfo qemuMonitorCPUInfo;
>  typedef qemuMonitorCPUInfo *qemuMonitorCPUInfoPtr;
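
For the new struct it also helped me to write out a filled-in
thread-granularity (x86-style) entry. This compiles against the struct above;
every value is invented, not taken from a real domain:

    /* illustration only: one hotpluggable device == one logical vcpu */
    static qemuMonitorCPUInfo example = {
        .tid = 26904,
        .id = 2,                   /* second vcpu that was enabled */
        .socket_id = 1,
        .core_id = 0,
        .thread_id = 0,
        .vcpus = 1,                /* thread granularity */
        .type = "qemu64-x86_64-cpu",
        .alias = "vcpu1",          /* has an alias => hot-unpluggable */
        .qom_path = "/machine/peripheral/vcpu1",
    };
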
> @@ -424,7 +444,8 @@ void qemuMonitorCPUInfoFree(qemuMonitorCPUInfoPtr list,
>                              size_t nitems);
>  int qemuMonitorGetCPUInfo(qemuMonitorPtr mon,
>                            qemuMonitorCPUInfoPtr *vcpus,
> -                          size_t maxvcpus);
> +                          size_t maxvcpus,
> +                          bool hotplug);
>
>  int qemuMonitorGetVirtType(qemuMonitorPtr mon,
>                             virDomainVirtType *virtType);
> --
> 2.9.2
>
> --
> libvir-list mailing list
> libvir-list@redhat.com
> https://www.redhat.com/mailman/listinfo/libvir-list