[libvirt] [PATCHv5 1/8] qemu: bulk stats: extend internal collection API

Francesco Romani fromani at redhat.com
Mon Sep 15 08:48:04 UTC 2014


Future patches that will implement more
bulk stats groups for QEMU will need access
to the connection object.

To accommodate that, a few changes are needed:

* extend the internal collector prototype to pass the
  QEMU driver object.
* add a per-group flag marking whether a collector needs
  monitor access.
* if at least one collector of the requested stats needs
  monitor access, start a query job for each domain; the
  specific collectors will run nested monitor jobs inside it.
* even though the monitor was requested, it may turn out to be
  unavailable. Pass a flag to the workers signalling monitor
  availability, so that as much data as possible is gathered
  anyway (see the sketch below).
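
For illustration, here is a minimal sketch of how a future collector
could use the extended prototype and the HAVE_MONITOR flag. The
function name, the VIR_DOMAIN_STATS_EXAMPLE group, and the elided
monitor query are hypothetical placeholders, not part of this patch;
the Enter/ExitMonitor helpers are the ones used elsewhere in the
QEMU driver:

    /* Hypothetical collector following the extended prototype. */
    static int
    qemuDomainGetStatsExample(virQEMUDriverPtr driver,
                              virDomainObjPtr dom,
                              virDomainStatsRecordPtr record,
                              int *maxparams,
                              unsigned int privflags)
    {
        /* Monitor-backed data is skipped, not fatal, when the
         * caller could not acquire a job. */
        if (!HAVE_MONITOR(privflags))
            return 0;

        /* The caller already holds a query job, so only a nested
         * monitor job is needed here. */
        qemuDomainObjEnterMonitor(driver, dom);
        /* ... issue qemuMonitor* queries, fill record/maxparams ... */
        qemuDomainObjExitMonitor(driver, dom);

        return 0;
    }

Such a collector would be registered in qemuDomainGetStatsWorkers
with monitor = true, e.g.
{ qemuDomainGetStatsExample, VIR_DOMAIN_STATS_EXAMPLE, true },
so that the dispatcher knows to start a query job for it.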

Signed-off-by: Francesco Romani <fromani at redhat.com>
---
 src/qemu/qemu_driver.c | 62 +++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 54 insertions(+), 8 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 73edda3..39e9d27 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -17356,7 +17356,8 @@ qemuConnectGetDomainCapabilities(virConnectPtr conn,
 
 
 static int
-qemuDomainGetStatsState(virDomainObjPtr dom,
+qemuDomainGetStatsState(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
+                        virDomainObjPtr dom,
                         virDomainStatsRecordPtr record,
                         int *maxparams,
                         unsigned int privflags ATTRIBUTE_UNUSED)
@@ -17379,8 +17380,17 @@ qemuDomainGetStatsState(virDomainObjPtr dom,
 }
 
 
+typedef enum {
+    QEMU_DOMAIN_STATS_HAVE_MONITOR = (1 << 0), /* QEMU monitor available */
+} qemuDomainStatsFlags;
+
+
+#define HAVE_MONITOR(flags) ((flags) & QEMU_DOMAIN_STATS_HAVE_MONITOR)
+
+
 typedef int
-(*qemuDomainGetStatsFunc)(virDomainObjPtr dom,
+(*qemuDomainGetStatsFunc)(virQEMUDriverPtr driver,
+                          virDomainObjPtr dom,
                           virDomainStatsRecordPtr record,
                           int *maxparams,
                           unsigned int flags);
@@ -17388,11 +17398,12 @@ typedef int
 struct qemuDomainGetStatsWorker {
     qemuDomainGetStatsFunc func;
     unsigned int stats;
+    bool monitor;
 };
 
 static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
-    { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE},
-    { NULL, 0 }
+    { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE, false },
+    { NULL, 0, false }
 };
 
 
@@ -17424,6 +17435,20 @@ qemuDomainGetStatsCheckSupport(unsigned int *stats,
 }
 
 
+static bool
+qemuDomainGetStatsNeedMonitor(unsigned int stats)
+{
+    size_t i;
+
+    for (i = 0; qemuDomainGetStatsWorkers[i].func; i++)
+        if (stats & qemuDomainGetStatsWorkers[i].stats)
+            if (qemuDomainGetStatsWorkers[i].monitor)
+                return true;
+
+    return false;
+}
+
+
 static int
 qemuDomainGetStats(virConnectPtr conn,
                    virDomainObjPtr dom,
@@ -17441,8 +17466,8 @@ qemuDomainGetStats(virConnectPtr conn,
 
     for (i = 0; qemuDomainGetStatsWorkers[i].func; i++) {
         if (stats & qemuDomainGetStatsWorkers[i].stats) {
-            if (qemuDomainGetStatsWorkers[i].func(dom, tmp, &maxparams,
-                                                  flags) < 0)
+            if (qemuDomainGetStatsWorkers[i].func(conn->privateData, dom, tmp,
+                                                  &maxparams, flags) < 0)
                 goto cleanup;
         }
     }
@@ -17481,6 +17506,8 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
     int nstats = 0;
     size_t i;
     int ret = -1;
+    unsigned int privflags = 0;
+    unsigned int domflags = 0;
 
     if (ndoms)
         virCheckFlags(VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS, -1);
@@ -17515,7 +17542,11 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
     if (VIR_ALLOC_N(tmpstats, ndoms + 1) < 0)
         goto cleanup;
 
+    if (qemuDomainGetStatsNeedMonitor(stats))
+        privflags |= QEMU_DOMAIN_STATS_HAVE_MONITOR;
+
     for (i = 0; i < ndoms; i++) {
+        domflags = privflags;
         virDomainStatsRecordPtr tmp = NULL;
 
         if (!(dom = qemuDomObjFromDomain(doms[i])))
@@ -17525,12 +17556,22 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
             !virConnectGetAllDomainStatsCheckACL(conn, dom->def))
             continue;
 
-        if (qemuDomainGetStats(conn, dom, stats, &tmp, flags) < 0)
-            goto cleanup;
+        if (HAVE_MONITOR(domflags) &&
+            qemuDomainObjBeginJob(driver, dom, QEMU_JOB_QUERY) < 0)
+            /* No job: treat monitor as unavailable, gather what we can. */
+            domflags &= ~QEMU_DOMAIN_STATS_HAVE_MONITOR;
+
+        if (qemuDomainGetStats(conn, dom, stats, &tmp, domflags) < 0)
+            goto endjob;
 
         if (tmp)
             tmpstats[nstats++] = tmp;
 
+        if (HAVE_MONITOR(domflags) && !qemuDomainObjEndJob(driver, dom)) {
+            dom = NULL;
+            goto cleanup;
+        }
+
         virObjectUnlock(dom);
         dom = NULL;
     }
@@ -17540,6 +17581,11 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
 
     ret = nstats;
 
+ endjob:
+    if (HAVE_MONITOR(domflags) && dom)
+        if (!qemuDomainObjEndJob(driver, dom))
+            dom = NULL;
+
  cleanup:
     if (dom)
         virObjectUnlock(dom);
-- 
1.9.3