[virt-tools-list] [PATCH 2/2] Add support for VIR_DOMAIN_CPU_STATS_F_VCPU in qemu_driver

Hu Tao hutao at cn.fujitsu.com
Wed Apr 18 11:14:56 UTC 2012
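
This implements the new VIR_DOMAIN_CPU_STATS_F_VCPU flag in the qemu
driver.  When the flag is given, qemuDomainGetPercpuStats() no longer
reads the per-CPU usage of the whole domain cgroup; instead it walks
the per-vcpu cpuacct sub-cgroups and sums their per-CPU usage, so only
time consumed by the vcpu threads is reported.  To make this possible,
virCgroupMakeGroup() now keeps the cpuacct controller for the per-vcpu
cgroups, and virsh cpu-stats gains a --vcpu option that passes the
flag through.

Example usage (the domain name here is only a placeholder):

  # virsh cpu-stats guest01 --vcpu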


---
 src/qemu/qemu_driver.c |  152 ++++++++++++++++++++++++++++++++++++++++++-----
 src/util/cgroup.c      |    4 +-
 tools/virsh.c          |   17 ++++--
 3 files changed, 150 insertions(+), 23 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 0d3b0bd..165b5f3 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -12377,19 +12377,110 @@ qemuDomainGetTotalcpuStats(virCgroupPtr group,
     return nparams;
 }
 
+/* Read the per-physical-CPU usage times of the cpuacct cgroup 'group'.
+   On success, *cpu_time points to a newly allocated array of *num
+   entries (one value per physical CPU); the caller must free it.
+   Returns 0 on success, -1 otherwise. */
+static int getVcpuPercpuStats(virCgroupPtr group,
+                              unsigned long long **cpu_time,
+                              unsigned int *num)
+{
+    int ret = -1;
+    unsigned long long *ptime = NULL;
+    char *buf = NULL;
+    char *pos;
+    unsigned long long tmp;
+
+    if (virCgroupGetCpuacctPercpuUsage(group, &buf))
+        goto cleanup;
+
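+    /* first pass: count the space-separated usage values */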
+    pos = buf;
+    *num = 0;
+    while (virStrToLong_ull(pos, &pos, 10, &tmp) == 0)
+        (*num)++;
+
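+    /* second pass: parse the values into a newly allocated array */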
+    if (*num > 0) {
+        int i;
+
+        if (VIR_ALLOC_N(ptime, *num) < 0)
+            goto cleanup;
+
+        pos = buf;
+        for (i = 0; i < *num; i++)
+            virStrToLong_ull(pos, &pos, 10, ptime + i);
+        *cpu_time = ptime;
+        ret = 0;
+    }
+cleanup:
+    VIR_FREE(buf);
+    return ret;
+}
+
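+/* Sum the per-physical-CPU usage of every vcpu sub-cgroup of 'group'.
+   On success *sum_cpu_time is a newly allocated array of *num entries
+   (one per physical CPU) that the caller must free.
+   Returns 0 on success, -1 on error. */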
+static int getSumVcpuPercpuStats(virCgroupPtr group,
+                                 unsigned int nvcpu,
+                                 unsigned long long **sum_cpu_time,
+                                 unsigned int *num)
+{
+    unsigned long long *cpu_time[nvcpu];
+    unsigned int ncpu_time[nvcpu];
+    unsigned int max = 0;
+    unsigned long long *tmp = NULL;
+    virCgroupPtr group_vcpu = NULL;
+    int ret = -1;
+    int i, j;
+
+    /* NULL the per-vcpu arrays so the cleanup path can free them
+       unconditionally even if we fail partway through */
+    memset(cpu_time, 0, sizeof(cpu_time));
+
+    for (i = 0; i < nvcpu; i++) {
+        if (virCgroupForVcpu(group, i, &group_vcpu, 0) < 0) {
+            qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                            _("unable to find vcpu cgroup"));
+            goto cleanup;
+        }
+        if (getVcpuPercpuStats(group_vcpu,
+                               &cpu_time[i],
+                               &ncpu_time[i]) < 0)
+            goto cleanup;
+        virCgroupFree(&group_vcpu);
+        if (max < ncpu_time[i])
+            max = ncpu_time[i];
+    }
+
+    if (max > 0) {
+        if (VIR_ALLOC_N(tmp, max) < 0)
+            goto cleanup;
+
+        /* VIR_ALLOC_N zero-fills, so tmp can be summed into directly */
+        for (i = 0; i < nvcpu; i++) {
+            for (j = 0; j < ncpu_time[i]; j++)
+                tmp[j] += cpu_time[i][j];
+        }
+        *sum_cpu_time = tmp;
+        *num = max;
+        ret = 0;
+    }
+
+cleanup:
+    virCgroupFree(&group_vcpu);
+    for (i = 0; i < nvcpu; i++)
+        VIR_FREE(cpu_time[i]);
+    return ret;
+}
+
 static int
 qemuDomainGetPercpuStats(virDomainPtr domain,
+                         virDomainObjPtr vm,
                          virCgroupPtr group,
                          virTypedParameterPtr params,
                          unsigned int nparams,
                          int start_cpu,
-                         unsigned int ncpus)
+                         unsigned int ncpus,
+                         unsigned int flags)
 {
     char *map = NULL;
     int rv = -1;
     int i, max_id;
     char *pos;
     char *buf = NULL;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
     virTypedParameterPtr ent;
     int param_idx;
 
@@ -12425,22 +12516,48 @@ qemuDomainGetPercpuStats(virDomainPtr domain,
     if (max_id - start_cpu > ncpus - 1)
         max_id = start_cpu + ncpus - 1;
 
-    for (i = 0; i <= max_id; i++) {
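+    /* With VIR_DOMAIN_CPU_STATS_F_VCPU, report the sum of the per-CPU
+       usage of the vcpu cgroups rather than the usage of the whole
+       domain cgroup, so only vcpu thread time is counted. */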
+    if (flags & VIR_DOMAIN_CPU_STATS_F_VCPU) {
+        unsigned long long *sum_cpu_time;
         unsigned long long cpu_time;
+        unsigned int n;
 
-        if (!map[i]) {
-            cpu_time = 0;
-        } else if (virStrToLong_ull(pos, &pos, 10, &cpu_time) < 0) {
-            qemuReportError(VIR_ERR_INTERNAL_ERROR,
-                            _("cpuacct parse error"));
-            goto cleanup;
+        if (getSumVcpuPercpuStats(group,
+                                  priv->nvcpupids,
+                                  &sum_cpu_time,
+                                  &n) < 0)
+            goto cleanup;
+
+        for (i = 0; i <= max_id && i < n; i++) {
+            if (i < start_cpu)
+                continue;
+
+            if (!map[i])
+                cpu_time = 0;
+            else
+                cpu_time = sum_cpu_time[i];
+            if (virTypedParameterAssign(&params[(i - start_cpu) * nparams + param_idx],
+                                        VIR_DOMAIN_CPU_STATS_CPUTIME,
+                                        VIR_TYPED_PARAM_ULLONG,
+                                        cpu_time) < 0) {
+                VIR_FREE(sum_cpu_time);
+                goto cleanup;
+            }
+        }
+        VIR_FREE(sum_cpu_time);
+    } else {
+        for (i = 0; i <= max_id; i++) {
+            unsigned long long cpu_time;
+
+            if (!map[i]) {
+                cpu_time = 0;
+            } else if (virStrToLong_ull(pos, &pos, 10, &cpu_time) < 0) {
+                qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                                _("cpuacct parse error"));
+                goto cleanup;
+            }
+            if (i < start_cpu)
+                continue;
+            ent = &params[ (i - start_cpu) * nparams + param_idx];
+            if (virTypedParameterAssign(ent, VIR_DOMAIN_CPU_STATS_CPUTIME,
+                                        VIR_TYPED_PARAM_ULLONG, cpu_time) < 0)
+                goto cleanup;
         }
-        if (i < start_cpu)
-            continue;
-        ent = &params[ (i - start_cpu) * nparams + param_idx];
-        if (virTypedParameterAssign(ent, VIR_DOMAIN_CPU_STATS_CPUTIME,
-                                    VIR_TYPED_PARAM_ULLONG, cpu_time) < 0)
-            goto cleanup;
     }
     rv = param_idx + 1;
 cleanup:
@@ -12464,7 +12581,8 @@ qemuDomainGetCPUStats(virDomainPtr domain,
     int ret = -1;
     bool isActive;
 
-    virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1);
+    virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY |
+                  VIR_DOMAIN_CPU_STATS_F_VCPU, -1);
 
     qemuDriverLock(driver);
 
@@ -12497,8 +12615,8 @@ qemuDomainGetCPUStats(virDomainPtr domain,
     if (start_cpu == -1)
         ret = qemuDomainGetTotalcpuStats(group, params, nparams);
     else
-        ret = qemuDomainGetPercpuStats(domain, group, params, nparams,
-                                       start_cpu, ncpus);
+        ret = qemuDomainGetPercpuStats(domain, vm, group, params, nparams,
+                                       start_cpu, ncpus, flags);
 cleanup:
     virCgroupFree(&group);
     if (vm)
diff --git a/src/util/cgroup.c b/src/util/cgroup.c
index ad49bc2..5b32881 100644
--- a/src/util/cgroup.c
+++ b/src/util/cgroup.c
@@ -530,7 +530,9 @@ static int virCgroupMakeGroup(virCgroupPtr parent, virCgroupPtr group,
             continue;
 
         /* We need to control cpu bandwidth for each vcpu now */
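+        /* ... and cpuacct is likewise kept so per-vcpu usage can be read */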
-        if ((flags & VIR_CGROUP_VCPU) && (i != VIR_CGROUP_CONTROLLER_CPU)) {
+        if ((flags & VIR_CGROUP_VCPU) &&
+            (i != VIR_CGROUP_CONTROLLER_CPU &&
+             i != VIR_CGROUP_CONTROLLER_CPUACCT)) {
             /* treat it as unmounted and we can use virCgroupAddTask */
             VIR_FREE(group->controllers[i].mountPoint);
             continue;
diff --git a/tools/virsh.c b/tools/virsh.c
index 5009b6b..c952dde 100644
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -5563,6 +5563,7 @@ static const vshCmdOptDef opts_cpu_stats[] = {
     {"total", VSH_OT_BOOL, 0, N_("Show total statistics only")},
     {"start", VSH_OT_INT, 0, N_("Show statistics from this CPU")},
     {"count", VSH_OT_INT, 0, N_("Number of shown CPUs at most")},
+    {"vcpu", VSH_OT_BOOL, 0, N_("Show vcpu statistics")},
     {NULL, 0, 0, NULL},
 };
 
@@ -5573,6 +5574,7 @@ cmdCPUStats(vshControl *ctl, const vshCmd *cmd)
     virTypedParameterPtr params = NULL;
     int i, j, pos, max_id, cpu = -1, show_count = -1, nparams;
     bool show_total = false, show_per_cpu = false;
+    unsigned int flags = 0;
 
     if (!vshConnectionUsability(ctl, ctl->conn))
         return false;
@@ -5580,6 +5582,11 @@ cmdCPUStats(vshControl *ctl, const vshCmd *cmd)
     if (!(dom = vshCommandOptDomain(ctl, cmd, NULL)))
         return false;
 
+    if (vshCommandOptBool(cmd, "vcpu")) {
+        flags |= VIR_DOMAIN_CPU_STATS_F_VCPU;
+    }
+
     show_total = vshCommandOptBool(cmd, "total");
     if (vshCommandOptInt(cmd, "start", &cpu) > 0)
         show_per_cpu = true;
@@ -5600,13 +5607,13 @@ cmdCPUStats(vshControl *ctl, const vshCmd *cmd)
         cpu = 0;
 
     /* get number of cpus on the node */
-    if ((max_id = virDomainGetCPUStats(dom, NULL, 0, 0, 0, 0)) < 0)
+    if ((max_id = virDomainGetCPUStats(dom, NULL, 0, 0, 0, flags)) < 0)
         goto failed_stats;
     if (show_count < 0 || show_count > max_id)
         show_count = max_id;
 
     /* get percpu information */
-    if ((nparams = virDomainGetCPUStats(dom, NULL, 0, 0, 1, 0)) < 0)
+    if ((nparams = virDomainGetCPUStats(dom, NULL, 0, 0, 1, flags)) < 0)
         goto failed_stats;
 
     if (!nparams) {
@@ -5620,7 +5627,7 @@ cmdCPUStats(vshControl *ctl, const vshCmd *cmd)
     while (show_count) {
         int ncpus = MIN(show_count, 128);
 
-        if (virDomainGetCPUStats(dom, params, nparams, cpu, ncpus, 0) < 0)
+        if (virDomainGetCPUStats(dom, params, nparams, cpu, ncpus, flags) < 0)
             goto failed_stats;
 
         for (i = 0; i < ncpus; i++) {
@@ -5654,7 +5661,7 @@ do_show_total:
         goto cleanup;
 
     /* get supported num of parameter for total statistics */
-    if ((nparams = virDomainGetCPUStats(dom, NULL, 0, -1, 1, 0)) < 0)
+    if ((nparams = virDomainGetCPUStats(dom, NULL, 0, -1, 1, flags)) < 0)
         goto failed_stats;
 
     if (!nparams) {
@@ -5666,7 +5673,7 @@ do_show_total:
         goto failed_params;
 
     /* passing start_cpu == -1 gives us domain's total status */
-    if ((nparams = virDomainGetCPUStats(dom, params, nparams, -1, 1, 0)) < 0)
+    if ((nparams = virDomainGetCPUStats(dom, params, nparams, -1, 1, flags)) < 0)
         goto failed_stats;
 
     vshPrint(ctl, _("Total:\n"));
-- 
1.7.1



