[libvirt] [PATCH 2/2] qemu: migration: show disks stats for nbd migration

Nikolay Shirokovskiy nshirokovskiy at virtuozzo.com
Tue Dec 20 07:05:50 UTC 2016


There are no disk statistics when migrating with VIR_MIGRATE_NON_SHARED_*
on qemu versions that support NBD. The reason is that disks are copied via drive
mirroring rather than within the scope of the migration job itself. Let's enhance
qemuMigrationFetchJobStatus to take mirror jobs into account.

Now qemuMigrationFetchJobStatus fetches both migration and NBD stats, so we
need to skip fetching migration stats when migration is not running in the
virDomainGetJob{Info, Stats} paths. For this purpose a check_status flag
is added to qemuMigrationFetchJobStatus.

Another tricky part is that during post-copy migration the stats are updated
again in the confirm phase. At that time no block jobs are active, and
thus the disk statistics gathered earlier will not be overwritten.

The 'total' field is set from the 'end' field of the block job info for the
sake of simplicity. This is accurate only when there is no guest disk
activity during migration. If there is activity, then 'end' will
grow, while 'total' is an estimate that should stay constant.
I guess this could be fixed by setting 'total' to the disk's 'capacity'.

There is also a known possible corner-case issue with this implementation.
There is a chance that a client asking for stats while mirroring is being
stopped on a successful migration will see only a subset of the mirrored disks
and thus will receive inconsistent partial info.
---
 docs/news.html.in         |  4 +++
 src/qemu/qemu_driver.c    |  5 ++-
 src/qemu/qemu_migration.c | 88 ++++++++++++++++++++++++++++++++++++++++++-----
 src/qemu/qemu_migration.h |  3 +-
 4 files changed, 88 insertions(+), 12 deletions(-)

diff --git a/docs/news.html.in b/docs/news.html.in
index 5a34674..7384f0a 100644
--- a/docs/news.html.in
+++ b/docs/news.html.in
@@ -42,6 +42,10 @@
           cpu cycles, stalled backend cpu cycles, and ref cpu
           cycles by applications running on the platform
           </li>
+          <li>qemu: show disk stats for NBD disk migration<br/>
+          Disk statistics are now shown in migration statistics during
+          the disk copy phase of migrations with non-shared disks.
+          </li>
         </ul>
       </li>
       <li><strong>Bug fixes</strong>
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 1a46433..c33a8ce 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -13010,8 +13010,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
     if (completed)
         fetch = false;
 
-    /* Do not ask QEMU if migration is not even running yet  */
-    if (!priv->job.current || !priv->job.current->stats.status)
+    if (!priv->job.current)
         fetch = false;
 
     if (fetch) {
@@ -13050,7 +13049,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
         jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED) {
         if (fetch)
             ret = qemuMigrationFetchJobStatus(driver, vm, QEMU_ASYNC_JOB_NONE,
-                                              jobInfo);
+                                              jobInfo, true);
         else
             ret = qemuDomainJobInfoUpdateTime(jobInfo);
     } else {
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 0f4a6cf..9bc7dcc 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2579,26 +2579,98 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo)
     }
 }
 
+static int
+qemuMigrationFetchMirrorStats(virQEMUDriverPtr driver,
+                              virDomainObjPtr vm,
+                              qemuDomainAsyncJob asyncJob,
+                              qemuMonitorMigrationStatsPtr stats)
+{
+    size_t i;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    bool nbd = false;
+    virHashTablePtr blockinfo = NULL;
 
-int
-qemuMigrationFetchJobStatus(virQEMUDriverPtr driver,
-                            virDomainObjPtr vm,
-                            qemuDomainAsyncJob asyncJob,
-                            qemuDomainJobInfoPtr jobInfo)
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDefPtr disk = vm->def->disks[i];
+        if (QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating) {
+            nbd = true;
+            break;
+        }
+    }
+
+    if (!nbd)
+        return 0;
+
+    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
+        return -1;
+
+    blockinfo = qemuMonitorGetAllBlockJobInfo(priv->mon);
+
+    if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockinfo)
+        return -1;
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDefPtr disk = vm->def->disks[i];
+        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
+        qemuMonitorBlockJobInfoPtr data;
+
+        if (!diskPriv->migrating)
+            continue;
+
+        if (!(data = virHashLookup(blockinfo, disk->info.alias)))
+            continue;
+
+        stats->disk_transferred += data->cur;
+        stats->disk_total += data->end;
+        stats->disk_remaining += data->end - data->cur;
+    }
+
+    virHashFree(blockinfo);
+    return 0;
+}
+
+static int
+qemuMigrationFetchJobStats(virQEMUDriverPtr driver,
+                           virDomainObjPtr vm,
+                           qemuDomainAsyncJob asyncJob,
+                           qemuDomainJobInfoPtr jobInfo,
+                           bool check_status)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     int rv;
 
+    if (check_status && !priv->job.current->stats.status)
+        return 0;
+
     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
         return -1;
 
-    memset(&jobInfo->stats, 0, sizeof(jobInfo->stats));
     rv = qemuMonitorGetMigrationStats(priv->mon, &jobInfo->stats);
 
     if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
         return -1;
 
     qemuMigrationUpdateJobType(jobInfo);
+
+    return 0;
+}
+
+int
+qemuMigrationFetchJobStatus(virQEMUDriverPtr driver,
+                            virDomainObjPtr vm,
+                            qemuDomainAsyncJob asyncJob,
+                            qemuDomainJobInfoPtr jobInfo,
+                            bool check_status)
+{
+    memset(&jobInfo->stats, 0, sizeof(jobInfo->stats));
+
+    if (qemuMigrationFetchJobStats(driver, vm, asyncJob, jobInfo,
+        check_status) < 0)
+        return -1;
+
+    if (qemuMigrationFetchMirrorStats(driver, vm, asyncJob, &jobInfo->stats) < 0)
+        return -1;
+
     return qemuDomainJobInfoUpdateTime(jobInfo);
 }
 
@@ -2630,7 +2702,7 @@ qemuMigrationUpdateJobStatus(virQEMUDriverPtr driver,
     qemuDomainJobInfoPtr jobInfo = priv->job.current;
     qemuDomainJobInfo newInfo = *jobInfo;
 
-    if (qemuMigrationFetchJobStatus(driver, vm, asyncJob, &newInfo) < 0)
+    if (qemuMigrationFetchJobStatus(driver, vm, asyncJob, &newInfo, false) < 0)
         return -1;
 
     *jobInfo = newInfo;
@@ -4240,7 +4312,7 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
             reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
             qemuMigrationFetchJobStatus(driver, vm,
                                         QEMU_ASYNC_JOB_MIGRATION_OUT,
-                                        jobInfo) < 0)
+                                        jobInfo, false) < 0)
             VIR_WARN("Could not refresh migration statistics");
 
         qemuDomainJobInfoUpdateTime(jobInfo);
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 14c6178..59a4bbc 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -248,7 +248,8 @@ int qemuMigrationCancel(virQEMUDriverPtr driver,
 int qemuMigrationFetchJobStatus(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 qemuDomainAsyncJob asyncJob,
-                                qemuDomainJobInfoPtr jobInfo);
+                                qemuDomainJobInfoPtr jobInfo,
+                                bool check_status);
 
 int qemuMigrationErrorInit(virQEMUDriverPtr driver);
 void qemuMigrationErrorSave(virQEMUDriverPtr driver,
-- 
1.8.3.1




More information about the libvir-list mailing list