[PATCH 09/11] qemu: use snapshot-save with modern qemu to create internal snapshots

Nikolay Shirokovskiy nikolay.shirokovskiy at openvz.org
Thu Mar 31 11:19:19 UTC 2022


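If qemu is new enough to support the snapshot-save QMP command
(QEMU_CAPS_SNAPSHOT_SAVE), use it to create internal snapshots of a
running domain instead of the legacy HMP savevm command issued by
qemuMonitorCreateSnapshot. snapshot-save runs as a background job, so
track the JOB_STATUS_CHANGE events in qemuProcessHandleJobStatusChange,
wait for the job to reach the concluded state, check its result and
finally dismiss it.

For illustration only, the QMP command issued by qemuMonitorSnapshotSave
looks roughly like this (the tag and node names are made-up examples,
they depend on the snapshot and blockdev configuration):

  { "execute": "snapshot-save",
    "arguments": { "job-id": "snapshot-save",
                   "tag": "snap1",
                   "vmstate": "libvirt-1-format",
                   "devices": [ "libvirt-1-format", "libvirt-2-format" ] } }
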
Signed-off-by: Nikolay Shirokovskiy <nikolay.shirokovskiy at openvz.org>
---
 src/qemu/qemu_process.c  |  11 +++
 src/qemu/qemu_snapshot.c | 179 +++++++++++++++++++++++++++++++++++++--
 2 files changed, 182 insertions(+), 8 deletions(-)

diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 9918423701..6ed7eaaa83 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -958,6 +958,17 @@ qemuProcessHandleJobStatusChange(qemuMonitor *mon G_GNUC_UNUSED,
               jobname, vm, vm->def->name,
               qemuMonitorJobStatusTypeToString(status), status);
 
+    if (STREQ(jobname, "snapshot-save") ||
+        STREQ(jobname, "snapshot-delete") ||
+        STREQ(jobname, "snapshot-load")) {
+        if (status == QEMU_MONITOR_JOB_STATUS_CONCLUDED && priv->job.current) {
+            priv->job.current->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
+            virDomainObjBroadcast(vm);
+        }
+
+        goto cleanup;
+    }
+
     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
         VIR_DEBUG("job '%s' handled by old blockjob handler", jobname);
         goto cleanup;
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index 54eafb5020..9f81befe85 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -142,6 +142,131 @@ qemuSnapshotForEachQcow2(virQEMUDriver *driver,
 }
 
 
+/*
+ * Collect the format node names of all disks taking part in the internal
+ * snapshot. If @memoryNode is non-NULL, set it to the node name of the disk
+ * that is going to hold the vmstate (memory) image.
+ */
+static GPtrArray *
+qemuSnapshotGetDisksNodes(virDomainSnapshotDef *snapdef,
+                          virDomainDef *def,
+                          const char **memoryNode)
+{
+    g_autoptr(GPtrArray) devices = g_ptr_array_new();
+    size_t i;
+
+    if (memoryNode)
+        *memoryNode = NULL;
+
+    for (i = 0; i < snapdef->ndisks; i++) {
+        if (snapdef->disks[i].snapshot == VIR_DOMAIN_SNAPSHOT_LOCATION_INTERNAL)
+            g_ptr_array_add(devices, def->disks[i]->src->nodeformat);
+
+        if (memoryNode && STREQ(snapdef->memorydisk, snapdef->disks[i].name))
+            *memoryNode = def->disks[i]->src->nodeformat;
+    }
+
+    if (memoryNode && !*memoryNode) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("cannot find vmstate disk '%s'"), snapdef->memorydisk);
+        return NULL;
+    }
+
+    return g_steal_pointer(&devices);
+}
+
+
+static int
+qemuSnapshotDismissJob(virQEMUDriver *driver,
+                       virDomainObj *vm,
+                       virDomainAsyncJob asyncJob,
+                       const char *jobid)
+{
+    qemuDomainObjPrivate *priv = vm->privateData;
+    int rc;
+
+    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
+        return -1;
+
+    rc = qemuMonitorJobDismiss(priv->mon, jobid);
+    qemuDomainObjExitMonitor(vm);
+    if (rc < 0)
+        return -1;
+
+    return 0;
+}
+
+
+/*
+ * Wait for the QMP job @jobid to conclude, report an error if it failed
+ * and dismiss it afterwards.
+ */
+static int
+qemuSnapshotWaitJob(virQEMUDriver *driver,
+                    virDomainObj *vm,
+                    virDomainAsyncJob asyncJob,
+                    const char *jobid)
+{
+    qemuDomainObjPrivate *priv = vm->privateData;
+    qemuMonitorJobInfo **jobs = NULL;
+    size_t njobs = 0;
+    qemuMonitorJobInfo *job = NULL;
+    int ret = -1;
+    size_t i;
+    int rc;
+
+    while (priv->job.current->status != VIR_DOMAIN_JOB_STATUS_COMPLETED) {
+        /*
+         * There is not much we can do if the wait fails while the domain is
+         * still active: to clean up we would need to cancel the job and wait
+         * for the concluded state again.
+         */
+        if (virDomainObjWait(vm) < 0)
+            return -1;
+    }
+
+    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
+        goto cleanup;
+    rc = qemuMonitorGetJobInfo(priv->mon, &jobs, &njobs);
+    qemuDomainObjExitMonitor(vm);
+    if (rc < 0)
+        goto cleanup;
+
+    for (i = 0; i < njobs; i++) {
+        if (STREQ_NULLABLE(jobs[i]->id, jobid)) {
+            job = jobs[i];
+            break;
+        }
+    }
+
+    if (!job) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("cannot obtain status of '%s' job"), jobid);
+        goto cleanup;
+    }
+    if (job->status != QEMU_MONITOR_JOB_STATUS_CONCLUDED) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("unexpected '%s' job status '%s'"), jobid,
+                       qemuMonitorJobStatusTypeToString(job->status));
+        goto cleanup;
+    }
+    if (job->error) {
+        virReportError(VIR_ERR_OPERATION_FAILED,
+                       _("'%s' job failed '%s'"), jobid, job->error);
+        goto cleanup;
+    }
+
+    ret = 0;
+
+ cleanup:
+    if (virDomainObjIsActive(vm)) {
+        virErrorPtr err;
+
+        virErrorPreserveLast(&err);
+        if (qemuSnapshotDismissJob(driver, vm, asyncJob, jobid) < 0)
+            VIR_WARN("failed to dismiss job '%s'", jobid);
+        virErrorRestore(&err);
+    }
+
+    for (i = 0; i < njobs; i++)
+        qemuMonitorJobInfoFree(jobs[i]);
+    g_free(jobs);
+
+    return ret;
+}
+
+
 /* Discard one snapshot (or its metadata), without reparenting any children.  */
 static int
 qemuSnapshotDiscard(virQEMUDriver *driver,
@@ -453,6 +578,38 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
 }
 
 
+static int
+qemuSnapshotCreateActiveInternalRun(virQEMUDriver *driver,
+                                    virDomainObj *vm,
+                                    virDomainMomentObj *snap)
+{
+    qemuDomainObjPrivate *priv = vm->privateData;
+    virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
+    g_autoptr(GPtrArray) devices = NULL;
+    const char *memoryNode;
+    int rc;
+
+    if (!(devices = qemuSnapshotGetDisksNodes(snapdef, vm->def, &memoryNode)))
+        return -1;
+
+    if (qemuDomainObjEnterMonitorAsync(driver, vm,
+                                       VIR_ASYNC_JOB_SNAPSHOT) < 0)
+        return -1;
+    rc = qemuMonitorSnapshotSave(priv->mon,
+                                 "snapshot-save",
+                                 snap->def->name,
+                                 memoryNode,
+                                 (const char **)devices->pdata,
+                                 devices->len);
+    qemuDomainObjExitMonitor(vm);
+    if (rc < 0)
+        return -1;
+
+    return qemuSnapshotWaitJob(driver, vm, VIR_ASYNC_JOB_SNAPSHOT,
+                               "snapshot-save");
+}
+
+
 /* The domain is expected to be locked and active. */
 static int
 qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
@@ -465,6 +622,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
     bool resume = false;
     virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
     bool halt = !!(flags & VIR_DOMAIN_SNAPSHOT_CREATE_HALT);
+    bool modern = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SNAPSHOT_SAVE);
     int ret = -1;
 
     if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
@@ -478,14 +636,19 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
         resume = true;
     }
 
-    if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                       VIR_ASYNC_JOB_SNAPSHOT) < 0)
-        return -1;
-
-    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
-    qemuDomainObjExitMonitor(vm);
-    if (ret < 0)
-        goto cleanup;
+    if (modern) {
+        if (qemuSnapshotCreateActiveInternalRun(driver, vm, snap) < 0)
+            goto cleanup;
+        ret = 0;
+    } else {
+        if (qemuDomainObjEnterMonitorAsync(driver, vm,
+                                           VIR_ASYNC_JOB_SNAPSHOT) < 0)
+            return -1;
+        ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
+        qemuDomainObjExitMonitor(vm);
+        if (ret < 0)
+            goto cleanup;
+    }
 
     if (!(snapdef->cookie = (virObject *) qemuDomainSaveCookieNew(vm)))
         goto cleanup;
-- 
2.35.1
