[libvirt] [PATCH v15] support offline migration

liguang lig.fnst at cn.fujitsu.com
Wed Nov 21 08:28:49 UTC 2012


The original migration code was not aware of the offline case, so add
support for offline migration without disturbing the existing migration
paths: pass the VIR_MIGRATE_OFFLINE flag to the migration APIs only when
the domain really is inactive, so that the migration process is not
confused by the offline domain and does not exit unexpectedly.
These changes do not take care of the disk images the domain requires;
those can be transferred by other APIs as suggested, so
VIR_MIGRATE_OFFLINE must not be combined with VIR_MIGRATE_NON_SHARED_*.
A persistent migration must be requested at the same time, e.g.
"virsh migrate --offline --persistent ...".

Signed-off-by: liguang <lig.fnst at cn.fujitsu.com>
---
 include/libvirt/libvirt.h.in |    1 +
 src/libvirt.c                |    4 +
 src/qemu/qemu_driver.c       |   16 +++---
 src/qemu/qemu_migration.c    |  140 +++++++++++++++++++++++++++---------------
 src/qemu/qemu_migration.h    |    9 ++-
 tools/virsh-domain.c         |    5 ++
 tools/virsh.pod              |    5 +-
 7 files changed, 117 insertions(+), 63 deletions(-)
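
For context, a minimal, illustrative sketch (not part of the patch) of how
a management application would request an offline migration through the
public API once this flag exists.  The connection URIs and the domain name
"demo" are placeholders; the flags and API calls are the existing libvirt
ones plus the VIR_MIGRATE_OFFLINE flag introduced here.  The equivalent
virsh invocation is
"virsh migrate --offline --persistent demo qemu+ssh://dest.example.com/system".

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    /* Source and destination connections; URIs are placeholders. */
    virConnectPtr src = virConnectOpen("qemu:///system");
    virConnectPtr dst = virConnectOpen("qemu+ssh://dest.example.com/system");
    virDomainPtr dom = NULL, mig = NULL;
    int ret = 1;

    if (!src || !dst)
        goto cleanup;

    /* The domain is defined but inactive on the source host. */
    if (!(dom = virDomainLookupByName(src, "demo")))
        goto cleanup;

    /* VIR_MIGRATE_OFFLINE requires VIR_MIGRATE_PERSIST_DEST and must not
     * be combined with VIR_MIGRATE_NON_SHARED_DISK/INC; disk images are
     * expected to be transferred by other means. */
    if (!(mig = virDomainMigrate(dom, dst,
                                 VIR_MIGRATE_OFFLINE |
                                 VIR_MIGRATE_PERSIST_DEST,
                                 NULL, NULL, 0))) {
        fprintf(stderr, "offline migration failed\n");
        goto cleanup;
    }

    ret = 0;

cleanup:
    if (mig)
        virDomainFree(mig);
    if (dom)
        virDomainFree(dom);
    if (dst)
        virConnectClose(dst);
    if (src)
        virConnectClose(src);
    return ret;
}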

diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index 49a361a..ea625b3 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -1092,6 +1092,7 @@ typedef enum {
                                                * whole migration process; this will be used automatically
                                                * when supported */
     VIR_MIGRATE_UNSAFE            = (1 << 9), /* force migration even if it is considered unsafe */
+    VIR_MIGRATE_OFFLINE           = (1 << 10), /* offline migrate */
 } virDomainMigrateFlags;
 
 /* Domain migration. */
diff --git a/src/libvirt.c b/src/libvirt.c
index bdb1dc6..6d749d9 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -4827,6 +4827,10 @@ virDomainMigrateVersion3(virDomainPtr domain,
     if (uri_out)
         uri = uri_out; /* Did domainMigratePrepare3 change URI? */
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        cancelled = 0;
+        goto finish;
+    }
     /* Perform the migration.  The driver isn't supposed to return
      * until the migration is complete. The src VM should remain
      * running, but in paused state until the destination can
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 595c452..1ba1665 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -9625,7 +9625,7 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
 
     ret = qemuMigrationPrepareTunnel(driver, dconn,
                                      NULL, 0, NULL, NULL, /* No cookies in v2 */
-                                     st, dname, dom_xml);
+                                     st, dname, dom_xml, flags);
 
 cleanup:
     qemuDriverUnlock(driver);
@@ -9685,7 +9685,7 @@ qemudDomainMigratePrepare2(virConnectPtr dconn,
     ret = qemuMigrationPrepareDirect(driver, dconn,
                                      NULL, 0, NULL, NULL, /* No cookies */
                                      uri_in, uri_out,
-                                     dname, dom_xml);
+                                     dname, dom_xml, flags);
 
 cleanup:
     qemuDriverUnlock(driver);
@@ -9827,7 +9827,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
         asyncJob = QEMU_ASYNC_JOB_NONE;
     }
 
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
         goto endjob;
@@ -9836,9 +9836,9 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
     /* Check if there is any ejected media.
      * We don't want to require them on the destination.
      */
-
-    if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
-        goto endjob;
+    if (!(flags & VIR_MIGRATE_OFFLINE) &&
+        qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
+        goto endjob;
 
     if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
                                    cookieout, cookieoutlen,
@@ -9922,7 +9922,7 @@ qemuDomainMigratePrepare3(virConnectPtr dconn,
                                      cookiein, cookieinlen,
                                      cookieout, cookieoutlen,
                                      uri_in, uri_out,
-                                     dname, dom_xml);
+                                     dname, dom_xml, flags);
 
 cleanup:
     qemuDriverUnlock(driver);
@@ -9967,7 +9967,7 @@ qemuDomainMigratePrepareTunnel3(virConnectPtr dconn,
     ret = qemuMigrationPrepareTunnel(driver, dconn,
                                      cookiein, cookieinlen,
                                      cookieout, cookieoutlen,
-                                     st, dname, dom_xml);
+                                     st, dname, dom_xml, flags);
     qemuDriverUnlock(driver);
 
 cleanup:
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index d52ec59..53171df 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1442,6 +1442,24 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
                                 QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
         goto cleanup;
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
+                     VIR_MIGRATE_NON_SHARED_INC)) {
+            virReportError(VIR_ERR_OPERATION_INVALID,
+                           "%s",
+                           _("offline migration cannot handle "
+                             "non-shared storage"));
+            goto cleanup;
+        }
+        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+            virReportError(VIR_ERR_OPERATION_INVALID,
+                           "%s",
+                           _("offline migration must be specified with "
+                             "the persistent flag set"));
+            goto cleanup;
+        }
+    }
+
     if (xmlin) {
         if (!(def = virDomainDefParseString(driver->caps, xmlin,
                                             QEMU_EXPECTED_VIRT_TYPES,
@@ -1499,7 +1517,8 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
                         const char *dname,
                         const char *dom_xml,
                         const char *migrateFrom,
-                        virStreamPtr st)
+                        virStreamPtr st,
+                        unsigned long flags)
 {
     virDomainDefPtr def = NULL;
     virDomainObjPtr vm = NULL;
@@ -1609,15 +1628,18 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
     /* Start the QEMU daemon, with the same command-line arguments plus
      * -incoming $migrateFrom
      */
-    if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0], NULL, NULL,
-                         VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
-                         VIR_QEMU_PROCESS_START_PAUSED |
-                         VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
-        virDomainAuditStart(vm, "migrated", false);
-        /* Note that we don't set an error here because qemuProcessStart
-         * should have already done that.
-         */
-        goto endjob;
+    if (!(flags & VIR_MIGRATE_OFFLINE)) {
+        if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0],
+                             NULL, NULL,
+                             VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
+                             VIR_QEMU_PROCESS_START_PAUSED |
+                             VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
+            virDomainAuditStart(vm, "migrated", false);
+            /* Note that we don't set an error here because qemuProcessStart
+             * should have already done that.
+             */
+            goto endjob;
+        }
     }
 
     if (tunnel) {
@@ -1625,7 +1647,8 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
             virReportSystemError(errno, "%s",
                                  _("cannot pass pipe for tunnelled migration"));
             virDomainAuditStart(vm, "migrated", false);
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
+            if (!(flags & VIR_MIGRATE_OFFLINE))
+                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
             goto endjob;
         }
         dataFD[1] = -1; /* 'st' owns the FD now & will close it */
@@ -1640,13 +1663,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
         VIR_DEBUG("Received no lockstate");
     }
 
-    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
-                                QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
-        /* We could tear down the whole guest here, but
-         * cookie data is (so far) non-critical, so that
-         * seems a little harsh. We'll just warn for now.
-         */
-        VIR_WARN("Unable to encode migration cookie");
+    if (!(flags & VIR_MIGRATE_OFFLINE)) {
+        if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
+                                    QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
+            /* We could tear down the whole guest here, but
+             * cookie data is (so far) non-critical, so that
+             * seems a little harsh. We'll just warn for now.
+             */
+            VIR_WARN("Unable to encode migration cookie");
+        }
     }
 
     if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
@@ -1708,7 +1733,8 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
                            int *cookieoutlen,
                            virStreamPtr st,
                            const char *dname,
-                           const char *dom_xml)
+                           const char *dom_xml,
+                           unsigned long flags)
 {
     int ret;
 
@@ -1722,7 +1748,7 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
      */
     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                   cookieout, cookieoutlen, dname, dom_xml,
-                                  "stdio", st);
+                                  "stdio", st, flags);
     return ret;
 }
 
@@ -1737,7 +1763,8 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
                            const char *uri_in,
                            char **uri_out,
                            const char *dname,
-                           const char *dom_xml)
+                           const char *dom_xml,
+                           unsigned long flags)
 {
     static int port = 0;
     int this_port;
@@ -1833,7 +1860,7 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
 
     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                   cookieout, cookieoutlen, dname, dom_xml,
-                                  migrateFrom, NULL);
+                                  migrateFrom, NULL, flags);
 cleanup:
     VIR_FREE(hostname);
     if (ret != 0)
@@ -2675,7 +2702,9 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
              uri, &uri_out, flags, dname, resource, dom_xml);
         qemuDomainObjExitRemoteWithDriver(driver, vm);
     }
+
     VIR_FREE(dom_xml);
+
     if (ret == -1)
         goto cleanup;
 
@@ -2858,7 +2887,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
     }
 
     /* domain may have been stopped while we were talking to remote daemon */
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
         goto cleanup;
@@ -2921,7 +2950,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver,
     if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;
 
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
         goto endjob;
@@ -3245,26 +3274,27 @@ qemuMigrationFinish(struct qemud_driver *driver,
      * object, but if no, clean up the empty qemu process.
      */
     if (retcode == 0) {
-        if (!virDomainObjIsActive(vm)) {
+        if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
             virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
             goto endjob;
         }
 
-        if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                            VIR_QEMU_PROCESS_STOP_MIGRATED);
-            virDomainAuditStop(vm, "failed");
-            event = virDomainEventNewFromObj(vm,
-                                             VIR_DOMAIN_EVENT_STOPPED,
-                                             VIR_DOMAIN_EVENT_STOPPED_FAILED);
-            goto endjob;
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
+            if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
+                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                                VIR_QEMU_PROCESS_STOP_MIGRATED);
+                virDomainAuditStop(vm, "failed");
+                event = virDomainEventNewFromObj(vm,
+                                                 VIR_DOMAIN_EVENT_STOPPED,
+                                                 VIR_DOMAIN_EVENT_STOPPED_FAILED);
+                goto endjob;
+            }
+            if (mig->network)
+                if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
+                    VIR_WARN("unable to provide network data for relocation");
         }
 
-        if (mig->network)
-            if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
-                VIR_WARN("unable to provide network data for relocation");
-
         if (flags & VIR_MIGRATE_PERSIST_DEST) {
             virDomainDefPtr vmdef;
             if (vm->persistent)
@@ -3312,7 +3342,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
             event = NULL;
         }
 
-        if (!(flags & VIR_MIGRATE_PAUSED)) {
+        if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
             /* run 'cont' on the destination, which allows migration on qemu
              * >= 0.10.6 to work properly.  This isn't strictly necessary on
              * older qemu's, but it also doesn't hurt anything there
@@ -3350,20 +3380,26 @@ qemuMigrationFinish(struct qemud_driver *driver,
 
         dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
 
-        event = virDomainEventNewFromObj(vm,
-                                         VIR_DOMAIN_EVENT_RESUMED,
-                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-        if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
-            virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
-            if (event)
-                qemuDomainEventQueue(driver, event);
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
             event = virDomainEventNewFromObj(vm,
-                                             VIR_DOMAIN_EVENT_SUSPENDED,
-                                             VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+                                             VIR_DOMAIN_EVENT_RESUMED,
+                                             VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+            if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
+                virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
+                                     VIR_DOMAIN_PAUSED_USER);
+                if (event)
+                    qemuDomainEventQueue(driver, event);
+                event = virDomainEventNewFromObj(vm,
+                                                 VIR_DOMAIN_EVENT_SUSPENDED,
+                                                 VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+            }
         }
-        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
-            VIR_WARN("Failed to save status on vm %s", vm->def->name);
-            goto endjob;
+
+        if (virDomainObjIsActive(vm)) {
+            if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
+                VIR_WARN("Failed to save status on vm %s", vm->def->name);
+                goto endjob;
+            }
         }
 
         /* Guest is successfully running, so cancel previous auto destroy */
@@ -3430,6 +3466,9 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
     if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
         return -1;
 
+    if (flags & VIR_MIGRATE_OFFLINE)
+        goto done;
+
     /* Did the migration go as planned?  If yes, kill off the
      * domain object, but if no, resume CPUs
      */
@@ -3465,6 +3504,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
         }
     }
 
+done:
     qemuMigrationCookieFree(mig);
     rv = 0;
 
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 7a2269a..f2dc5aa 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -36,7 +36,8 @@
      VIR_MIGRATE_NON_SHARED_DISK |              \
      VIR_MIGRATE_NON_SHARED_INC |               \
      VIR_MIGRATE_CHANGE_PROTECTION |            \
-     VIR_MIGRATE_UNSAFE)
+     VIR_MIGRATE_UNSAFE |                       \
+     VIR_MIGRATE_OFFLINE)
 
 enum qemuMigrationJobPhase {
     QEMU_MIGRATION_PHASE_NONE = 0,
@@ -97,7 +98,8 @@ int qemuMigrationPrepareTunnel(struct qemud_driver *driver,
                                int *cookieoutlen,
                                virStreamPtr st,
                                const char *dname,
-                               const char *dom_xml);
+                               const char *dom_xml,
+                               unsigned long flags);
 
 int qemuMigrationPrepareDirect(struct qemud_driver *driver,
                                virConnectPtr dconn,
@@ -108,7 +110,8 @@ int qemuMigrationPrepareDirect(struct qemud_driver *driver,
                                const char *uri_in,
                                char **uri_out,
                                const char *dname,
-                               const char *dom_xml);
+                               const char *dom_xml,
+                               unsigned long flags);
 
 int qemuMigrationPerform(struct qemud_driver *driver,
                          virConnectPtr conn,
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index cc47383..5d18bdf 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -6661,6 +6661,7 @@ static const vshCmdInfo info_migrate[] = {
 
 static const vshCmdOptDef opts_migrate[] = {
     {"live", VSH_OT_BOOL, 0, N_("live migration")},
+    {"offline", VSH_OT_BOOL, 0, N_("offline (domain's inactive) migration")},
     {"p2p", VSH_OT_BOOL, 0, N_("peer-2-peer migration")},
     {"direct", VSH_OT_BOOL, 0, N_("direct migration")},
     {"tunneled", VSH_OT_ALIAS, 0, "tunnelled"},
@@ -6746,6 +6747,10 @@ doMigrate(void *opaque)
     if (vshCommandOptBool(cmd, "unsafe"))
         flags |= VIR_MIGRATE_UNSAFE;
 
+    if (vshCommandOptBool(cmd, "offline")) {
+        flags |= VIR_MIGRATE_OFFLINE;
+    }
+
     if (xmlfile &&
         virFileReadAll(xmlfile, 8192, &xml) < 0) {
         vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
diff --git a/tools/virsh.pod b/tools/virsh.pod
index 29be39e..b3ef64e 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -1026,13 +1026,14 @@ I<--total> for only the total stats, I<start> for only the per-cpu
 stats of the CPUs from I<start>, I<count> for only I<count> CPUs'
 stats.
 
-=item B<migrate> [I<--live>] [I<--direct>] [I<--p2p> [I<--tunnelled>]]
+=item B<migrate> [I<--live>] [I<--offline>] [I<--direct>] [I<--p2p> [I<--tunnelled>]]
 [I<--persistent>] [I<--undefinesource>] [I<--suspend>] [I<--copy-storage-all>]
 [I<--copy-storage-inc>] [I<--change-protection>] [I<--unsafe>] [I<--verbose>]
 I<domain> I<desturi> [I<migrateuri>] [I<dname>]
 [I<--timeout> B<seconds>] [I<--xml> B<file>]
 
-Migrate domain to another host.  Add I<--live> for live migration; I<--p2p>
+Migrate domain to another host.  Add I<--live> for live migration;
+I<--offline> for offline migration of an inactive domain; I<--p2p>
 for peer-2-peer migration; I<--direct> for direct migration; or I<--tunnelled>
 for tunnelled migration.  I<--persistent> leaves the domain persistent on
 destination host, I<--undefinesource> undefines the domain on the source host,
-- 
1.7.1