[libvirt] [PATCH 4/6] qemu: Refactor do{Tunnel, Native}Migrate functions

Daniel Veillard veillard at redhat.com
Mon Aug 15 09:48:03 UTC 2011


On Mon, Aug 15, 2011 at 09:58:14AM +0200, Jiri Denemark wrote:
> The core of these two functions is very similar and most of it is even
> exactly the same. Factor out the core functionality into a separate
> function to remove code duplication and make further changes easier.
> ---
>  src/qemu/qemu_migration.c |  499 ++++++++++++++++++++++-----------------------
>  1 files changed, 239 insertions(+), 260 deletions(-)
> 
> diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
> index 4d0e062..1cabbe0 100644
> --- a/src/qemu/qemu_migration.c
> +++ b/src/qemu/qemu_migration.c
> @@ -1265,122 +1265,37 @@ cleanup:
>  }
>  
>  
> -/* Perform migration using QEMU's native TCP migrate support,
> - * not encrypted obviously
> - */
> -static int doNativeMigrate(struct qemud_driver *driver,
> -                           virDomainObjPtr vm,
> -                           const char *uri,
> -                           const char *cookiein,
> -                           int cookieinlen,
> -                           char **cookieout,
> -                           int *cookieoutlen,
> -                           unsigned int flags,
> -                           const char *dname ATTRIBUTE_UNUSED,
> -                           unsigned long resource)
> -{
> -    int ret = -1;
> -    xmlURIPtr uribits = NULL;
> -    qemuDomainObjPrivatePtr priv = vm->privateData;
> -    unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
> -    qemuMigrationCookiePtr mig = NULL;
> -    VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
> -              "cookieout=%p, cookieoutlen=%p, flags=%x, dname=%s, resource=%lu",
> -              driver, vm, uri, NULLSTR(cookiein), cookieinlen,
> -              cookieout, cookieoutlen, flags, NULLSTR(dname), resource);
> -
> -    if (virLockManagerPluginUsesState(driver->lockManager) &&
> -        !cookieout) {
> -        qemuReportError(VIR_ERR_INTERNAL_ERROR,
> -                        _("Migration with lock driver %s requires cookie support"),
> -                        virLockManagerPluginGetName(driver->lockManager));
> -        return -1;
> -    }
> -
> -    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
> -                                       QEMU_MIGRATION_COOKIE_GRAPHICS)))
> -        goto cleanup;
> -
> -    if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0)
> -        VIR_WARN("unable to provide data for graphics client relocation");
> -
> -    /* Issue the migrate command. */
> -    if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
> -        /* HACK: source host generates bogus URIs, so fix them up */
> -        char *tmpuri;
> -        if (virAsprintf(&tmpuri, "tcp://%s", uri + strlen("tcp:")) < 0) {
> -            virReportOOMError();
> -            goto cleanup;
> -        }
> -        uribits = xmlParseURI(tmpuri);
> -        VIR_FREE(tmpuri);
> -    } else {
> -        uribits = xmlParseURI(uri);
> -    }
> -    if (!uribits) {
> -        qemuReportError(VIR_ERR_INTERNAL_ERROR,
> -                        _("cannot parse URI %s"), uri);
> -        goto cleanup;
> -    }
> -
> -    /* Before EnterMonitor, since qemuProcessStopCPUs already does that */
> -    if (!(flags & VIR_MIGRATE_LIVE) &&
> -        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
> -        if (qemuMigrationSetOffline(driver, vm) < 0)
> -            goto cleanup;
> -    }
> -
> -    if (qemuDomainObjEnterMonitorAsync(driver, vm,
> -                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
> -        goto cleanup;
> -
> -    if (resource > 0 &&
> -        qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
> -        qemuDomainObjExitMonitorWithDriver(driver, vm);
> -        goto cleanup;
> -    }
> -
> -    if (flags & VIR_MIGRATE_NON_SHARED_DISK)
> -        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
> -
> -    if (flags & VIR_MIGRATE_NON_SHARED_INC)
> -        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
> -
> -    if (qemuMonitorMigrateToHost(priv->mon, background_flags, uribits->server,
> -                                 uribits->port) < 0) {
> -        qemuDomainObjExitMonitorWithDriver(driver, vm);
> -        goto cleanup;
> -    }
> -    qemuDomainObjExitMonitorWithDriver(driver, vm);
> -
> -    if (qemuMigrationWaitForCompletion(driver, vm,
> -                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
> -        goto cleanup;
> -
> -    /* When migration completed, QEMU will have paused the
> -     * CPUs for us, but unless we're using the JSON monitor
> -     * we won't have been notified of this, so might still
> -     * think we're running. For v2 protocol this doesn't
> -     * matter because we'll kill the VM soon, but for v3
> -     * this is important because we stay paused until the
> -     * confirm3 step, but need to release the lock state
> -     */
> -    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
> -        if (qemuMigrationSetOffline(driver, vm) < 0)
> -            goto cleanup;
> -    }
> -
> -    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
> -        VIR_WARN("Unable to encode migration cookie");
> -
> -    ret = 0;
> +enum qemuMigrationDestinationType {
> +    MIGRATION_DEST_HOST,
> +    MIGRATION_DEST_UNIX,
> +};
>  
> -cleanup:
> -    qemuMigrationCookieFree(mig);
> -    xmlFreeURI(uribits);
> -    return ret;
> -}
> +enum qemuMigrationForwardType {
> +    MIGRATION_FWD_DIRECT,
> +    MIGRATION_FWD_STREAM,
> +};
>  
> +typedef struct _qemuMigrationSpec qemuMigrationSpec;
> +typedef qemuMigrationSpec *qemuMigrationSpecPtr;
> +struct _qemuMigrationSpec {
> +    enum qemuMigrationDestinationType destType;
> +    union {
> +        struct {
> +            const char *name;
> +            int port;
> +        } host;
> +
> +        struct {
> +            const char *file;
> +            int sock;
> +        } unics; /* this sucks but "unix" is a macro defined to 1 */

  stylistic: I would go for unx (unics is so reminiscent of Multics <sigh/>)
or unix_socket; that one should be clean.
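  Purely for illustration (not part of the patch), the renamed member and its
two use sites later in the diff would then read roughly like this; the name
unix_socket is just the suggestion above, nothing more:

    /* hypothetical rename of the "unics" union member */
    struct {
        const char *file;   /* path of the Unix socket */
        int sock;           /* listening socket fd */
    } unix_socket;

    ...
    spec.dest.unix_socket.file = unixfile;
    spec.dest.unix_socket.sock = qemu_sock;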

> +    } dest;
> +
> +    enum qemuMigrationForwardType fwdType;
> +    union {
> +        virStreamPtr stream;
> +    } fwd;
> +};
>  
>  #define TUNNEL_SEND_BUF_SIZE 65536
>  
> @@ -1483,101 +1398,33 @@ cleanup:
>      return rv;
>  }
>  
> -
> -static int doTunnelMigrate(struct qemud_driver *driver,
> -                           virDomainObjPtr vm,
> -                           virStreamPtr st,
> -                           const char *cookiein,
> -                           int cookieinlen,
> -                           char **cookieout,
> -                           int *cookieoutlen,
> -                           unsigned long flags,
> -                           unsigned long resource)
> +static int
> +qemuMigrationRun(struct qemud_driver *driver,
> +                 virDomainObjPtr vm,
> +                 const char *cookiein,
> +                 int cookieinlen,
> +                 char **cookieout,
> +                 int *cookieoutlen,
> +                 unsigned int flags,
> +                 unsigned long resource,
> +                 qemuMigrationSpecPtr spec)
>  {
> -    qemuDomainObjPrivatePtr priv = vm->privateData;
> -    int client_sock = -1;
> -    int qemu_sock = -1;
> -    struct sockaddr_un sa_qemu, sa_client;
> -    socklen_t addrlen;
> -    int status;
> -    unsigned long long transferred, remaining, total;
> -    char *unixfile = NULL;
> -    unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
>      int ret = -1;
> +    unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
> +    qemuDomainObjPrivatePtr priv = vm->privateData;
>      qemuMigrationCookiePtr mig = NULL;
>      qemuMigrationIOThreadPtr iothread = NULL;
> -    VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
> -              "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu",
> -              driver, vm, st, NULLSTR(cookiein), cookieinlen,
> -              cookieout, cookieoutlen, flags, resource);

  I would still keep a VIR_DEBUG() here even if there is one in the callers,
because that code is new and not that simple, and I would go for increased
traceability/verbosity in that case.
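  Something along these lines at the top of qemuMigrationRun would be enough;
this is only a sketch reusing the format of the VIR_DEBUG lines the patch
removes, so pick whichever fields you find useful:

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=%x, resource=%lu, "
              "spec=%p (dest=%d, fwd=%d)",
              driver, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              spec, spec->destType, spec->fwdType);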

> +    int fd = -1;
>  
>      if (virLockManagerPluginUsesState(driver->lockManager) &&
>          !cookieout) {
>          qemuReportError(VIR_ERR_INTERNAL_ERROR,
> -                        _("Migration with lock driver %s requires cookie support"),
> +                        _("Migration with lock driver %s requires"
> +                          " cookie support"),
>                          virLockManagerPluginGetName(driver->lockManager));
>          return -1;
>      }
>  
> -    if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
> -        !qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
> -        qemuReportError(VIR_ERR_OPERATION_FAILED,
> -                        "%s", _("Source qemu is too old to support tunnelled migration"));
> -        goto cleanup;
> -    }
> -
> -
> -    if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.src.%s",
> -                    driver->libDir, vm->def->name) < 0) {
> -        virReportOOMError();
> -        goto cleanup;
> -    }
> -
> -    qemu_sock = socket(AF_UNIX, SOCK_STREAM, 0);
> -    if (qemu_sock < 0) {
> -        virReportSystemError(errno, "%s",
> -                             _("cannot open tunnelled migration socket"));
> -        goto cleanup;
> -    }
> -    memset(&sa_qemu, 0, sizeof(sa_qemu));
> -    sa_qemu.sun_family = AF_UNIX;
> -    if (virStrcpy(sa_qemu.sun_path, unixfile,
> -                  sizeof(sa_qemu.sun_path)) == NULL) {
> -        qemuReportError(VIR_ERR_INTERNAL_ERROR,
> -                        _("Unix socket '%s' too big for destination"),
> -                        unixfile);
> -        goto cleanup;
> -    }
> -    unlink(unixfile);
> -    if (bind(qemu_sock, (struct sockaddr *)&sa_qemu, sizeof(sa_qemu)) < 0) {
> -        virReportSystemError(errno,
> -                             _("Cannot bind to unix socket '%s' for tunnelled migration"),
> -                             unixfile);
> -        goto cleanup;
> -    }
> -    if (listen(qemu_sock, 1) < 0) {
> -        virReportSystemError(errno,
> -                             _("Cannot listen on unix socket '%s' for tunnelled migration"),
> -                             unixfile);
> -        goto cleanup;
> -    }
> -
> -    if (chown(unixfile, driver->user, driver->group) < 0) {
> -        virReportSystemError(errno,
> -                             _("Cannot change unix socket '%s' owner"),
> -                             unixfile);
> -        goto cleanup;
> -    }
> -
> -    /* the domain may have shutdown or crashed while we had the locks dropped
> -     * in qemuDomainObjEnterRemoteWithDriver, so check again
> -     */
> -    if (!virDomainObjIsActive(vm)) {
> -        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                        _("guest unexpectedly quit"));
> -        goto cleanup;
> -    }
> -
>      if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
>                                         QEMU_MIGRATION_COOKIE_GRAPHICS)))
>          goto cleanup;
> @@ -1585,8 +1432,7 @@ static int doTunnelMigrate(struct qemud_driver *driver,
>      if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0)
>          VIR_WARN("unable to provide data for graphics client relocation");
>  
> -    /*   3. start migration on source */
> -    /* Before EnterMonitor, since qemuProcessStopCPUs already does that */
> +    /* Before EnterMonitor, since qemuMigrationSetOffline already does that */
>      if (!(flags & VIR_MIGRATE_LIVE) &&
>          virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
>          if (qemuMigrationSetOffline(driver, vm) < 0)
> @@ -1604,25 +1450,31 @@ static int doTunnelMigrate(struct qemud_driver *driver,
>      }
>  
>      if (flags & VIR_MIGRATE_NON_SHARED_DISK)
> -        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
> +        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
> +
>      if (flags & VIR_MIGRATE_NON_SHARED_INC)
> -        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
> -
> -    if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) {
> -        ret = qemuMonitorMigrateToUnix(priv->mon, background_flags,
> -                                               unixfile);
> -    } else if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
> -        const char *args[] = { "nc", "-U", unixfile, NULL };
> -        ret = qemuMonitorMigrateToCommand(priv->mon, QEMU_MONITOR_MIGRATE_BACKGROUND, args);
> -    } else {
> -        ret = -1;
> +        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
> +
> +    switch (spec->destType) {
> +    case MIGRATION_DEST_HOST:
> +        ret = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
> +                                       spec->dest.host.name,
> +                                       spec->dest.host.port);
> +        break;
> +
> +    case MIGRATION_DEST_UNIX:
> +        if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) {
> +            ret = qemuMonitorMigrateToUnix(priv->mon, migrate_flags,
> +                                           spec->dest.unics.file);
> +        } else {
> +            const char *args[] = { "nc", "-U", spec->dest.unics.file, NULL };
> +            ret = qemuMonitorMigrateToCommand(priv->mon, migrate_flags, args);
> +        }
> +        break;
>      }
>      qemuDomainObjExitMonitorWithDriver(driver, vm);
> -    if (ret < 0) {
> -        qemuReportError(VIR_ERR_OPERATION_FAILED,
> -                        "%s", _("tunnelled migration monitor command failed"));
> +    if (ret < 0)
>          goto cleanup;
> -    }
>      ret = -1;
>  
>      if (!virDomainObjIsActive(vm)) {
> @@ -1634,42 +1486,31 @@ static int doTunnelMigrate(struct qemud_driver *driver,
>      /* From this point onwards we *must* call cancel to abort the
>       * migration on source if anything goes wrong */
>  
> -    /* it is also possible that the migrate didn't fail initially, but
> -     * rather failed later on.  Check the output of "info migrate"
> -     */
> -    if (qemuDomainObjEnterMonitorAsync(driver, vm,
> -                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
> -        goto cancel;
> -    if (qemuMonitorGetMigrationStatus(priv->mon,
> -                                      &status,
> -                                      &transferred,
> -                                      &remaining,
> -                                      &total) < 0) {
> -        qemuDomainObjExitMonitorWithDriver(driver, vm);
> -        goto cancel;
> -    }
> -    qemuDomainObjExitMonitorWithDriver(driver, vm);
> -
> -    if (status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
> -        qemuReportError(VIR_ERR_OPERATION_FAILED,
> -                        "%s",_("migrate failed"));
> -        goto cancel;
> -    }
> +    if (spec->destType == MIGRATION_DEST_UNIX) {
> +        /* It is also possible that the migrate didn't fail initially, but
> +         * rather failed later on.  Check its status before waiting for a
> +         * connection from qemu which may never be initiated.
> +         */
> +        if (qemuMigrationUpdateJobStatus(driver, vm, _("migration job"),
> +                                         QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
> +            goto cancel;
>  
> -    addrlen = sizeof(sa_client);
> -    while ((client_sock = accept(qemu_sock, (struct sockaddr *)&sa_client, &addrlen)) < 0) {
> -        if (errno == EAGAIN || errno == EINTR)
> -            continue;
> -        virReportSystemError(errno, "%s",
> -                             _("tunnelled migration failed to accept from qemu"));
> -        goto cancel;
> +        while ((fd = accept(spec->dest.unics.sock, NULL, NULL)) < 0) {
> +            if (errno == EAGAIN || errno == EINTR)
> +                continue;
> +            virReportSystemError(errno, "%s",
> +                                 _("failed to accept connection from qemu"));
> +            goto cancel;
> +        }
>      }
>  
> -    if (!(iothread = qemuMigrationStartTunnel(st, client_sock)))
> +    if (spec->fwdType != MIGRATION_FWD_DIRECT &&
> +        !(iothread = qemuMigrationStartTunnel(spec->fwd.stream, fd)))
>          goto cancel;
>  
> -    ret = qemuMigrationWaitForCompletion(driver, vm,
> -                                         QEMU_ASYNC_JOB_MIGRATION_OUT);
> +    if (qemuMigrationWaitForCompletion(driver, vm,
> +                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
> +        goto cleanup;
>  
>      /* When migration completed, QEMU will have paused the
>       * CPUs for us, but unless we're using the JSON monitor
> @@ -1684,30 +1525,169 @@ static int doTunnelMigrate(struct qemud_driver *driver,
>              goto cleanup;
>      }
>  
> -    /* Close now to ensure the IO thread quits & is joinable in next method */
> -    VIR_FORCE_CLOSE(client_sock);
> +    ret = 0;
>  
> -    if (qemuMigrationStopTunnel(iothread) < 0)
> -        ret = -1;
> +cleanup:
> +    if (spec->fwdType != MIGRATION_FWD_DIRECT) {
> +        /* Close now to ensure the IO thread quits & is joinable */
> +        VIR_FORCE_CLOSE(fd);
> +        if (iothread && qemuMigrationStopTunnel(iothread) < 0)
> +            ret = -1;
> +    }
>  
>      if (ret == 0 &&
>          qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
>          VIR_WARN("Unable to encode migration cookie");
>  
> +    qemuMigrationCookieFree(mig);
> +
> +    return ret;
> +
>  cancel:
> -    if (ret != 0 && virDomainObjIsActive(vm)) {
> -        VIR_FORCE_CLOSE(client_sock);
> -        VIR_FORCE_CLOSE(qemu_sock);
> +    if (virDomainObjIsActive(vm)) {
>          if (qemuDomainObjEnterMonitorAsync(driver, vm,
>                                             QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
>              qemuMonitorMigrateCancel(priv->mon);
>              qemuDomainObjExitMonitorWithDriver(driver, vm);
>          }
>      }
> +    goto cleanup;
> +}
> +
> +/* Perform migration using QEMU's native TCP migrate support,
> + * not encrypted obviously
> + */
> +static int doNativeMigrate(struct qemud_driver *driver,
> +                           virDomainObjPtr vm,
> +                           const char *uri,
> +                           const char *cookiein,
> +                           int cookieinlen,
> +                           char **cookieout,
> +                           int *cookieoutlen,
> +                           unsigned int flags,
> +                           unsigned long resource)
> +{
> +    xmlURIPtr uribits = NULL;
> +    int ret;
> +    qemuMigrationSpec spec;
> +
> +    VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
> +              "cookieout=%p, cookieoutlen=%p, flags=%x, resource=%lu",
> +              driver, vm, uri, NULLSTR(cookiein), cookieinlen,
> +              cookieout, cookieoutlen, flags, resource);
> +
> +    if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
> +        /* HACK: source host generates bogus URIs, so fix them up */
> +        char *tmpuri;
> +        if (virAsprintf(&tmpuri, "tcp://%s", uri + strlen("tcp:")) < 0) {
> +            virReportOOMError();
> +            return -1;
> +        }
> +        uribits = xmlParseURI(tmpuri);
> +        VIR_FREE(tmpuri);
> +    } else {
> +        uribits = xmlParseURI(uri);
> +    }
> +    if (!uribits) {
> +        qemuReportError(VIR_ERR_INTERNAL_ERROR,
> +                        _("cannot parse URI %s"), uri);
> +        return -1;
> +    }
> +
> +    spec.destType = MIGRATION_DEST_HOST;
> +    spec.dest.host.name = uribits->server;
> +    spec.dest.host.port = uribits->port;
> +    spec.fwdType = MIGRATION_FWD_DIRECT;
> +
> +    ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
> +                           cookieoutlen, flags, resource, &spec);
> +    xmlFreeURI(uribits);
> +
> +    return ret;
> +}
> +
> +
> +static int doTunnelMigrate(struct qemud_driver *driver,
> +                           virDomainObjPtr vm,
> +                           virStreamPtr st,
> +                           const char *cookiein,
> +                           int cookieinlen,
> +                           char **cookieout,
> +                           int *cookieoutlen,
> +                           unsigned long flags,
> +                           unsigned long resource)
> +{
> +    qemuDomainObjPrivatePtr priv = vm->privateData;
> +    int qemu_sock = -1;
> +    struct sockaddr_un sa_qemu;
> +    char *unixfile = NULL;
> +    int ret = -1;
> +    qemuMigrationSpec spec;
> +
> +    VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
> +              "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu",
> +              driver, vm, st, NULLSTR(cookiein), cookieinlen,
> +              cookieout, cookieoutlen, flags, resource);
> +
> +    if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
> +        !qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
> +        qemuReportError(VIR_ERR_OPERATION_FAILED,
> +                        "%s", _("Source qemu is too old to support tunnelled migration"));
> +        goto cleanup;
> +    }
> +
> +    if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.src.%s",
> +                    driver->libDir, vm->def->name) < 0) {
> +        virReportOOMError();
> +        goto cleanup;
> +    }
> +
> +    qemu_sock = socket(AF_UNIX, SOCK_STREAM, 0);
> +    if (qemu_sock < 0) {
> +        virReportSystemError(errno, "%s",
> +                             _("cannot open tunnelled migration socket"));
> +        goto cleanup;
> +    }
> +    memset(&sa_qemu, 0, sizeof(sa_qemu));
> +    sa_qemu.sun_family = AF_UNIX;
> +    if (virStrcpy(sa_qemu.sun_path, unixfile,
> +                  sizeof(sa_qemu.sun_path)) == NULL) {
> +        qemuReportError(VIR_ERR_INTERNAL_ERROR,
> +                        _("Unix socket '%s' too big for destination"),
> +                        unixfile);
> +        goto cleanup;
> +    }
> +    unlink(unixfile);
> +    if (bind(qemu_sock, (struct sockaddr *)&sa_qemu, sizeof(sa_qemu)) < 0) {
> +        virReportSystemError(errno,
> +                             _("Cannot bind to unix socket '%s' for tunnelled migration"),
> +                             unixfile);
> +        goto cleanup;
> +    }
> +    if (listen(qemu_sock, 1) < 0) {
> +        virReportSystemError(errno,
> +                             _("Cannot listen on unix socket '%s' for tunnelled migration"),
> +                             unixfile);
> +        goto cleanup;
> +    }
> +
> +    if (chown(unixfile, driver->user, driver->group) < 0) {
> +        virReportSystemError(errno,
> +                             _("Cannot change unix socket '%s' owner"),
> +                             unixfile);
> +        goto cleanup;
> +    }
> +
> +    spec.destType = MIGRATION_DEST_UNIX;
> +    spec.dest.unics.file = unixfile;
> +    spec.dest.unics.sock = qemu_sock;
> +    spec.fwdType = MIGRATION_FWD_STREAM;
> +    spec.fwd.stream = st;
> +
> +    ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
> +                           cookieoutlen, flags, resource, &spec);
>  
>  cleanup:
> -    qemuMigrationCookieFree(mig);
> -    VIR_FORCE_CLOSE(client_sock);
>      VIR_FORCE_CLOSE(qemu_sock);
>      if (unixfile) {
>          unlink(unixfile);
> @@ -1811,7 +1791,7 @@ static int doPeer2PeerMigrate2(struct qemud_driver *driver,
>          ret = doNativeMigrate(driver, vm, uri_out,
>                                cookie, cookielen,
>                                NULL, NULL, /* No out cookie with v2 migration */
> -                              flags, dname, resource);
> +                              flags, resource);
>  
>      /* Perform failed. Make sure Finish doesn't overwrite the error */
>      if (ret < 0)
> @@ -1957,7 +1937,7 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
>          ret = doNativeMigrate(driver, vm, uri_out,
>                                cookiein, cookieinlen,
>                                &cookieout, &cookieoutlen,
> -                              flags, dname, resource);
> +                              flags, resource);
>  
>      /* Perform failed. Make sure Finish doesn't overwrite the error */
>      if (ret < 0) {
> @@ -2185,7 +2165,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver,
>          qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
>          ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
>                                cookieout, cookieoutlen,
> -                              flags, dname, resource);
> +                              flags, resource);
>      }
>      if (ret < 0)
>          goto endjob;
> @@ -2254,7 +2234,6 @@ qemuMigrationPerformPhase(struct qemud_driver *driver,
>                            char **cookieout,
>                            int *cookieoutlen,
>                            unsigned long flags,
> -                          const char *dname,
>                            unsigned long resource)
>  {
>      virDomainEventPtr event = NULL;
> @@ -2275,7 +2254,7 @@ qemuMigrationPerformPhase(struct qemud_driver *driver,
>      resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING;
>      ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
>                            cookieout, cookieoutlen,
> -                          flags, dname, resource);
> +                          flags, resource);
>  
>      if (ret < 0 && resume &&
>          virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
> @@ -2367,7 +2346,7 @@ qemuMigrationPerform(struct qemud_driver *driver,
>              return qemuMigrationPerformPhase(driver, conn, vm, uri,
>                                               cookiein, cookieinlen,
>                                               cookieout, cookieoutlen,
> -                                             flags, dname, resource);
> +                                             flags, resource);
>          } else {
>              return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri,
>                                             uri, cookiein, cookieinlen,

  It's a bit hard to really prove that there is no semantic change
in the refactoring, but the principle sounds good, and it looks okay.

  ACK,

Daniel

-- 
Daniel Veillard      | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
daniel at veillard.com  | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library  http://libvirt.org/



