[libvirt PATCH 61/80] qemu: Refactor qemuMigrationDstPrepareFresh

Peter Krempa pkrempa at redhat.com
Thu May 12 14:44:18 UTC 2022


On Tue, May 10, 2022 at 17:21:22 +0200, Jiri Denemark wrote:
> Offline migration jumps over a big part of qemuMigrationDstPrepareFresh.
> Let's move that part into a new qemuMigrationDstPrepareActive function
> to make the code easier to follow.
> 
> Signed-off-by: Jiri Denemark <jdenemar at redhat.com>
> ---
>  src/qemu/qemu_migration.c | 374 +++++++++++++++++++++-----------------
>  1 file changed, 206 insertions(+), 168 deletions(-)
> 
> diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
> index f1e3774034..dc608fb8a4 100644
> --- a/src/qemu/qemu_migration.c
> +++ b/src/qemu/qemu_migration.c
> @@ -3118,6 +3118,200 @@ qemuMigrationDstPrepareAnyBlockDirtyBitmaps(virDomainObj *vm,
>  }
>  
>  
> +static int
> +qemuMigrationDstPrepareActive(virQEMUDriver *driver,
> +                              virDomainObj *vm,
> +                              virConnectPtr dconn,
> +                              qemuMigrationCookie *mig,
> +                              virStreamPtr st,
> +                              const char *protocol,
> +                              unsigned short port,
> +                              const char *listenAddress,
> +                              size_t nmigrate_disks,
> +                              const char **migrate_disks,
> +                              int nbdPort,
> +                              const char *nbdURI,
> +                              qemuMigrationParams *migParams,
> +                              unsigned long flags)
> +{
> +    qemuDomainObjPrivate *priv = vm->privateData;
> +    qemuDomainJobPrivate *jobPriv = priv->job.privateData;
> +    qemuProcessIncomingDef *incoming = NULL;
> +    g_autofree char *tlsAlias = NULL;
> +    virObjectEvent *event = NULL;
> +    virErrorPtr origErr = NULL;
> +    int dataFD[2] = { -1, -1 };
> +    bool stopProcess = false;
> +    unsigned int startFlags;
> +    bool relabel = false;
> +    bool tunnel = !!st;
> +    int ret = -1;
> +    int rv;
> +
> +    if (STREQ_NULLABLE(protocol, "rdma") &&
> +        !virMemoryLimitIsSet(vm->def->mem.hard_limit)) {
> +        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
> +                       _("cannot start RDMA migration with no memory hard "
> +                         "limit set"));

linebreaks: please join this error message into a single string — per current libvirt style, translatable messages should not be split across lines so they remain greppable.

> +        goto error;
> +    }
> +
> +    if (qemuMigrationDstPrecreateStorage(vm, mig->nbd,
> +                                         nmigrate_disks, migrate_disks,
> +                                         !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
> +        goto error;
> +
> +    if (tunnel &&
> +        virPipe(dataFD) < 0)
> +        goto error;
> +
> +    startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY;
> +
> +    if (qemuProcessInit(driver, vm, mig->cpu, VIR_ASYNC_JOB_MIGRATION_IN,
> +                        true, startFlags) < 0)
> +        goto error;
> +    stopProcess = true;
> +
> +    if (!(incoming = qemuMigrationDstPrepare(vm, tunnel, protocol,
> +                                             listenAddress, port,
> +                                             dataFD[0])))
> +        goto error;
> +
> +    if (qemuProcessPrepareDomain(driver, vm, startFlags) < 0)
> +        goto error;
> +
> +    if (qemuProcessPrepareHost(driver, vm, startFlags) < 0)
> +        goto error;
> +
> +    rv = qemuProcessLaunch(dconn, driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
> +                           incoming, NULL,
> +                           VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
> +                           startFlags);
> +    if (rv < 0) {
> +        if (rv == -2)
> +            relabel = true;
> +        goto error;
> +    }
> +    relabel = true;
> +
> +    if (tunnel) {
> +        if (virFDStreamOpen(st, dataFD[1]) < 0) {
> +            virReportSystemError(errno, "%s",
> +                                 _("cannot pass pipe for tunnelled migration"));
> +            goto error;
> +        }
> +        dataFD[1] = -1; /* 'st' owns the FD now & will close it */
> +    }
> +
> +    if (STREQ_NULLABLE(protocol, "rdma") &&
> +        vm->def->mem.hard_limit > 0 &&
> +        virProcessSetMaxMemLock(vm->pid, vm->def->mem.hard_limit << 10) < 0) {
> +        goto error;
> +    }
> +
> +    if (qemuMigrationDstPrepareAnyBlockDirtyBitmaps(vm, mig, migParams, flags) < 0)
> +        goto error;
> +
> +    if (qemuMigrationParamsCheck(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
> +                                 migParams, mig->caps->automatic) < 0)
> +        goto error;
> +
> +    /* Migrations using TLS need to add the "tls-creds-x509" object and
> +     * set the migration TLS parameters */
> +    if (flags & VIR_MIGRATE_TLS) {
> +        if (qemuMigrationParamsEnableTLS(driver, vm, true,
> +                                         VIR_ASYNC_JOB_MIGRATION_IN,
> +                                         &tlsAlias, NULL,
> +                                         migParams) < 0)
> +            goto error;
> +    } else {
> +        if (qemuMigrationParamsDisableTLS(vm, migParams) < 0)
> +            goto error;
> +    }
> +
> +    if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
> +                                 migParams) < 0)
> +        goto error;
> +
> +    if (mig->nbd &&
> +        flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
> +        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
> +        const char *nbdTLSAlias = NULL;
> +
> +        if (flags & VIR_MIGRATE_TLS) {
> +            if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_TLS)) {
> +                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
> +                               _("QEMU NBD server does not support TLS transport"));
> +                goto error;
> +            }
> +
> +            nbdTLSAlias = tlsAlias;
> +        }
> +
> +        if (qemuMigrationDstStartNBDServer(driver, vm, incoming->address,
> +                                           nmigrate_disks, migrate_disks,
> +                                           nbdPort, nbdURI,
> +                                           nbdTLSAlias) < 0) {
> +            goto error;
> +        }
> +    }
> +
> +    if (mig->lockState) {
> +        VIR_DEBUG("Received lockstate %s", mig->lockState);
> +        VIR_FREE(priv->lockState);
> +        priv->lockState = g_steal_pointer(&mig->lockState);
> +    } else {
> +        VIR_DEBUG("Received no lockstate");
> +    }
> +
> +    if (qemuMigrationDstRun(driver, vm, incoming->uri,
> +                            VIR_ASYNC_JOB_MIGRATION_IN) < 0)
> +        goto error;
> +
> +    if (qemuProcessFinishStartup(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
> +                                 false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
> +        goto error;
> +
> +    if (!(flags & VIR_MIGRATE_OFFLINE)) {
> +        virDomainAuditStart(vm, "migrated", true);
> +        event = virDomainEventLifecycleNewFromObj(vm,
> +                                         VIR_DOMAIN_EVENT_STARTED,
> +                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);

Alignment: the continuation arguments of virDomainEventLifecycleNewFromObj() should be aligned with the opening parenthesis of the call.

> +    }
> +
> +    ret = 0;
> +

Reviewed-by: Peter Krempa <pkrempa at redhat.com>


More information about the libvir-list mailing list