[Libguestfs] [PATCH nbdkit] plugins: python: Add imageio plugin example

Nir Soffer nsoffer at redhat.com
Thu Aug 6 23:11:41 UTC 2020


On Fri, Aug 7, 2020 at 1:54 AM Nir Soffer <nirsof at gmail.com> wrote:
>
> This is mainly for testing the new parallel python threading model, but
> it is also an example of how to manage multiple connections from a plugin.
>
> I tested this with local imageio server, serving qcow2 image on local
> SSD.
>
> Start imageio server from imageio source:
>
>     ./ovirt-imageio -c test
>
> Create test disk:
>
>     qemu-img create -f qcow2 /var/tmp/disk.qcow2 6g
>
> Add a ticket to access the image, using the nbd example ticket:
>
>     curl --unix-socket ../daemon/test/daemon.sock \
>         --upload-file examples/nbd.json http://localhost/ticket/nbd
>
> Start qemu-nbd, serving the image for imageio:
>
>     qemu-nbd --socket=/tmp/nbd.sock --persistent --shared=8 --format=qcow2 \
>         --aio=native --cache=none --discard=unmap  /var/tmp/disk.qcow2
>
> Start nbdkit with this plugin:
>
>     ./nbdkit -U nbd.sock -t4 -f python ./plugins/python/examples/imageio.py \
>         transfer_url=https://localhost:54322/images/nbd connections=4 secure=no
>
> Finally, upload the image using qemu-img:
>
>     time qemu-img convert -n -f raw -O raw -W /var/tmp/fedora-32.raw \
>         nbd+unix:///?socket=./nbd.sock
>
> I tested with 1 and 4 threads/connections, creating new empty qcow2
> image before each test.
>
> 1 connections, 4 threads:
>
> real    0m7.885s
> user    0m0.663s
> sys     0m0.803s
>
> 4 connections, 4 threads:
>
> real    0m3.336s
> user    0m0.439s
> sys     0m0.651s
>
> This is what we see on imageio side:
>
> 1 connection:
>
> [connection 1 ops, 7.866482 s]
> [dispatch 2630 ops, 6.488580 s]
> [extents 1 ops, 0.002326 s]
> [zero 1176 ops, 0.661475 s, 4.73 GiB, 7.15 GiB/s]
> [write 1451 ops, 5.475842 s, 1.27 GiB, 237.08 MiB/s]
> [flush 2 ops, 0.029208 s]
>
> 4 connections:
>
> [connection 1 ops, 3.289038 s]
> [dispatch 670 ops, 2.679317 s]
> [extents 1 ops, 0.010870 s]
> [write 383 ops, 2.172633 s, 333.70 MiB, 153.59 MiB/s]
> [zero 286 ops, 0.346506 s, 1.29 GiB, 3.72 GiB/s]
>
> [connection 1 ops, 3.303300 s]
> [dispatch 632 ops, 2.711896 s]
> [zero 273 ops, 0.380406 s, 1.12 GiB, 2.93 GiB/s]
> [extents 1 ops, 0.000485 s]
> [write 358 ops, 2.182803 s, 310.67 MiB, 142.33 MiB/s]
>
> [connection 1 ops, 3.318177 s]
> [dispatch 669 ops, 2.759531 s]
> [extents 1 ops, 0.064217 s]
> [write 354 ops, 2.067320 s, 336.70 MiB, 162.87 MiB/s]
> [zero 313 ops, 0.470069 s, 1.20 GiB, 2.55 GiB/s]
> [flush 1 ops, 0.002421 s]
>
> [connection 1 ops, 3.280020 s]
> [dispatch 662 ops, 2.685547 s]
> [zero 304 ops, 0.431782 s, 1.13 GiB, 2.62 GiB/s]
> [extents 1 ops, 0.000424 s]
> [write 356 ops, 2.101127 s, 317.17 MiB, 150.95 MiB/s]
> [flush 1 ops, 0.000127 s]
>
> Results are not very stable, but the trend is clear. We can use this
> to optimize virt-v2v.
>
> Signed-off-by: Nir Soffer <nsoffer at redhat.com>
> ---
>  plugins/python/examples/imageio.py | 167 +++++++++++++++++++++++++++++
>  1 file changed, 167 insertions(+)
>  create mode 100644 plugins/python/examples/imageio.py
>
> diff --git a/plugins/python/examples/imageio.py b/plugins/python/examples/imageio.py
> new file mode 100644
> index 00000000..e77fd2f4
> --- /dev/null
> +++ b/plugins/python/examples/imageio.py
> @@ -0,0 +1,167 @@
> +# Example Python plugin.
> +#
> +# This example can be freely used for any purpose.
> +#
> +# Upload and download images to oVirt with nbdkit and qemu-img.
> +#
> +# Install ovirt-imageio-client
> +#
> +#   dnf copr enable nsoffer/ovirt-imageio-preview
> +#   dnf install ovirt-imageio-client
> +#
> +# To upload or download images, you need to start an image transfer. The
> +# easiest way is using oVirt image_transfer.py example:
> +#
> +#  /usr/share/doc/python3-ovirt-engine-sdk4/examples/image_transfer.py \
> +#      --engine-url https://my.engine \
> +#      --username admin at internal \
> +#      --password-file password \
> +#      --cafile my.engine.pem \
> +#      upload disk-uuid
> +#
> +# This will print the transfer URL for this image transfer.
> +#
> +# Run this example from the build directory:
> +#
> +#   ./nbdkit -t4 -f -v -U /tmp/nbd.sock python \
> +#       ./plugins/python/examples/imageio.py \
> +#       transfer_url=https://server:54322/images/ticket-id \
> +#       connections=4 \
> +#       secure=no
> +#
> +# Note that the number of nbdkit threads and imageio connections should match.
> +#
> +# To upload an image run:
> +#
> +#   qemu-img convert -f qcow2 -O raw disk.img nbd:///?socket=tmp/nbd.sock

Should be:

    qemu-img convert -n -f raw -O raw disk.img nbd+unix:///?socket=/tmp/nbd.sock

> +#
> +# Downloading an image is not efficient with this version, since we don't report
> +# extents yet.

Do we support reporting extents from python plugin?

> +#
> +# The -f -v arguments are optional.  They cause the server to stay in
> +# the foreground and print debugging, which is useful when testing.
> +
> +import queue
> +import threading
> +from contextlib import contextmanager
> +
> +from ovirt_imageio.client import ImageioClient
> +
> +import nbdkit
> +
> +# Using version 2 supporting the buffer protocol for better performance.
> +API_VERSION = 2
> +
> +# Plugin configuration, can be set using key=value in the command line.
> +params = {
> +    "secure": True,
> +    "ca_file": "",
> +    "connections": 1,
> +    "transfer_url": None,
> +}
> +
> +
> +def config(key, value):
> +    """
> +    Parse the url parameter which contains the transfer URL that we want to
> +    serve.
> +    """
> +    if key == "transfer_url":
> +        params["transfer_url"] = value
> +    elif key == "connections":
> +        params["connections"] = int(value)
> +    elif key == "ca_file":
> +        params["ca_file"] = value
> +    elif key == "secure":
> +        params["secure"] = boolify(key, value)
> +    else:
> +        raise RuntimeError("unknown parameter: {!r}".format(key))
> +
> +
> +def boolify(key, value):
> +    v = value.lower()
> +    if v in ("yes", "true", "1"):
> +        return True
> +    if v in ("no", "false", "0"):
> +        return False
> +    raise RuntimeError("Invalid boolean value for {}: {!r}".format(key, value))
> +
> +
> +def config_complete():
> +    """
> +    Called when configuration completed.
> +    """
> +    if params["transfer_url"] is None:
> +        raise RuntimeError("'transfer_url' parameter is required")
> +
> +
> +def thread_model():
> +    """
> +    Using parallel model to speed up transfer with multiple connections to
> +    imageio server.
> +    """
> +    return nbdkit.THREAD_MODEL_PARALLEL
> +
> +
> +def open(readonly):
> +    """
> +    Called once per client connection. We create a pool of connected clients
> +    that will be used for requests later.
> +    """
> +    pool = queue.Queue()
> +    for i in range(params["connections"]):
> +        client = ImageioClient(
> +            params["transfer_url"],
> +            cafile=params["ca_file"],
> +            secure=params["secure"])
> +        pool.put(client)
> +    return { "pool": pool }
> +
> +
> +def close(h):
> +    """
> +    Called when plugin is closed. Close and remove all clients from the pool.
> +    """
> +    pool = h["pool"]
> +    while not pool.empty():
> +        client = pool.get()
> +        client.close()
> +
> +
> + at contextmanager
> +def client(h):
> +    """
> +    Context manager fetching an imageio client from the pool. Blocks until a
> +    client is available.
> +    """
> +    pool = h["pool"]
> +    client = pool.get()
> +    try:
> +        yield client
> +    finally:
> +        pool.put(client)
> +
> +
> +def get_size(h):
> +    with client(h) as c:
> +        return c.size()
> +
> +
> +def pread(h, buf, offset, flags):
> +    with client(h) as c:
> +        c.read(offset, buf)
> +
> +
> +def pwrite(h, buf, offset, flags):
> +    with client(h) as c:
> +        c.write(offset, buf)
> +
> +
> +def zero(h, count, offset, flags):
> +    with client(h) as c:
> +        c.zero(offset, count)
> +
> +
> +def flush(h, flags):
> +    with client(h) as c:
> +        c.flush()
> --
> 2.25.4
>




More information about the Libguestfs mailing list