[Libguestfs] [PATCH nbdkit 3/3] server: Remove explicit connection parameter, use TLS instead.

Eric Blake eblake at redhat.com
Tue Feb 11 17:46:06 UTC 2020


On 2/11/20 11:15 AM, Richard W.M. Jones wrote:
> Since commit 86fdb48c6a5362d66865493d9d2172166f99722e we have stored
> the connection object in thread-local storage.
> 
> In this very large, but mostly mechanical change we stop passing the
> connection pointer around everywhere, and instead use the value stored
> in thread-local storage.
> 
> This assumes a 1-1 mapping between the connection and the current
> thread which is true in *most* places.  Occasionally we still have the
> explicit connection pointer, especially just before we launch a thread
> or call threadlocal_set_conn.
> ---
>   server/internal.h                    | 191 +++++++++----------
>   server/backend.c                     | 160 +++++++++-------
>   server/connections.c                 |  68 ++++---
>   server/crypto.c                      |  14 +-
>   server/filters.c                     | 270 +++++++++++++--------------
>   server/locks.c                       |  12 +-
>   server/plugins.c                     |  64 +++----
>   server/protocol-handshake-newstyle.c | 172 +++++++++--------
>   server/protocol-handshake-oldstyle.c |   7 +-
>   server/protocol-handshake.c          |  40 ++--
>   server/protocol.c                    | 127 ++++++-------
>   server/public.c                      |   2 +-
>   server/test-public.c                 |   2 +-
>   13 files changed, 574 insertions(+), 555 deletions(-)

Makes sense to me (and not too hard to rebase my NBD_INFO_INIT_STATE 
stuff on top of it).  Are we sure there won't be a speed penalty from 
the frequent thread-local storage accesses, compared to the previous 
approach where the connection was passed as a parameter (and likely 
held in a register)?
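
For what it's worth, the two patterns being compared look roughly like 
this (a sketch only; server/threadlocal.c has the real implementation, 
which may differ):

/* Sketch, not nbdkit's actual code.  The old style keeps 'conn' in a
 * register at the call site; the new style pays a lookup (here a
 * pthread_getspecific() call) at every use.  Key creation is omitted.
 */
#include <pthread.h>

struct connection;

static pthread_key_t conn_key;          /* created once at startup */

static struct connection *
get_conn_sketch (void)
{
  return pthread_getspecific (conn_key);
}

If the implementation uses __thread (or C11 _Thread_local) instead, the 
compiler can often cache the address and the cost should be negligible; 
either way, a quick benchmark would settle it.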

A few comments:

> +++ b/server/filters.c
> @@ -49,13 +49,13 @@ struct backend_filter {
>     struct nbdkit_filter filter;
>   };
>   
> -/* Literally a backend, a connection pointer, and the filter's handle.
> +/* Literally a backend and the filter's handle.
> + *
>    * This is the implementation of our handle in .open, and serves as
>    * a stable ‘void *nxdata’ in the filter API.
>    */
> -struct b_conn {
> +struct b_h {
>     struct backend *b;
> -  struct connection *conn;
>     void *handle;
>   };
>   
> @@ -186,22 +186,22 @@ filter_config_complete (struct backend *b)
>   static int
>   next_preconnect (void *nxdata, int readonly)
>   {
> -  struct b_conn *b_conn = nxdata;
> -  return b_conn->b->preconnect (b_conn->b, b_conn->conn, readonly);
> +  struct b_h *b_h = nxdata;
> +  return b_h->b->preconnect (b_h->b, readonly);
>   }

None of the next_*() wrappers use b_h->handle; they all stick to 
b_h->b.  I don't think that matters too much, other than...

> @@ -267,101 +266,101 @@ filter_close (struct backend *b, struct connection *conn, void *handle)
>   
>   /* The next_functions structure contains pointers to backend
>    * functions.  However because these functions are all expecting a
> - * backend and a connection, we cannot call them directly, but must
> + * backend and a handle, we cannot call them directly, but must
>    * write some next_* functions that unpack the two parameters from a
> - * single ‘void *nxdata’ struct pointer (‘b_conn’).
> + * single ‘void *nxdata’ struct pointer (‘b_h’).

...this comment is slightly off.  In fact, if ALL we use is b_h->b, we 
could probably populate next_ops with backend_* functions rather than 
next_* wrapper functions:

> @@ -410,8 +409,8 @@ static int
>   next_cache (void *nxdata, uint32_t count, uint64_t offset,
>               uint32_t flags, int *err)
>   {
> -  struct b_conn *b_conn = nxdata;
> -  return backend_cache (b_conn->b, b_conn->conn, count, offset, flags, err);
> +  struct b_h *b_h = nxdata;
> +  return backend_cache (b_h->b, count, offset, flags, err);
>   }
>   
>   static struct nbdkit_next_ops next_ops = {

as in:

static struct nbdkit_next_ops next_ops = {
  ...
  .cache = backend_cache,
};
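
To spell the idea out (a toy sketch, not the real headers; note it 
assumes the next_ops signatures are also changed from 'void *nxdata' 
to 'struct backend *', otherwise the assignments would not type-check):

#include <stdint.h>

struct backend;

/* Declaration as in this patch, minus the connection parameter. */
extern int backend_cache (struct backend *b, uint32_t count,
                          uint64_t offset, uint32_t flags, int *err);

/* Hypothetical next_ops taking struct backend * instead of void *nxdata. */
struct next_ops_sketch {
  int (*cache) (struct backend *b, uint32_t count, uint64_t offset,
                uint32_t flags, int *err);
};

static struct next_ops_sketch next_ops_direct = {
  .cache = backend_cache,       /* the next_cache() shim goes away */
};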


>   static int
> -filter_cache (struct backend *b, struct connection *conn, void *handle,
> +filter_cache (struct backend *b, void *handle,
>                 uint32_t count, uint64_t offset,
>                 uint32_t flags, int *err)
>   {
>     struct backend_filter *f = container_of (b, struct backend_filter, backend);
> -  struct b_conn *nxdata = handle;
> +  struct b_h *nxdata = handle;
>   
> -  assert (nxdata->b == b->next && nxdata->conn == conn);
> +  assert (nxdata->b == b->next);
>     if (f->filter.cache)
>       return f->filter.cache (&next_ops, nxdata, nxdata->handle,
>                               count, offset, flags, err);
>     else
> -    return backend_cache (b->next, conn, count, offset, flags, err);
> +    return backend_cache (b->next, count, offset, flags, err);

and here, we could call:

struct backend_filter *f = container_of (b, struct backend_filter, backend);

if (f->filter.cache)
  return f->filter.cache (&next_ops, b->next, handle,
                          count, offset, flags, err);
else
  return backend_cache (b->next, count, offset, flags, err);

and drop the malloc() wrapper around the handle altogether 
(test-layers.sh will confirm that we still have a stable pointer).

That can be a separate patch; for the sake of _this_ patch, keeping 
things mechanical (with maybe a tweak to the comment) is fine.

>   }
>   
>   static struct backend filter_functions = {
> diff --git a/server/locks.c b/server/locks.c
> index ef6726d8..d187b422 100644
> --- a/server/locks.c
> +++ b/server/locks.c
> @@ -91,8 +91,12 @@ unlock_connection (void)
>   }
>   
>   void
> -lock_request (struct connection *conn)
> +lock_request (void)
>   {
> +  struct connection *conn = GET_CONN;
> +
> +  assert (conn != NULL);
> +

Here's a site where we could exploit the macro guaranteeing a non-null 
result, rather than open-coding the assertion at each caller.
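
Something like this, say (a sketch; it assumes the getter is called 
threadlocal_get_conn(), to match the threadlocal_set_conn() mentioned 
in the commit message, and that GNU C statement expressions are 
acceptable):

/* Hypothetical: fold the assertion into the accessor so that every
 * caller, like lock_request() above, can rely on a non-NULL result.
 */
#define GET_CONN                                          \
  ({ struct connection *conn_ = threadlocal_get_conn ();  \
     assert (conn_ != NULL);                              \
     conn_; })

with which the open-coded assert in lock_request() disappears.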

-- 
Eric Blake, Principal Software Engineer
Red Hat, Inc.           +1-919-301-3226
Virtualization:  qemu.org | libvirt.org
