[dm-devel] [PATCH 04/14] libmultipath: uevent_dispatch(): process uevents one by one

Benjamin Marzinski bmarzins at redhat.com
Sat Apr 2 03:31:33 UTC 2022


On Thu, Mar 31, 2022 at 12:15:00AM +0200, mwilck at suse.com wrote:
> The main rationale for delaying uevents is that the
> uevent dispatcher may have to wait for other threads to release the
> vecs lock, may hold the vecs lock for an extended amount of time, and
> even sleep occasionally. By delaying them, we have the chance
> to accumulate events for the same path device ("filtering") or
> WWID ("merging"), thus avoiding duplicate work if we merge these
> into one.
> 
> A similar effect can be obtained in the uevent dispatcher itself
> by looking for new uevents after each dispatched event, and trying
> to merge the newly arrived events with those that remained
> in the queue.
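
Restating the new flow to check my understanding (simplified from the
hunks below; the cancellation handlers, the servicing_uev bookkeeping,
and the !my_uev_trigger exit are left out, and the unlock really happens
via pthread_cleanup_pop()):

	while (1) {
		pthread_mutex_lock(uevq_lockp);
		while (list_empty(&uevq_work) && list_empty(&uevq))
			pthread_cond_wait(uev_condp, uevq_lockp);
		/* pull in whatever arrived while we were servicing */
		list_splice_tail_init(&uevq, &uevq_work);
		pthread_mutex_unlock(uevq_lockp);

		merge_uevq(&uevq_work);    /* merge new arrivals with leftovers */
		service_uevq(&uevq_work);  /* now services exactly one uevent */
	}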

I'm not sure how I feel about this commit existing on its own.
uevent_filter() can leak memory if it deletes a uevent that already has
merged uevents.  I see that you fixed this in your next patch.  The
question is, does the easier review from splitting the patches make
up for introducing an error in this one?  If you want to leave
it in as its own commit, you should probably flag that it creates a
memory leak that will be fixed in a future patch.
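
To spell out the leak: once events can stay on the work queue across
loop iterations, an "earlier" event may already carry merged events on
its merge_node list by the time a later merge pass filters it out.  A
minimal standalone sketch of that pattern (made-up struct and names,
just to show the shape, assuming libmultipath's list.h):

	#include <stdlib.h>
	#include "list.h"	/* libmultipath's kernel-style list helpers */

	struct uev_sketch {		/* stand-in for struct uevent */
		struct list_head node;
		struct list_head merge_node;
	};

	int main(void)
	{
		struct uev_sketch *earlier = calloc(1, sizeof(*earlier));
		struct uev_sketch *merged = calloc(1, sizeof(*merged));

		if (!earlier || !merged)
			return 1;

		INIT_LIST_HEAD(&earlier->node);
		INIT_LIST_HEAD(&earlier->merge_node);
		INIT_LIST_HEAD(&merged->node);
		INIT_LIST_HEAD(&merged->merge_node);

		/* what uevent_merge() did on an earlier pass: fold
		 * "merged" into "earlier" */
		list_move(&merged->node, &earlier->merge_node);

		/* what a filter pass does if it drops "earlier" without
		 * walking earlier->merge_node first: "merged" becomes
		 * unreachable and is never freed */
		list_del_init(&earlier->node);
		free(earlier);

		return 0;
	}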

-Ben
 
> Signed-off-by: Martin Wilck <mwilck at suse.com>
> ---
>  libmultipath/list.h   | 29 +++++++++++++++++++++++++++++
>  libmultipath/uevent.c | 37 ++++++++++++++++++++-----------------
>  2 files changed, 49 insertions(+), 17 deletions(-)
> 
> diff --git a/libmultipath/list.h b/libmultipath/list.h
> index ced021f..ddea99f 100644
> --- a/libmultipath/list.h
> +++ b/libmultipath/list.h
> @@ -246,6 +246,35 @@ static inline void list_splice_tail_init(struct list_head *list,
>  #define list_entry(ptr, type, member) \
>  	container_of(ptr, type, member)
>  
> +
> +/**
> + * list_pop - unlink and return the first list element
> + * @head:	the &struct list_head pointer.
> + */
> +static inline struct list_head *list_pop(struct list_head *head)
> +{
> +	struct list_head *tmp;
> +
> +	if (list_empty(head))
> +		return NULL;
> +	tmp = head->next;
> +	list_del_init(tmp);
> +	return tmp;
> +}
> +
> +/**
> + * list_pop_entry - unlink and return the entry of the first list element
> + * @head:	the &struct list_head pointer.
> + * @type:	the type of the struct this is embedded in.
> + * @member:	the name of the list_struct within the struct.
> + */
> +#define list_pop_entry(head, type, member)		\
> +({							\
> +	struct list_head *__h = list_pop(head);		\
> +							\
> +	(__h ? container_of(__h, type, member) : NULL);	\
> +})
> +
>  /**
>   * list_for_each	-	iterate over a list
>   * @pos:	the &struct list_head to use as a loop counter.
> diff --git a/libmultipath/uevent.c b/libmultipath/uevent.c
> index fe60ae3..602eccb 100644
> --- a/libmultipath/uevent.c
> +++ b/libmultipath/uevent.c
> @@ -366,6 +366,8 @@ uevent_merge(struct uevent *later, struct list_head *tmpq)
>  				later->action, later->kernel, later->wwid);
>  
>  			list_move(&earlier->node, &later->merge_node);
> +			list_splice_init(&earlier->merge_node,
> +					 &later->merge_node);
>  		}
>  	}
>  }
> @@ -386,16 +388,15 @@ merge_uevq(struct list_head *tmpq)
>  static void
>  service_uevq(struct list_head *tmpq)
>  {
> -	struct uevent *uev, *tmp;
> +	struct uevent *uev = list_pop_entry(tmpq, typeof(*uev), node);
>  
> -	list_for_each_entry_safe(uev, tmp, tmpq, node) {
> -		list_del_init(&uev->node);
> -
> -		pthread_cleanup_push(cleanup_uev, uev);
> -		if (my_uev_trigger && my_uev_trigger(uev, my_trigger_data))
> -			condlog(0, "uevent trigger error");
> -		pthread_cleanup_pop(1);
> -	}
> +	if (uev == NULL)
> +		return;
> +	condlog(4, "servicing uevent '%s %s'", uev->action, uev->kernel);
> +	pthread_cleanup_push(cleanup_uev, uev);
> +	if (my_uev_trigger && my_uev_trigger(uev, my_trigger_data))
> +		condlog(0, "uevent trigger error");
> +	pthread_cleanup_pop(1);
>  }
>  
>  static void uevent_cleanup(void *arg)
> @@ -432,33 +433,35 @@ static void cleanup_global_uevq(void *arg __attribute__((unused)))
>  int uevent_dispatch(int (*uev_trigger)(struct uevent *, void * trigger_data),
>  		    void * trigger_data)
>  {
> +	LIST_HEAD(uevq_work);
> +
>  	my_uev_trigger = uev_trigger;
>  	my_trigger_data = trigger_data;
>  
>  	mlockall(MCL_CURRENT | MCL_FUTURE);
>  
> +	pthread_cleanup_push(cleanup_uevq, &uevq_work);
>  	while (1) {
> -		LIST_HEAD(uevq_tmp);
>  
>  		pthread_cleanup_push(cleanup_mutex, uevq_lockp);
>  		pthread_mutex_lock(uevq_lockp);
> -		servicing_uev = 0;
>  
> -		while (list_empty(&uevq))
> +		servicing_uev = !list_empty(&uevq_work);
> +
> +		while (list_empty(&uevq_work) && list_empty(&uevq))
>  			pthread_cond_wait(uev_condp, uevq_lockp);
>  
>  		servicing_uev = 1;
> -		list_splice_init(&uevq, &uevq_tmp);
> +		list_splice_tail_init(&uevq, &uevq_work);
>  		pthread_cleanup_pop(1);
>  
>  		if (!my_uev_trigger)
>  			break;
>  
> -		pthread_cleanup_push(cleanup_uevq, &uevq_tmp);
> -		merge_uevq(&uevq_tmp);
> -		service_uevq(&uevq_tmp);
> -		pthread_cleanup_pop(1);
> +		merge_uevq(&uevq_work);
> +		service_uevq(&uevq_work);
>  	}
> +	pthread_cleanup_pop(1);
>  	condlog(3, "Terminating uev service queue");
>  	return 0;
>  }
> -- 
> 2.35.1

