[dm-devel] Another experimental dm target... an encryption target

Christophe Saout christophe at saout.de
Wed Jul 30 09:19:04 UTC 2003


On Tue, 2003-07-29 at 22:02, Joe Thornber wrote:

> The only place I'm using pages is in kcopyd, I needed different
> semantics to a mempool, so I did it a different way.  You should be
> able to use mempools.

Ah, ok. I thought these were exported, but they're private. You're
right.

> > Would that be a strategy? Or is it too complicated?
> 
> You have nicely described the mempool code ;) This is already done for
> you, see mm/mempool.c it's not long, but it's essential that you
> understand how it will behave.

Ah, I hadn't really looked at the mempool code. You can provide your own
allocation function (instead of mempool_alloc_slab). That's nice.
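
For reference, a minimal sketch of what such a custom allocator can look
like (this is essentially what the second patch below ends up doing for
the page pool):

	/* allocation/free callbacks handed to mempool_create() */
	static void *mempool_alloc_page(int gfp_mask, void *pool_data)
	{
		return alloc_page(gfp_mask);
	}

	static void mempool_free_page(void *element, void *pool_data)
	{
		__free_page(element);
	}

	/* in the target constructor */
	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
	                               mempool_free_page, NULL);
	if (!cc->page_pool)
		goto bad;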

> > Yes, I don't think that code's too clever either. If you are out of
> > memory, your swap is full and the kernel wants to write out dirty pages
> > to reclaim memory but the writes are stuck waiting for buffers to do the
> > encryption, you run into a deadlock.
> 
> yep, this is the big deadlock that mempools were invented to fix.

I tested it. I put my swap on the crypto block device, did some I/O on it
and allocated large chunks of memory very fast... and boom - the system hung.

Ok. I've modified the code to use page pools.

That was easy. The most complicated part is being able to split bios if
we run out of memory.

There are two defines that can be tweaked:

#define MIN_POOL_PAGES 16
#define MIN_BIO_PAGES  8

The first one is the number of emergency pool pages, which amounts to 64k
on i386.

The second one is the minimum number of pages the buffer code always wants
to be able to allocate without failing; for these it can wait for memory
to be freed up.

When at least MIN_BIO_PAGES pages have been allocated and more are needed,
the code tries to allocate the remaining pages without __GFP_WAIT. If that
fails, the partial bio gets submitted and the code tries to allocate the
next bio (again waiting for the pages).
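
Condensed from the first patch, the allocation loop that implements this
behaviour looks roughly like this (simplified, bookkeeping left out):

	int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;

	for (i = bio->bi_idx; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(bio, i);

		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page)
			break;	/* submit the partial bio, retry the rest later */

		/* after MIN_BIO_PAGES pages, stop waiting for memory */
		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
	}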

I couldn't get the new code to deadlock. I found it rather impressive how
"usable" the system still was.

So I've attached two patches. The first one implements the bio splitting
code; this one introduces a lot of complexity.

The second one is small: it simply switches to mempool allocation of
pages and removes the allocation retry code taken from the loop driver.

BTW: My file-backed dm target produced a lot of page allocation failures
(which the kernel logged to syslog) under extreme memory pressure. I'm not
allocating pages myself there, so it's something else. Hmm.

--
Christophe Saout <christophe at saout.de>
Please avoid sending me Word or PowerPoint attachments.
See http://www.fsf.org/philosophy/no-word-attachments.html
-------------- next part --------------
--- dm-crypt.c.1	2003-07-29 22:09:48.000000000 +0200
+++ dm-crypt.c.2	2003-07-30 15:24:57.000000000 +0200
@@ -21,7 +21,23 @@
 struct crypt_io {
 	struct dm_target *target;
 	struct bio *bio;
-	struct bio *clone;
+	struct bio *first_clone;
+	atomic_t pending;
+	int error;
+};
+
+/*
+ * context holding the current state of a multi-part conversion
+ */
+struct convert_context {
+	struct bio *bio_in;
+	struct bio *bio_out;
+	unsigned int offset_in;
+	unsigned int offset_out;
+	int idx_in;
+	int idx_out;
+	sector_t sector;
+	int write;
 };
 
 /*
@@ -56,7 +72,10 @@
 	u8 key[0];
 };
 
-#define MIN_IOS 256
+#define MIN_IOS        256
+#define MIN_POOL_PAGES 16
+#define MIN_BIO_PAGES  4
+
 static kmem_cache_t *_io_cache;
 
 static inline struct crypt_io *crypt_alloc_io(struct crypt_c *cc)
@@ -103,52 +122,62 @@
 	return r;
 }
 
+static void
+crypt_convert_init(struct crypt_c *cc, struct convert_context *ctx,
+                   struct bio *bio_out, struct bio *bio_in,
+                   sector_t sector, int write)
+{
+	ctx->bio_in = bio_in;
+	ctx->bio_out = bio_out;
+	ctx->offset_in = 0;
+	ctx->offset_out = 0;
+	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
+	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+	ctx->sector = sector + cc->iv_offset;
+	ctx->write = write;
+}
+
 /*
  * Encrypt / decrypt data from one bio to another one (may be the same)
  */
-static int crypt_convert(struct crypt_c *cc, struct bio *bio_out,
-                         struct bio *bio_in, sector_t sector, int write)
+static int crypt_convert(struct crypt_c *cc,
+                         struct convert_context *ctx)
 {
-	unsigned int offset_in = 0;
-	unsigned int offset_out = 0;
-	int idx_in = bio_in->bi_idx;
-	int idx_out = bio_out->bi_idx;
 	int r = 0;
 
- 	sector += cc->iv_offset;
-
-	while(idx_in < bio_in->bi_vcnt) {
-		struct bio_vec *bv_in = bio_iovec_idx(bio_in, idx_in);
-		struct bio_vec *bv_out = bio_iovec_idx(bio_out, idx_out);
+	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
+	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
+		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
 		struct scatterlist sg_in = {
 			.page = bv_in->bv_page,
-			.offset = bv_in->bv_offset + offset_in,
+			.offset = bv_in->bv_offset + ctx->offset_in,
 			.length = 1 << SECTOR_SHIFT
 		};
 		struct scatterlist sg_out = {
 			.page = bv_out->bv_page,
-			.offset = bv_out->bv_offset + offset_out,
+			.offset = bv_out->bv_offset + ctx->offset_out,
 			.length = 1 << SECTOR_SHIFT
 		};
 
-		offset_in += sg_in.length;
-		if (offset_in >= bv_in->bv_len) {
-			offset_in = 0;
-			idx_in++;
+		ctx->offset_in += sg_in.length;
+		if (ctx->offset_in >= bv_in->bv_len) {
+			ctx->offset_in = 0;
+			ctx->idx_in++;
 		}
 
-		offset_out += sg_out.length;
-		if (offset_out >= bv_out->bv_len) {
-			offset_out = 0;
-			idx_out++;
+		ctx->offset_out += sg_out.length;
+		if (ctx->offset_out >= bv_out->bv_len) {
+			ctx->offset_out = 0;
+			ctx->idx_out++;
 		}
 
 		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
-		                              write, sector);
+		                              ctx->write, ctx->sector);
 		if (r < 0)
 			break;
 
-		sector++;
+		ctx->sector++;
 	}
 
 	return r;
@@ -156,25 +185,47 @@
 
 /*
  * Generate a new unfragmented bio with the given size
- * This shoule never violate the device limitations
+ * May return a smaller bio when running out of pages
+ * This should never violate the device limitations
  */
-static struct bio *crypt_alloc_sized_bio(unsigned int size)
+static struct bio *crypt_alloc_sized_bio(unsigned int size,
+                                         struct bio *base_bio,
+                                         int *bio_vec_idx)
 {
 	struct bio *bio;
-	struct bio_vec *bv;
 	int nr_iovecs = dm_div_up(size, PAGE_SIZE);
+	int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	int i;
 
-	bio = bio_alloc(__GFP_NOWARN, nr_iovecs);
+	if (base_bio)
+		bio = bio_clone(base_bio, GFP_NOIO);
+	else
+		bio = bio_alloc(GFP_NOIO, nr_iovecs);
 	if (!bio)
 		return NULL;
 
-	bio->bi_vcnt = nr_iovecs;
-	bio->bi_size = size;
-	__bio_for_each_segment(bv, bio, i, 0) {
-		bv->bv_page = alloc_page(__GFP_NOWARN|__GFP_HIGHMEM);
+	/* if the last bio was not complete, continue where that one ends */
+	bio->bi_idx = *bio_vec_idx;
+	bio->bi_vcnt = *bio_vec_idx;
+	bio->bi_size = 0;
+
+	/* bio->bi_idx pages have already been allocated */
+	size -= bio->bi_idx * PAGE_SIZE;
+
+	for(i = bio->bi_idx; i < nr_iovecs; i++) {
+		struct bio_vec *bv = bio_iovec_idx(bio, i);
+
+		bv->bv_page = alloc_page(gfp_mask);
 		if (!bv->bv_page)
-			goto oom;
+			break;
+
+		/*
+		 * if additional pages cannot be allocated without waiting
+		 * return a partially allocated bio, the caller will then try
+		 * to allocate additional bios while submitting this partial bio
+		 */
+		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
+			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		bv->bv_offset = 0;
 		if (size > PAGE_SIZE)
@@ -182,19 +233,28 @@
 		else
 			bv->bv_len = size;
 
-		size -= PAGE_SIZE;
+		bio->bi_size += bv->bv_len;
+		bio->bi_vcnt++;
+		size -= bv->bv_len;
 	}
 
-	return bio;
+	if (!bio->bi_size) {
+		bio_put(bio);
+		return NULL;
+	}
 
-oom:
-	while(--i >= 0)
-		__free_page(bio->bi_io_vec[i].bv_page);
-	bio_put(bio);
-	return NULL;
+	/*
+	 * remember the last bio_vec allocated to be able to correctly
+	 * continue after splitting caused by memory pressure
+	 */
+	*bio_vec_idx = bio->bi_vcnt;
+
+	return bio;
 }
 
-static struct bio *crypt_alloc_buffer(struct bio *bio)
+static struct bio *crypt_alloc_buffer(struct bio *old_bio,
+                                      struct bio *base_bio,
+                                      int *bio_vec_idx)
 {
 	struct bio *new_bio;
 	int retries = 50;
@@ -209,7 +269,8 @@
 		int flags = current->flags;
 
 		current->flags &= ~PF_MEMALLOC;
-		new_bio = crypt_alloc_sized_bio(bio->bi_size);
+		new_bio = crypt_alloc_sized_bio(old_bio->bi_size, base_bio,
+                                      bio_vec_idx);
 		if (flags & PF_MEMALLOC)
 			current->flags |= PF_MEMALLOC;
 
@@ -220,22 +281,41 @@
 		}
 	} while(!new_bio);
 
-	new_bio->bi_rw = bio->bi_rw;
-
 	return new_bio;
 }
 
-static void crypt_free_buffer(struct bio *bio)
+static void crypt_free_buffer_pages(struct bio *bio, unsigned int bytes)
 {
-	struct bio_vec *bv;
-	int i;
+	int i = bio->bi_idx;
 
-	BUG_ON(bio_flagged(bio, BIO_CLONED));
-
-	__bio_for_each_segment(bv, bio, i, 0)
+	while(bytes) {
+		struct bio_vec *bv = bio_iovec_idx(bio, i++);
 		__free_page(bv->bv_page);
+		bytes -= bv->bv_len;
+	}
+}
 
-	bio_put(bio);
+/*
+ * One of the bios was finished. Check for completion of
+ * the whole request and correctly cleanup the buffer.
+ */
+static void dec_pending(struct crypt_io *io, int error)
+{
+	struct crypt_c *cc = (struct crypt_c*) io->target->private;
+
+	if (!atomic_dec_and_test(&io->pending))
+		return;
+
+	if (io->first_clone)
+		bio_put(io->first_clone);
+
+	if (error < 0)
+		io->error = error;
+
+	if (io->bio)
+		bio_endio(io->bio, io->bio->bi_size, io->error);
+
+	crypt_free_io(cc, io);
 }
 
 /*
@@ -295,6 +375,7 @@
 	while (1) {
 		struct bio *bio;
 		struct crypt_io *io;
+		struct convert_context ctx;
 
 		while (down_interruptible(&cc->bh_mutex) == -EINTR);
 
@@ -306,13 +387,12 @@
 
 		io = (struct crypt_io*) bio->bi_private;
 
-		r = crypt_convert(cc, io->bio, io->bio,
-		                  io->bio->bi_sector - ti->begin, 0);
+		crypt_convert_init(cc, &ctx, io->bio, io->bio,
+		                   io->bio->bi_sector - ti->begin, 0);
+		r = crypt_convert(cc, &ctx);
 
 		bio_put(bio);
-
-		bio_endio(io->bio, io->bio->bi_size, r);
-		crypt_free_io(cc, io);
+		dec_pending(io, r);
 	}
 
 	up(&cc->sem);
@@ -512,28 +592,31 @@
 	struct crypt_io *io = (struct crypt_io*) bio->bi_private;
 	struct crypt_c *cc = (struct crypt_c*) io->target->private;
 
-	if (bio->bi_size)
-		return 1;
-
-	if (bio_rw(bio) == WRITE)
-		crypt_free_buffer(bio);
-	else {
+	if (bio_rw(bio) == WRITE) {
 		/*
-		 * successful reads get decrypted by the worker thread
-		 * because we never want to decrypt in an irq context
+		 * free the processed pages, even if
+		 * it's only a partially completed write
 		 */
-		if (bio_flagged(bio, BIO_UPTODATE)) {
-			cryptio_queue_bio(cc, bio);
-			up(&cc->bh_mutex);
+		crypt_free_buffer_pages(bio, done);
+	}
 
-			return 0;
-		}
+	if (bio->bi_size)
+		return 1;
 
-		bio_put(bio);
+	/*
+	 * successful reads get decrypted by the worker thread
+	 * because we never want to decrypt in an irq context
+	 */
+	if ((bio_rw(bio) == READ || bio_rw(bio) == READA)
+	    && bio_flagged(bio, BIO_UPTODATE)) {
+		cryptio_queue_bio(cc, bio);
+		up(&cc->bh_mutex);
+
+		return 0;
 	}
 
-	bio_endio(io->bio, io->bio->bi_size, error);
-	crypt_free_io(cc, io);
+	bio_put(bio);
+	dec_pending(io, error);
 
 	return error;
 }
@@ -542,38 +625,77 @@
                      union map_info *map_context)
 {
 	struct crypt_c *cc = (struct crypt_c*) ti->private;
-	struct crypt_io *io;
-	struct bio *clone;
+	struct crypt_io *io = crypt_alloc_io(cc);
+	struct bio *clone = NULL;
+	struct convert_context ctx;
+	unsigned int remaining = bio->bi_size;
+	sector_t sector = bio->bi_sector - ti->begin;
+	int bio_vec_idx = 0;
 	int r = 0;
 
-	if (bio_rw(bio) == WRITE) {
-		clone = crypt_alloc_buffer(bio);
-		if (clone) {
-			r = crypt_convert(cc, clone, bio, bio->bi_sector - ti->begin, 1);
-			if (r < 0) {
-				crypt_free_buffer(clone);
-				return r;
+	io->target = ti;
+	io->bio = bio;
+	io->first_clone = NULL;
+	io->error = 0;
+	atomic_set(&io->pending, 1);
+
+	if (bio_rw(bio) == WRITE)
+		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);
+
+	while (remaining) {
+		if (bio_rw(bio) == WRITE) {
+			clone = crypt_alloc_buffer(bio, io->first_clone, &bio_vec_idx);
+			if (clone) {
+				ctx.bio_out = clone;
+				r = crypt_convert(cc, &ctx);
+				if (r < 0) {
+					crypt_free_buffer_pages(clone, clone->bi_size);
+					bio_put(clone);
+					goto cleanup;
+				}
 			}
+		} else
+			clone = bio_clone(bio, GFP_NOIO);
+
+		if (!clone) {
+			r = -ENOMEM;
+			goto cleanup;
 		}
-	} else
-		clone = bio_clone(bio, GFP_NOIO);
 
-	if (!clone)
-		return -ENOMEM;
+		if (!io->first_clone) {
+			/*
+			 * hold a reference to the first clone, because it holds
+			 * the bio_vec array and that needs to be released only
+			 * after all other clones are released
+			 */
+			bio_get(clone);
+			io->first_clone = clone;
+		}
+		atomic_inc(&io->pending);
 
-	io = crypt_alloc_io(cc);
-	io->target = ti;
-	io->bio = bio;
-	io->clone = clone;
+		clone->bi_private = io;
+		clone->bi_end_io = crypt_endio;
+		clone->bi_bdev = cc->dev->bdev;
+		clone->bi_sector = cc->start + sector;
+		clone->bi_rw = bio->bi_rw;
 
-	clone->bi_private = io;
-	clone->bi_end_io = crypt_endio;
-	clone->bi_bdev = cc->dev->bdev;
-	clone->bi_sector = cc->start + (bio->bi_sector - ti->begin);
+		remaining -= clone->bi_size;
+		sector += bio_sectors(clone);
 
-	generic_make_request(clone);
+		generic_make_request(clone);
+	}
 
+	dec_pending(io, 0);
 	return 0;
+
+cleanup:
+	if (io->first_clone) {
+		dec_pending(io, r);
+		return 0;
+	}
+
+	crypt_free_io(cc, io);
+	return r;
 }
 
 static int crypt_status(struct dm_target *ti, status_type_t type,
-------------- next part --------------
--- dm-crypt.c.2	2003-07-30 15:24:57.000000000 +0200
+++ dm-crypt.c.3	2003-07-30 16:01:10.500919568 +0200
@@ -49,9 +49,11 @@
 	sector_t start;
 
 	/*
-	 * pool for per bio private data
+	 * pool for per bio private data and
+	 * for encryption buffer pages
 	 */
 	mempool_t *io_pool;
+	mempool_t *page_pool;
 
 	/*
 	 * worker thread related data
@@ -74,10 +76,33 @@
 
 #define MIN_IOS        256
 #define MIN_POOL_PAGES 16
-#define MIN_BIO_PAGES  4
+#define MIN_BIO_PAGES  8
 
 static kmem_cache_t *_io_cache;
 
+/*
+ * Mempool alloc and free functions for the page and io pool
+ */
+static void *mempool_alloc_page(int gfp_mask, void *data)
+{
+	return alloc_page(gfp_mask);
+}
+
+static void mempool_free_page(void *page, void *data)
+{
+	__free_page(page);
+}
+
+static inline struct page *crypt_alloc_page(struct crypt_c *cc, int gfp_mask)
+{
+	return mempool_alloc(cc->page_pool, gfp_mask);
+}
+
+static inline void crypt_free_page(struct crypt_c *cc, struct page *page)
+{
+	 mempool_free(page, cc->page_pool);
+}
+
 static inline struct crypt_io *crypt_alloc_io(struct crypt_c *cc)
 {
 	return mempool_alloc(cc->io_pool, GFP_NOIO);
@@ -185,12 +210,12 @@
 
 /*
  * Generate a new unfragmented bio with the given size
- * May return a smaller bio when running out of pages
  * This should never violate the device limitations
+ * May return a smaller bio when running out of pages
  */
-static struct bio *crypt_alloc_sized_bio(unsigned int size,
-                                         struct bio *base_bio,
-                                         int *bio_vec_idx)
+static struct bio *
+crypt_alloc_buffer(struct crypt_c *cc, unsigned int size,
+                   struct bio *base_bio, int *bio_vec_idx)
 {
 	struct bio *bio;
 	int nr_iovecs = dm_div_up(size, PAGE_SIZE);
@@ -215,7 +240,7 @@
 	for(i = bio->bi_idx; i < nr_iovecs; i++) {
 		struct bio_vec *bv = bio_iovec_idx(bio, i);
 
-		bv->bv_page = alloc_page(gfp_mask);
+		bv->bv_page = crypt_alloc_page(cc, gfp_mask);
 		if (!bv->bv_page)
 			break;
 
@@ -252,45 +277,14 @@
 	return bio;
 }
 
-static struct bio *crypt_alloc_buffer(struct bio *old_bio,
-                                      struct bio *base_bio,
-                                      int *bio_vec_idx)
-{
-	struct bio *new_bio;
-	int retries = 50;
-
-	/*
-	 * When called on the page reclaim -> writepage path, this code can
-	 * trivially consume all memory.  So we drop PF_MEMALLOC to avoid
-	 * stealing all the page reserves and throttle to the writeout rate.
-	 * pdflush will have been woken by page reclaim.  Let it do its work.
-	 */
-	do {
-		int flags = current->flags;
-
-		current->flags &= ~PF_MEMALLOC;
-		new_bio = crypt_alloc_sized_bio(old_bio->bi_size, base_bio,
-                                      bio_vec_idx);
-		if (flags & PF_MEMALLOC)
-			current->flags |= PF_MEMALLOC;
-
-		if (!new_bio) {
-			if (!--retries)
-				return NULL;
-			blk_congestion_wait(WRITE, HZ/10);
-		}
-	} while(!new_bio);
-
-	return new_bio;
-}
-
-static void crypt_free_buffer_pages(struct bio *bio, unsigned int bytes)
+static void crypt_free_buffer_pages(struct crypt_c *cc, struct bio *bio,
+                                    unsigned int bytes)
 {
 	int i = bio->bi_idx;
 
 	while(bytes) {
 		struct bio_vec *bv = bio_iovec_idx(bio, i++);
-		__free_page(bv->bv_page);
+		crypt_free_page(cc, bv->bv_page);
 		bytes -= bv->bv_len;
 	}
 }
@@ -514,9 +508,14 @@
 				     mempool_free_slab, _io_cache);
 	if (!cc->io_pool) {
 		ti->error = "dm-crypt: Cannot allocate crypt io mempool";
-		crypto_free_tfm(tfm);
-		kfree(cc);
-		return -ENOMEM;
+		goto bad1;
+	}
+
+	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
+				       mempool_free_page, NULL);
+	if (!cc->page_pool) {
+		ti->error = "dm-crypt: Cannot allocate page mempool";
+		goto bad2;
 	}
 
 	cc->tfm = tfm;
@@ -525,28 +524,28 @@
 	if ((key_size == 0 && strcmp(argv[1], "-") != 0)
 	    || crypt_decode_key(cc->key, argv[1], key_size) < 0) {
 		ti->error = "dm-crypt: Error decoding key";
-		goto bad;
+		goto bad3;
 	}
 
 	if (tfm->crt_u.cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
 		ti->error = "dm-crypt: Error setting key";
-		goto bad;
+		goto bad3;
 	}
 
 	if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) {
 		ti->error = "dm-crypt: Invalid iv_offset sector";
-		goto bad;
+		goto bad3;
 	}
 
 	if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) {
 		ti->error = "dm-crypt: Invalid device sector";
-		goto bad;
+		goto bad3;
 	}
 
 	if (dm_get_device(ti, argv[3], cc->start, ti->len,
 	                  dm_table_get_mode(ti->table), &cc->dev)) {
 		ti->error = "dm-crypt: Device lookup failed";
-		goto bad;
+		goto bad3;
 	}
 
 	spin_lock_init(&cc->lock);
@@ -561,8 +560,11 @@
 
 	return 0;
 
-bad:
+bad3:
+	mempool_destroy(cc->page_pool);
+bad2:
 	mempool_destroy(cc->io_pool);
+bad1:
 	crypto_free_tfm(tfm);
 	kfree(cc);
 	return -EINVAL;
@@ -581,9 +583,13 @@
 	/* wait for worker thread to terminate */
 	down(&cc->sem);
 
+	mempool_destroy(cc->page_pool);
 	mempool_destroy(cc->io_pool);
+
 	crypto_free_tfm(cc->tfm);
+
 	dm_put_device(ti, cc->dev);
+
 	kfree(cc);
 }
 
@@ -597,7 +603,7 @@
 		 * free the processed pages, even if
 		 * it's only a partially completed write
 		 */
-		crypt_free_buffer_pages(bio, done);
+		crypt_free_buffer_pages(cc, bio, done);
 	}
 
 	if (bio->bi_size)
@@ -637,19 +643,24 @@
 	io->bio = bio;
 	io->first_clone = NULL;
 	io->error = 0;
-	atomic_set(&io->pending, 1);
+	atomic_set(&io->pending, 1); /* hold a reference */
 
 	if (bio_rw(bio) == WRITE)
 		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);
 
+	/*
+	 * The allocated buffers can be smaller then the whole bio,
+	 * so repeat the whole process until all the data can be handled.
+	 */
 	while (remaining) {
 		if (bio_rw(bio) == WRITE) {
-			clone = crypt_alloc_buffer(bio, io->first_clone, &bio_vec_idx);
+			clone = crypt_alloc_buffer(cc, bio->bi_size, io->first_clone,
+			                           &bio_vec_idx);
 			if (clone) {
 				ctx.bio_out = clone;
 				r = crypt_convert(cc, &ctx);
 				if (r < 0) {
-					crypt_free_buffer_pages(clone, clone->bi_size);
+					crypt_free_buffer_pages(cc, clone, clone->bi_size);
 					bio_put(clone);
 					goto cleanup;
 				}
@@ -685,6 +696,7 @@
 		generic_make_request(clone);
 	}
 
+	/* drop reference, clones could have returned before we reach this */
 	dec_pending(io, 0);
 	return 0;
 
@@ -694,6 +706,7 @@
 		return 0;
 	}
 
+	/* if no bio has been dispatched yet, we can directly return the error */
 	crypt_free_io(cc, io);
 	return r;
 }

