[dm-devel] [PATCH] dm.c : Per-device caches

Kevin Corry corryk at us.ibm.com
Mon Jan 13 11:10:01 UTC 2003


A change a few weeks ago introduced per-device mempools for dm_io structures,
but they were still backed by a single global slab cache. This patch introduces
per-device caches as well.

I'm not certain whether this is required to avoid deadlock, but it seems like
a good idea, since we allocate a dm_io for every I/O request.
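
For context, the per-request allocation in question looks roughly like the
following (a sketch of the dm.c I/O path against the 2.5 mempool interface,
not quoted verbatim from the file):

	/*
	 * Sketch: every incoming request needs a struct dm_io, drawn
	 * from the device's mempool so the allocation can always make
	 * forward progress under memory pressure.
	 */
	static inline struct dm_io *alloc_io(struct mapped_device *md)
	{
		return mempool_alloc(md->io_pool, GFP_NOIO);
	}

	static inline void free_io(struct mapped_device *md, struct dm_io *io)
	{
		mempool_free(io, md->io_pool);
	}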

Of the other mempool users, both the SCSI and bio code use per-object caches
when they use per-object mempools: SCSI uses them for per-host scatter-gather
lists (drivers/scsi/scsi_lib.c:scsi_init_queue()), and bio uses them for
biovec pools (fs/bio.c:biovec_init_pools()). NFS appears to use global
pools and caches rather than per-object ones (fs/nfs/read.c and
fs/nfs/write.c), JFS uses per-object pools with a global cache
(fs/jfs/jfs_metapage.c), and MD RAID-1 and Multipath use mempools without
dedicated caches.
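
The pattern those callers follow, and which this patch adopts for dm, pairs a
dedicated slab cache with the mempool that draws from it. A minimal sketch
against the 2.5 slab/mempool interfaces (struct foo, foo_pools_init() and
MIN_FOOS are illustrative names, not taken from any of the drivers above):

	#include <linux/slab.h>
	#include <linux/mempool.h>
	#include <linux/errno.h>

	struct foo { int placeholder; };	/* illustrative object type */

	#define MIN_FOOS 16			/* illustrative reserved count */

	static kmem_cache_t *foo_cache;
	static mempool_t *foo_pool;

	static int foo_pools_init(void)
	{
		/* per-object slab cache... */
		foo_cache = kmem_cache_create("foo", sizeof(struct foo),
					      0, 0, NULL, NULL);
		if (!foo_cache)
			return -ENOMEM;

		/* ...backing a mempool with a guaranteed minimum reserve */
		foo_pool = mempool_create(MIN_FOOS, mempool_alloc_slab,
					  mempool_free_slab, foo_cache);
		if (!foo_pool) {
			kmem_cache_destroy(foo_cache);
			return -ENOMEM;
		}

		return 0;
	}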

--- linux-2.5.56a/drivers/md/dm.c	2003/01/02 19:58:28
+++ linux-2.5.56b/drivers/md/dm.c	2003/01/13 15:46:17
@@ -62,27 +62,20 @@
 	/*
 	 * io objects are allocated from here.
 	 */
+	kmem_cache_t *io_cache;
 	mempool_t *io_pool;
 };
 
 #define MIN_IOS 256
-static kmem_cache_t *_io_cache;
 
 static __init int local_init(void)
 {
 	int r;
 
-	/* allocate a slab for the dm_ios */
-	_io_cache = kmem_cache_create("dm io",
-				      sizeof(struct dm_io), 0, 0, NULL, NULL);
-	if (!_io_cache)
-		return -ENOMEM;
-
 	_major = major;
 	r = register_blkdev(_major, _name, &dm_blk_dops);
 	if (r < 0) {
 		DMERR("register_blkdev failed");
-		kmem_cache_destroy(_io_cache);
 		return r;
 	}
 
@@ -94,8 +87,6 @@
 
 static void local_exit(void)
 {
-	kmem_cache_destroy(_io_cache);
-
 	if (unregister_blkdev(_major, _name) < 0)
 		DMERR("devfs_unregister_blkdev failed");
 
@@ -573,6 +564,7 @@
 static struct mapped_device *alloc_dev(int minor)
 {
 	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
+	char name[15];
 
 	if (!md) {
 		DMWARN("unable to allocate device, out of memory.");
@@ -594,9 +586,19 @@
 	md->queue.queuedata = md;
 	blk_queue_make_request(&md->queue, dm_request);
 
+	snprintf(name, 15, "dm io %d", minor);
+	md->io_cache = kmem_cache_create(name, sizeof(struct dm_io),
+					 0, 0, NULL, NULL);
+	if (!md->io_cache) {
+		free_minor(md->disk->first_minor);
+		kfree(md);
+		return NULL;
+	}
+
 	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
-				     mempool_free_slab, _io_cache);
+				     mempool_free_slab, md->io_cache);
 	if (!md->io_pool) {
+		kmem_cache_destroy(md->io_cache);
 		free_minor(md->disk->first_minor);
 		kfree(md);
 		return NULL;
@@ -627,6 +629,7 @@
 {
 	free_minor(md->disk->first_minor);
 	mempool_destroy(md->io_pool);
+	kmem_cache_destroy(md->io_cache);
 	del_gendisk(md->disk);
 	put_disk(md->disk);
 	kfree(md);
