Give each mapped device its own io mempool, replacing the single shared
global pool, to avoid a potential deadlock with stacked devices: with one
shared pool, a device stacked on another could exhaust the pool and then
block waiting for io objects that only the lower device's in-flight io
(allocated from the same pool) could return.  [HM + EJT]
--- diff/drivers/md/dm.c	2002-11-29 15:03:15.000000000 +0000
+++ source/drivers/md/dm.c	2002-11-29 14:57:32.000000000 +0000
@@ -62,11 +62,15 @@
 	 * The current mapping.
 	 */
 	struct dm_table *map;
+
+	/*
+	 * io objects are allocated from here.
+	 */
+	mempool_t *io_pool;
 };
 
 #define MIN_IOS 256
 static kmem_cache_t *_io_cache;
-static mempool_t *_io_pool;
 
 /* block device arrays */
 static int _block_size[MAX_DEVICES];
@@ -89,18 +93,10 @@
 	if (!_io_cache)
 		return -ENOMEM;
 
-	_io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
-				  mempool_free_slab, _io_cache);
-	if (!_io_pool) {
-		kmem_cache_destroy(_io_cache);
-		return -ENOMEM;
-	}
-
 	_major = major;
 	r = register_blkdev(_major, _name, &dm_blk_dops);
 	if (r < 0) {
 		DMERR("register_blkdev failed");
-		mempool_destroy(_io_pool);
 		kmem_cache_destroy(_io_cache);
 		return r;
 	}
@@ -121,7 +117,6 @@
 
 static void local_exit(void)
 {
-	mempool_destroy(_io_pool);
 	kmem_cache_destroy(_io_cache);
 
 	if (unregister_blkdev(_major, _name) < 0)
@@ -209,14 +204,14 @@
 	return 0;
 }
 
-static inline struct dm_io *alloc_io(void)
+static inline struct dm_io *alloc_io(struct mapped_device *md)
 {
-	return mempool_alloc(_io_pool, GFP_NOIO);
+	return mempool_alloc(md->io_pool, GFP_NOIO);
 }
 
-static inline void free_io(struct dm_io *io)
+static inline void free_io(struct mapped_device *md, struct dm_io *io)
 {
-	mempool_free(io, _io_pool);
+	mempool_free(io, md->io_pool);
 }
 
 static inline struct deferred_io *alloc_deferred(void)
@@ -326,7 +321,7 @@
 
 	bh->b_end_io = io->end_io;
 	bh->b_private = io->context;
-	free_io(io);
+	free_io(io->md, io);
 
 	bh->b_end_io(bh, uptodate);
 }
@@ -345,7 +340,7 @@
 	if (!ti)
 		return -EINVAL;
 
-	io = alloc_io();
+	io = alloc_io(md);
 	if (!io)
 		return -ENOMEM;
 
@@ -363,7 +358,7 @@
 
 	} else
 		/* we don't need to hook */
-		free_io(io);
+		free_io(io->md, io);
 
 	return r;
 }
@@ -609,6 +604,15 @@
 
 	DMWARN("allocating minor %d.", minor);
 	memset(md, 0, sizeof(*md));
+
+	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
+				     mempool_free_slab, _io_cache);
+	if (!md->io_pool) {
+		free_minor(minor);
+		kfree(md);
+		return NULL;
+	}
+
 	md->dev = mk_kdev(_major, minor);
 	init_rwsem(&md->lock);
 	atomic_set(&md->holders, 1);
@@ -621,6 +625,7 @@
 static void free_dev(struct mapped_device *md)
 {
 	free_minor(minor(md->dev));
+	mempool_destroy(md->io_pool);
 	kfree(md);
 }