Mempool rework from Christoph Hellwig which holds the free objects in
an array of pointers rather than on a list.  The old implementation
threaded a struct list_head through each free element, so objects were
dirtied while they sat in the pool; that broke callers who expect
objects to come out of a slab in a particular state (e.g. preset by a
slab constructor).  The array keeps its bookkeeping outside the
objects, so pooled elements are never written to, and alloc_fn() no
longer has to return elements large enough to hold a list_head.
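
For reference, here is a minimal usage sketch of the API as it stands
after this patch (illustrative only, not part of the change; the
"my_io"/"my_cache"/"my_pool" names are made up), backing the pool with
a slab cache via the mempool_alloc_slab/mempool_free_slab helpers:

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/mempool.h>

	struct my_io {
		long sector;
		void *buffer;
	};

	static kmem_cache_t *my_cache;	/* hypothetical backing slab */
	static mempool_t *my_pool;

	static int __init my_init(void)
	{
		my_cache = kmem_cache_create("my_io", sizeof(struct my_io),
					     0, 0, NULL, NULL);
		if (!my_cache)
			return -ENOMEM;

		/* Pre-reserve 16 elements; pool_data is the backing cache */
		my_pool = mempool_create(16, mempool_alloc_slab,
					 mempool_free_slab, my_cache);
		if (!my_pool) {
			kmem_cache_destroy(my_cache);
			return -ENOMEM;
		}
		return 0;
	}

	static void __exit my_exit(void)
	{
		/* every element must have been mempool_free()ed by now */
		mempool_destroy(my_pool);
		kmem_cache_destroy(my_cache);
	}

	static void my_io_path(void)
	{
		/*
		 * With __GFP_WAIT set (as in GFP_NOIO) this waits for an
		 * element to be returned rather than failing.
		 */
		struct my_io *io = mempool_alloc(my_pool, GFP_NOIO);

		/* ... use io ... */

		mempool_free(io, my_pool);	/* tops up the reserve first */
	}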
--- diff/include/linux/mempool.h	2002-11-15 11:54:01.000000000 +0000
+++ source/include/linux/mempool.h	2002-11-15 11:53:36.000000000 +0000
@@ -13,24 +13,13 @@
 typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
 typedef void (mempool_free_t)(void *element, void *pool_data);
 
-struct mempool_s {
-	spinlock_t lock;
-	int min_nr, curr_nr;
-	struct list_head elements;
-
-	void *pool_data;
-	mempool_alloc_t *alloc;
-	mempool_free_t *free;
-	wait_queue_head_t wait;
-};
 extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 				 mempool_free_t *free_fn, void *pool_data);
-extern void mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
 extern void mempool_destroy(mempool_t *pool);
 extern void * mempool_alloc(mempool_t *pool, int gfp_mask);
 extern void mempool_free(void *element, mempool_t *pool);
 
-
 /*
  * A mempool_alloc_t and mempool_free_t that get the memory from
  * a slab that is passed in through pool_data.
@@ -38,4 +27,5 @@
 void *mempool_alloc_slab(int gfp_mask, void *pool_data);
 void mempool_free_slab(void *element, void *pool_data);
 
+
 #endif /* _LINUX_MEMPOOL_H */
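
Two API changes are visible above: the struct mempool_s definition moves
out of the header and into mm/mempool.c (mempool_t becomes opaque to
users), and mempool_resize() now returns an error code instead of void.
A hypothetical caller, for illustration only:

	/* growing the reserve can now fail with -ENOMEM */
	if (mempool_resize(my_pool, 32, GFP_KERNEL) < 0)
		printk(KERN_WARNING "mydev: could not grow mempool\n");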
--- diff/mm/mempool.c	2002-11-15 11:54:01.000000000 +0000
+++ source/mm/mempool.c	2002-11-15 11:53:50.000000000 +0000
@@ -12,7 +12,40 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/mempool.h>
-#include <linux/compiler.h>
+
+struct mempool_s {
+	spinlock_t lock;
+	int min_nr;		/* nr of elements at *elements */
+	int curr_nr;		/* Current nr of elements at *elements */
+	void **elements;
+
+	void *pool_data;
+	mempool_alloc_t *alloc;
+	mempool_free_t *free;
+	wait_queue_head_t wait;
+};
+
+static void add_element(mempool_t *pool, void *element)
+{
+	BUG_ON(pool->curr_nr >= pool->min_nr);
+	pool->elements[pool->curr_nr++] = element;
+}
+
+static void *remove_element(mempool_t *pool)
+{
+	BUG_ON(pool->curr_nr <= 0);
+	return pool->elements[--pool->curr_nr];
+}
+
+static void free_pool(mempool_t *pool)
+{
+	while (pool->curr_nr) {
+		void *element = remove_element(pool);
+		pool->free(element, pool->pool_data);
+	}
+	kfree(pool->elements);
+	kfree(pool);
+}
 
 /**
  * mempool_create - create a memory pool
@@ -26,24 +59,25 @@
  * memory pool. The pool can be used from the mempool_alloc and mempool_free
  * functions. This function might sleep. Both the alloc_fn() and the free_fn()
  * functions might sleep - as long as the mempool_alloc function is not called
- * from IRQ contexts. The element allocated by alloc_fn() must be able to
- * hold a struct list_head. (8 bytes on x86.)
+ * from IRQ contexts.
  */
 mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 				mempool_free_t *free_fn, void *pool_data)
 {
 	mempool_t *pool;
-	int i;
 
 	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
 	if (!pool)
 		return NULL;
 	memset(pool, 0, sizeof(*pool));
-
+	pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
+	if (!pool->elements) {
+		kfree(pool);
+		return NULL;
+	}
 	spin_lock_init(&pool->lock);
 	pool->min_nr = min_nr;
 	pool->pool_data = pool_data;
-	INIT_LIST_HEAD(&pool->elements);
 	init_waitqueue_head(&pool->wait);
 	pool->alloc = alloc_fn;
 	pool->free = free_fn;
@@ -51,27 +85,15 @@
 	/*
 	 * First pre-allocate the guaranteed number of buffers.
 	 */
-	for (i = 0; i < min_nr; i++) {
+	while (pool->curr_nr < pool->min_nr) {
 		void *element;
-		struct list_head *tmp;
-		element = pool->alloc(GFP_KERNEL, pool->pool_data);
 
+		element = pool->alloc(GFP_KERNEL, pool->pool_data);
 		if (unlikely(!element)) {
-			/*
-			 * Not enough memory - free the allocated ones
-			 * and return:
-			 */
-			list_for_each(tmp, &pool->elements) {
-				element = tmp;
-				pool->free(element, pool->pool_data);
-			}
-			kfree(pool);
-
+			free_pool(pool);
 			return NULL;
 		}
-		tmp = element;
-		list_add(tmp, &pool->elements);
-		pool->curr_nr++;
+		add_element(pool, element);
 	}
 	return pool;
 }
@@ -92,53 +114,57 @@
  * while this function is running. mempool_alloc() & mempool_free()
  * might be called (eg. from IRQ contexts) while this function executes.
  */
-void mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
 {
-	int delta;
 	void *element;
+	void **new_elements;
 	unsigned long flags;
-	struct list_head *tmp;
 
-	if (new_min_nr <= 0)
-		BUG();
+	BUG_ON(new_min_nr <= 0);
 
 	spin_lock_irqsave(&pool->lock, flags);
 	if (new_min_nr < pool->min_nr) {
-		pool->min_nr = new_min_nr;
-		/*
-		 * Free possible excess elements.
-		 */
-		while (pool->curr_nr > pool->min_nr) {
-			tmp = pool->elements.next;
-			if (tmp == &pool->elements)
-				BUG();
-			list_del(tmp);
-			element = tmp;
-			pool->curr_nr--;
+		while (pool->curr_nr > new_min_nr) {
+			element = remove_element(pool);
 			spin_unlock_irqrestore(&pool->lock, flags);
-
 			pool->free(element, pool->pool_data);
-
 			spin_lock_irqsave(&pool->lock, flags);
 		}
-		spin_unlock_irqrestore(&pool->lock, flags);
-		return;
+		pool->min_nr = new_min_nr;
+		goto out_unlock;
 	}
-	delta = new_min_nr - pool->min_nr;
-	pool->min_nr = new_min_nr;
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	/*
-	 * We refill the pool up to the new treshold - but we dont
-	 * (cannot) guarantee that the refill succeeds.
-	 */
-	while (delta) {
+	/* Grow the pool */
+	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
+	if (!new_elements)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	memcpy(new_elements, pool->elements,
+			pool->curr_nr * sizeof(*new_elements));
+	kfree(pool->elements);
+	pool->elements = new_elements;
+	pool->min_nr = new_min_nr;
+
+	while (pool->curr_nr < pool->min_nr) {
+		spin_unlock_irqrestore(&pool->lock, flags);
 		element = pool->alloc(gfp_mask, pool->pool_data);
 		if (!element)
-			break;
-		mempool_free(element, pool);
-		delta--;
+			goto out;
+		spin_lock_irqsave(&pool->lock, flags);
+		if (pool->curr_nr < pool->min_nr) {
+			add_element(pool, element);
+		} else {
+			spin_unlock_irqrestore(&pool->lock, flags);
+			pool->free(element, pool->pool_data);	/* Raced */
+			goto out;
+		}
 	}
+out_unlock:
+	spin_unlock_irqrestore(&pool->lock, flags);
+out:
+	return 0;
 }
 
 /**
@@ -147,27 +173,14 @@
  *             mempool_create().
  *
  * this function only sleeps if the free_fn() function sleeps. The caller
- * has to guarantee that no mempool_alloc() nor mempool_free() happens in
- * this pool when calling this function.
+ * has to guarantee that all elements have been returned to the pool (i.e.
+ * freed) prior to calling mempool_destroy().
  */
 void mempool_destroy(mempool_t *pool)
 {
-	void *element;
-	struct list_head *head, *tmp;
-
-	if (!pool)
-		return;
-
-	head = &pool->elements;
-	for (tmp = head->next; tmp != head; ) {
-		element = tmp;
-		tmp = tmp->next;
-		pool->free(element, pool->pool_data);
-		pool->curr_nr--;
-	}
-	if (pool->curr_nr)
-		BUG();
-	kfree(pool);
+	if (pool->curr_nr != pool->min_nr)
+		BUG();		/* There were outstanding elements */
+	free_pool(pool);
 }
 
 /**
@@ -185,7 +198,6 @@
 {
 	void *element;
 	unsigned long flags;
-	struct list_head *tmp;
 	int curr_nr;
 	DECLARE_WAITQUEUE(wait, current);
 	int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
@@ -212,10 +224,7 @@
 
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
-		tmp = pool->elements.next;
-		list_del(tmp);
-		element = tmp;
-		pool->curr_nr--;
+		element = remove_element(pool);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		return element;
 	}
@@ -258,8 +267,7 @@
 	if (pool->curr_nr < pool->min_nr) {
 		spin_lock_irqsave(&pool->lock, flags);
 		if (pool->curr_nr < pool->min_nr) {
-			list_add(element, &pool->elements);
-			pool->curr_nr++;
+			add_element(pool, element);
 			spin_unlock_irqrestore(&pool->lock, flags);
 			wake_up(&pool->wait);
 			return;
@@ -292,4 +300,3 @@
 EXPORT_SYMBOL(mempool_free);
 EXPORT_SYMBOL(mempool_alloc_slab);
 EXPORT_SYMBOL(mempool_free_slab);
-
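
The element store itself is just a bounded pointer array used as a LIFO
stack.  A standalone userspace sketch (illustrative only, not kernel
code) of the add_element()/remove_element() invariants, demonstrating
that pooled objects are never written to while they sit in the reserve:

	#include <assert.h>
	#include <stdlib.h>

	struct pool {
		int min_nr;	/* capacity of elements[] */
		int curr_nr;	/* pointers currently stored */
		void **elements;
	};

	static void add_element(struct pool *pool, void *element)
	{
		assert(pool->curr_nr < pool->min_nr);
		pool->elements[pool->curr_nr++] = element;
	}

	static void *remove_element(struct pool *pool)
	{
		assert(pool->curr_nr > 0);
		return pool->elements[--pool->curr_nr];
	}

	int main(void)
	{
		struct pool pool = { 4, 0, NULL };
		int a = 1, b = 2;

		pool.elements = malloc(pool.min_nr * sizeof(void *));
		if (!pool.elements)
			return 1;

		add_element(&pool, &a);
		add_element(&pool, &b);
		assert(remove_element(&pool) == (void *)&b);	/* LIFO */
		assert(remove_element(&pool) == (void *)&a);
		assert(a == 1 && b == 2);	/* contents untouched */

		free(pool.elements);
		return 0;
	}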