[PATCH] mempool: use common mempool kmalloc allocator
This patch changes several mempool users, all of which are basically just wrappers around kmalloc(), to use the common mempool_kmalloc/kfree, rather than their own wrapper function, removing a bunch of duplicated code.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 0eaae62aba
parent 53184082b0
committed by Linus Torvalds
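The hunks below call mempool_create_kmalloc_pool() and rely on the shared mempool_kmalloc()/mempool_kfree() helpers, presumably introduced by the parent commit (53184082b0). For reference, those helpers amount to roughly the following; this is a sketch reconstructed from the calling convention used at the call sites below, not a verbatim copy of the kernel source:

/* Sketch of the common helpers this patch switches to; the element
 * size travels through the opaque pool_data pointer that mempool_create()
 * passes back to its alloc/free callbacks. */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}

/* Convenience wrapper: a mempool whose elements are plain kmalloc()
 * allocations of a fixed size. */
#define mempool_create_kmalloc_pool(min_nr, size)		\
	mempool_create((min_nr), mempool_kmalloc,		\
		       mempool_kfree, (void *)(size_t)(size))

Because the size is smuggled through pool_data, every driver-local alloc/free wrapper pair below collapses into a single mempool_create_kmalloc_pool() call.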
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -89,16 +89,6 @@ int bitmap_active(struct bitmap *bitmap)
 }
 
 #define WRITE_POOL_SIZE 256
-/* mempool for queueing pending writes on the bitmap file */
-static void *write_pool_alloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc(sizeof(struct page_list), gfp_flags);
-}
-
-static void write_pool_free(void *ptr, void *data)
-{
-	kfree(ptr);
-}
 
 /*
  * just a placeholder - calls kmalloc for bitmap pages
@@ -1564,8 +1554,8 @@ int bitmap_create(mddev_t *mddev)
 	spin_lock_init(&bitmap->write_lock);
 	INIT_LIST_HEAD(&bitmap->complete_pages);
 	init_waitqueue_head(&bitmap->write_wait);
-	bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc,
-				write_pool_free, NULL);
+	bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE,
+						sizeof(struct page_list));
 	err = -ENOMEM;
 	if (!bitmap->write_pool)
 		goto error;
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -32,16 +32,6 @@ struct io {
 static unsigned _num_ios;
 static mempool_t *_io_pool;
 
-static void *alloc_io(gfp_t gfp_mask, void *pool_data)
-{
-	return kmalloc(sizeof(struct io), gfp_mask);
-}
-
-static void free_io(void *element, void *pool_data)
-{
-	kfree(element);
-}
-
 static unsigned int pages_to_ios(unsigned int pages)
 {
 	return 4 * pages;	/* too many ? */
@@ -65,7 +55,8 @@ static int resize_pool(unsigned int new_ios)
 
 	} else {
 		/* create new pool */
-		_io_pool = mempool_create(new_ios, alloc_io, free_io, NULL);
+		_io_pool = mempool_create_kmalloc_pool(new_ios,
+						       sizeof(struct io));
 		if (!_io_pool)
 			return -ENOMEM;
 
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -122,16 +122,6 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
 /* FIXME move this */
 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
 
-static void *region_alloc(gfp_t gfp_mask, void *pool_data)
-{
-	return kmalloc(sizeof(struct region), gfp_mask);
-}
-
-static void region_free(void *element, void *pool_data)
-{
-	kfree(element);
-}
-
 #define MIN_REGIONS 64
 #define MAX_RECOVERY 1
 static int rh_init(struct region_hash *rh, struct mirror_set *ms,
@@ -173,8 +163,8 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms,
 	INIT_LIST_HEAD(&rh->quiesced_regions);
 	INIT_LIST_HEAD(&rh->recovered_regions);
 
-	rh->region_pool = mempool_create(MIN_REGIONS, region_alloc,
-					 region_free, NULL);
+	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
+						      sizeof(struct region));
 	if (!rh->region_pool) {
 		vfree(rh->buckets);
 		rh->buckets = NULL;
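Every conversion above follows the same shape. As illustration, a minimal self-contained sketch of the resulting pattern; the names my_item, MY_POOL_SIZE, my_pool_init and my_pool_exit are invented for this example and appear nowhere in the patch:

#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct my_item {			/* hypothetical pool element */
	struct list_head list;
};

#define MY_POOL_SIZE 16			/* elements kept preallocated */

static mempool_t *my_pool;

static int my_pool_init(void)
{
	/* One call replaces the old driver-local alloc/free wrapper pair;
	 * the pool keeps MY_POOL_SIZE elements in reserve so allocation
	 * can make progress even under memory pressure. */
	my_pool = mempool_create_kmalloc_pool(MY_POOL_SIZE,
					      sizeof(struct my_item));
	if (!my_pool)
		return -ENOMEM;
	return 0;
}

static void my_pool_exit(void)
{
	mempool_destroy(my_pool);
}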