JBD: JBD slab allocation cleanups
JBD: Replace slab allocations with page allocations. JBD allocates memory for committed_data and frozen_data from the slab. However, JBD should not pass slab pages down to the block layer, so use page-allocator pages instead. This also prepares JBD for the large-blocksize patchset. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Mingming Cao <cmm@us.ibm.com>
This commit is contained in:
committed by
Theodore Ts'o
parent
d85714d81c
commit
c089d490df
@@ -73,14 +73,21 @@ extern int journal_enable_debug;
|
||||
#endif
|
||||
|
||||
/*
 * Allocation helpers implemented in fs/jbd/journal.c.
 *
 * __jbd_kmalloc tags each allocation with the name of the calling
 * function (@where) and optionally retries on allocation failure
 * (@retry).  jbd_slab_alloc/jbd_slab_free manage buffers that, per
 * this patch, are being migrated to the page allocator; see jbd_alloc
 * and jbd_free below.
 */
extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
extern void * jbd_slab_alloc(size_t size, gfp_t flags);
extern void jbd_slab_free(void *ptr, size_t size);
/*
 * Convenience front ends for __jbd_kmalloc.  Both stamp the allocation
 * with the caller's function name; jbd_kmalloc follows the global
 * journal_oom_retry policy, while jbd_rep_kmalloc always retries.
 */
#define jbd_kmalloc(size, flags) \
	__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
#define jbd_rep_kmalloc(size, flags) \
	__jbd_kmalloc(__FUNCTION__, (size), (flags), 1)
/*
 * jbd_alloc - allocate a buffer of @size bytes straight from the page
 * allocator (never from a slab cache, so the result is safe to hand to
 * the block layer).
 *
 * @size:  number of bytes needed; rounded up to a power-of-two number
 *         of pages via get_order().
 * @flags: gfp mask passed through to the page allocator.
 *
 * Returns the buffer's kernel virtual address, or NULL on failure.
 * Pair with jbd_free(), passing the same @size.
 */
static inline void *jbd_alloc(size_t size, gfp_t flags)
{
	unsigned long addr = __get_free_pages(flags, get_order(size));

	return (void *)addr;
}
/*
 * jbd_free - release a buffer obtained from jbd_alloc().
 *
 * @ptr:  address previously returned by jbd_alloc().
 * @size: the size originally requested; must match the jbd_alloc()
 *        call so get_order() computes the same page order.
 *
 * Fix: dropped the stray semicolon after the closing brace -- a
 * function definition is not a declaration, and `};` at file scope is
 * ill-formed in strict ISO C (flagged by -Wpedantic).
 */
static inline void jbd_free(void *ptr, size_t size)
{
	free_pages((unsigned long)ptr, get_order(size));
}
/* Smallest permitted journal size, in blocks -- presumably enforced
 * when a journal is created; confirm against journal_init callers. */
#define JFS_MIN_JOURNAL_BLOCKS 1024
|
||||
#ifdef __KERNEL__
|
||||
|
Reference in New Issue
Block a user