SLUB: Introduce and use SLUB_MAX_SIZE and SLUB_PAGE_SHIFT constants
As a preparatory patch for bumping the page allocator pass-through threshold, introduce two new constants, SLUB_MAX_SIZE and SLUB_PAGE_SHIFT, and convert mm/slub.c to use them.

Reported-by: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Tested-by: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
commit fe1200b63d
parent b578f3fcca
committed by Pekka Enberg
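Note: the hunks below cover only mm/slub.c; the header change that actually defines the two constants is not part of this excerpt. Since every substitution is behavior-preserving (arrays sized PAGE_SHIFT + 1 become SLUB_PAGE_SHIFT, and the size > PAGE_SIZE pass-through check becomes size > SLUB_MAX_SIZE), the definitions, presumably in include/linux/slub_def.h, must be equivalent to this sketch inferred from the diff:

	/*
	 * Largest kmalloc object size served from SLUB's kmalloc caches;
	 * anything bigger is passed through to the page allocator.
	 * (Value inferred from the substitutions in this patch.)
	 */
	#define SLUB_MAX_SIZE (PAGE_SIZE)

	/*
	 * Number of kmalloc cache slots: one for each power of two from
	 * 1 << KMALLOC_SHIFT_LOW up to and including 1 << PAGE_SHIFT.
	 */
	#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)

With the constants in place, the follow-up patch that raises the pass-through threshold only has to touch these two definitions rather than every use site.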
 mm/slub.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
@@ -2475,7 +2475,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2537,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2658,7 +2658,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2686,7 +2686,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, flags, node);
 
 	s = get_slab(size, flags);
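For context on the pass-through path: when a request exceeds SLUB_MAX_SIZE, both __kmalloc() and __kmalloc_node() skip the slab caches entirely. A minimal sketch of what kmalloc_large() did in kernels of this vintage (the real helper lives in include/linux/slub_def.h and its exact body may differ):

	static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
	{
		/* Hand the request straight to the page allocator,
		 * rounded up to a whole number of pages; __GFP_COMP
		 * makes the pages a compound page so kfree() can find
		 * the allocation's order later. */
		return (void *)__get_free_pages(flags | __GFP_COMP,
						get_order(size));
	}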
@@ -2985,7 +2985,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3022,7 +3022,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
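Both loop conversions preserve the iteration count: i <= PAGE_SHIFT is the same bound as i < PAGE_SHIFT + 1, i.e. i < SLUB_PAGE_SHIFT. Assuming typical values (PAGE_SHIFT = 12 and KMALLOC_SHIFT_LOW = 3 on most configurations), the kernel still creates and names caches for sizes 1 << 3 = 8 up to 1 << 12 = 4096 bytes.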
@@ -3222,7 +3222,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3238,7 +3238,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);