Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm
* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slub: Support 4k kmallocs again to compensate for page allocator slowness
  slub: Fallback to kmalloc_large for failing higher order allocs
  slub: Determine gfpflags once and not every time a slab is allocated
  make slub.c:slab_address() static
  slub: kmalloc page allocator pass-through cleanup
  slab: avoid double initialization & do initialization in 1 place
include/linux/slub_def.h

@@ -71,6 +71,7 @@ struct kmem_cache {
 
 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in slab */
+	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
 	int inuse;		/* Offset to metadata */
@@ -110,7 +111,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -188,12 +189,16 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE / 2)
-			return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+		if (size > PAGE_SIZE)
+			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -214,7 +219,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
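The two hunks above raise the inlined kmalloc() cut-off from PAGE_SIZE/2 to PAGE_SIZE and send anything larger through the new kmalloc_large() helper, i.e. straight to the page allocator as a compound page. As a rough, compilable userspace model of that dispatch (not kernel code: PAGE_SIZE is hard-coded and both back-ends are stand-ins backed by malloc()):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for the per-size slab caches (8..4096 bytes after this merge). */
static void *alloc_from_size_cache(size_t size)
{
	printf("served from a kmalloc size cache (%zu bytes)\n", size);
	return malloc(size);
}

/* Model of kmalloc_large(): hand the request straight to the page allocator. */
static void *model_kmalloc_large(size_t size)
{
	printf("passed through to the page allocator (%zu bytes)\n", size);
	return malloc(size);
}

/* Model of the new dispatch: only sizes above one page bypass the slab layer. */
static void *model_kmalloc(size_t size)
{
	if (size > PAGE_SIZE)
		return model_kmalloc_large(size);
	return alloc_from_size_cache(size);
}

int main(void)
{
	void *a = model_kmalloc(PAGE_SIZE);     /* 4k: a slab allocation again */
	void *b = model_kmalloc(PAGE_SIZE + 1); /* >4k: page allocator pass-through */

	free(a);
	free(b);
	return 0;
}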
mm/slab.c

@@ -2630,6 +2630,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	slabp->colouroff = colour_off;
 	slabp->s_mem = objp + colour_off;
 	slabp->nodeid = nodeid;
+	slabp->free = 0;
 	return slabp;
 }
 
@@ -2683,7 +2684,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		slab_bufctl(slabp)[i] = i + 1;
 	}
 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
-	slabp->free = 0;
 }
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2816,7 +2816,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	if (!slabp)
 		goto opps1;
 
-	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
 	cache_init_objs(cachep, slabp);
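The three mm/slab.c hunks above implement "do initialization in 1 place": slabp->free and slabp->nodeid are now set only in alloc_slabmgmt(), and the duplicate assignments in cache_init_objs() and cache_grow() are dropped. A minimal userspace sketch of the same pattern, with an invented slab_desc type standing in for struct slab:

#include <stdlib.h>

/* Hypothetical, simplified slab descriptor; field names mirror struct slab. */
struct slab_desc {
	unsigned int colouroff;
	void *s_mem;
	int nodeid;
	unsigned int free;
};

/*
 * Initialize every descriptor field in the function that creates it,
 * so callers never have to repeat (or forget) part of the setup.
 */
static struct slab_desc *alloc_slab_desc(void *objp, unsigned int colour_off,
					 int nodeid)
{
	struct slab_desc *slabp = malloc(sizeof(*slabp));

	if (!slabp)
		return NULL;
	slabp->colouroff = colour_off;
	slabp->s_mem = (char *)objp + colour_off;
	slabp->nodeid = nodeid;
	slabp->free = 0;	/* previously set again by the callers */
	return slabp;
}

int main(void)
{
	char page[4096];
	struct slab_desc *slabp = alloc_slab_desc(page, 0, -1);

	free(slabp);
	return 0;
}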
mm/slub.c

@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -308,7 +310,7 @@ static inline int is_end(void *addr)
 	return (unsigned long)addr & PAGE_MAPPING_ANON;
 }
 
-void *slab_address(struct page *page)
+static void *slab_address(struct page *page)
 {
 	return page->end - PAGE_MAPPING_ANON;
 }
@@ -1078,14 +1080,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int pages = 1 << s->order;
 
-	if (s->order)
-		flags |= __GFP_COMP;
-
-	if (s->flags & SLAB_CACHE_DMA)
-		flags |= SLUB_DMA;
-
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
-		flags |= __GFP_RECLAIMABLE;
+	flags |= s->allocflags;
 
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
@@ -1546,7 +1541,6 @@ load_freelist:
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-out:
 #ifdef SLUB_FASTPATH
 	local_irq_restore(flags);
 #endif
@@ -1581,8 +1575,24 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-	object = NULL;
-	goto out;
+#ifdef SLUB_FASTPATH
+	local_irq_restore(flags);
+#endif
+	/*
+	 * No memory available.
+	 *
+	 * If the slab uses higher order allocs but the object is
+	 * smaller than a page size then we can fallback in emergencies
+	 * to the page allocator via kmalloc_large. The page allocator may
+	 * have failed to obtain a higher order page and we can try to
+	 * allocate a single page if the object fits into a single page.
+	 * That is only possible if certain conditions are met that are being
+	 * checked when a slab is created.
+	 */
+	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+		return kmalloc_large(s->objsize, gfpflags);
+
+	return NULL;
 debug:
 	object = c->page->freelist;
 	if (!alloc_debug_processing(s, c->page, object, addr))
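The rewritten tail of __slab_alloc() above only gives up after checking the new emergency path: if the cache was created with __PAGE_ALLOC_FALLBACK and the caller did not pass __GFP_NORETRY, the object comes from the page allocator via kmalloc_large() instead of the allocation failing outright. A small userspace model of just that decision (the flag values and names below are stand-ins, not the kernel's):

#include <stdio.h>

/* Stand-in flag bits for the model (values are arbitrary here). */
#define MODEL_GFP_NORETRY		0x1
#define MODEL_PAGE_ALLOC_FALLBACK	0x2

struct model_cache {
	unsigned long flags;
	unsigned int objsize;
};

/*
 * Decide what to do when a higher-order slab allocation has just failed:
 * fall back to a single-page allocation only when the cache allows it
 * and the caller has not asked us to avoid retries.
 */
static const char *on_slab_alloc_failure(const struct model_cache *s,
					 unsigned long gfpflags)
{
	if (!(gfpflags & MODEL_GFP_NORETRY) &&
	    (s->flags & MODEL_PAGE_ALLOC_FALLBACK))
		return "fallback: kmalloc_large(objsize)";
	return "fail: return NULL";
}

int main(void)
{
	struct model_cache kmalloc_4k = {
		.flags = MODEL_PAGE_ALLOC_FALLBACK,
		.objsize = 4096,
	};

	printf("%s\n", on_slab_alloc_failure(&kmalloc_4k, 0));
	printf("%s\n", on_slab_alloc_failure(&kmalloc_4k, MODEL_GFP_NORETRY));
	return 0;
}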
@@ -2329,10 +2339,33 @@ static int calculate_sizes(struct kmem_cache *s)
 	size = ALIGN(size, align);
 	s->size = size;
 
-	s->order = calculate_order(size);
+	if ((flags & __KMALLOC_CACHE) &&
+			PAGE_SIZE / size < slub_min_objects) {
+		/*
+		 * Kmalloc cache that would not have enough objects in
+		 * an order 0 page. Kmalloc slabs can fallback to
+		 * page allocator order 0 allocs so take a reasonably large
+		 * order that will allows us a good number of objects.
+		 */
+		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+		s->flags |= __PAGE_ALLOC_FALLBACK;
+		s->allocflags |= __GFP_NOWARN;
+	} else
+		s->order = calculate_order(size);
+
 	if (s->order < 0)
 		return 0;
 
+	s->allocflags = 0;
+	if (s->order)
+		s->allocflags |= __GFP_COMP;
+
+	if (s->flags & SLAB_CACHE_DMA)
+		s->allocflags |= SLUB_DMA;
+
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		s->allocflags |= __GFP_RECLAIMABLE;
+
 	/*
 	 * Determine the number of objects per slab
 	 */
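This hunk is where "determine gfpflags once" happens: the gfp bits that depend only on the cache (__GFP_COMP, SLUB_DMA, __GFP_RECLAIMABLE) are computed in calculate_sizes() and stored in s->allocflags, so allocate_slab() earlier in the diff just ORs in one precomputed word. A rough userspace model of compute-once-at-creation, reuse-per-allocation (all flag constants below are placeholders):

#include <stdio.h>

/* Placeholder bits standing in for __GFP_COMP, SLUB_DMA, __GFP_RECLAIMABLE. */
#define MODEL_GFP_COMP		0x01
#define MODEL_GFP_DMA		0x02
#define MODEL_GFP_RECLAIMABLE	0x04

#define MODEL_SLAB_CACHE_DMA		0x10
#define MODEL_SLAB_RECLAIM_ACCOUNT	0x20

struct model_cache {
	int order;
	unsigned long flags;		/* cache properties, fixed at creation */
	unsigned long allocflags;	/* derived gfp bits, computed once */
};

/* Done once, at cache creation time (the calculate_sizes() part). */
static void compute_allocflags(struct model_cache *s)
{
	s->allocflags = 0;
	if (s->order)
		s->allocflags |= MODEL_GFP_COMP;
	if (s->flags & MODEL_SLAB_CACHE_DMA)
		s->allocflags |= MODEL_GFP_DMA;
	if (s->flags & MODEL_SLAB_RECLAIM_ACCOUNT)
		s->allocflags |= MODEL_GFP_RECLAIMABLE;
}

/* Done on every slab allocation (the allocate_slab() part): one OR, no branches. */
static unsigned long alloc_gfp_mask(const struct model_cache *s,
				    unsigned long caller_flags)
{
	return caller_flags | s->allocflags;
}

int main(void)
{
	struct model_cache s = { .order = 2, .flags = MODEL_SLAB_RECLAIM_ACCOUNT };

	compute_allocflags(&s);
	printf("gfp mask for this alloc: %#lx\n", alloc_gfp_mask(&s, 0));
	return 0;
}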
@@ -2484,11 +2517,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2536,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-							flags, NULL))
+					flags | __KMALLOC_CACHE, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2670,9 +2703,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -2688,9 +2720,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -3001,7 +3032,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3028,7 +3059,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
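Because kmalloc_caches now has PAGE_SHIFT + 1 entries and the init loops above run up to and including PAGE_SHIFT, a kmalloc-4096 cache exists again on 4k-page systems. A small userspace check of which power-of-two class a request lands in, assuming PAGE_SHIFT is 12 and the smallest class is 8 bytes:

#include <stdio.h>

#define PAGE_SHIFT		12
#define KMALLOC_SHIFT_LOW	3	/* assumed smallest general cache: 8 bytes */

/* Smallest power-of-two class index that can hold `size` bytes. */
static int kmalloc_class(unsigned long size)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
		if (size <= (1UL << i))
			return i;
	return -1;	/* larger than a page: handled by kmalloc_large() */
}

int main(void)
{
	/* 4096 bytes now lands in kmalloc-4096 instead of the page allocator. */
	printf("kmalloc(4096) -> kmalloc-%d\n", 1 << kmalloc_class(4096));
	printf("kmalloc(5000) -> class %d (pass-through)\n", kmalloc_class(5000));
	return 0;
}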
@@ -3057,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
+	if ((s->flags & __PAGE_ALLOC_FALLBACK))
+		return 1;
+
 	if (s->ctor)
 		return 1;
 
@@ -3218,9 +3252,9 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3234,9 +3268,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))