SLUB: add support for dynamic cacheline size determination
SLUB currently assumes that the cacheline size is static. However, i386, for example, supports dynamic cache line size determination. Use cache_line_size() instead of L1_CACHE_BYTES in the allocator.

That also explains the purpose of SLAB_HWCACHE_ALIGN: we need to keep it around to allow dynamic alignment of objects depending on the cache line size determined at boot.

[akpm@linux-foundation.org: need to define it before we use it]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 65c02d4cfb
parent 97416ce82e
Committed by: Linus Torvalds

 mm/slub.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -157,6 +157,11 @@
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000 /* Poison object */
 
+/* Not all arches define cache_line_size */
+#ifndef cache_line_size
+#define cache_line_size() L1_CACHE_BYTES
+#endif
+
 static int kmem_size = sizeof(struct kmem_cache);
 
 #ifdef CONFIG_SMP
@@ -1480,8 +1485,8 @@ static unsigned long calculate_alignment(unsigned long flags,
	 * then use it.
	 */
	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > L1_CACHE_BYTES / 2)
-		return max_t(unsigned long, align, L1_CACHE_BYTES);
+			size > cache_line_size() / 2)
+		return max_t(unsigned long, align, cache_line_size());
 
	if (align < ARCH_SLAB_MINALIGN)
		return ARCH_SLAB_MINALIGN;
@@ -1667,8 +1672,8 @@ static int calculate_sizes(struct kmem_cache *s)
		size += sizeof(void *);
	/*
	 * Determine the alignment based on various parameters that the
-	 * user specified (this is unecessarily complex due to the attempt
-	 * to be compatible with SLAB. Should be cleaned up some day).
+	 * user specified and the dynamic determination of cache line size
+	 * on bootup.
	 */
	align = calculate_alignment(flags, align, s->objsize);
 
@@ -2280,7 +2285,7 @@ void __init kmem_cache_init(void)
 
	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
		" Processors=%d, Nodes=%d\n",
-		KMALLOC_SHIFT_HIGH, L1_CACHE_BYTES,
+		KMALLOC_SHIFT_HIGH, cache_line_size(),
		slub_min_order, slub_max_order, slub_min_objects,
		nr_cpu_ids, nr_node_ids);
 }
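For illustration, the effect of the changed alignment logic can be exercised outside the kernel. The sketch below is a minimal userspace approximation, not kernel code: the constant values for L1_CACHE_BYTES, ARCH_SLAB_MINALIGN and SLAB_HWCACHE_ALIGN are stand-ins chosen for the example, and the function mirrors only the branch touched by this patch, omitting the final pointer-size rounding that the real calculate_alignment() performs.

/*
 * Minimal userspace sketch of the alignment decision after this patch.
 * The macro names mirror the kernel ones; the values are illustrative.
 */
#include <stdio.h>

#define L1_CACHE_BYTES		64
#define ARCH_SLAB_MINALIGN	8
#define SLAB_HWCACHE_ALIGN	0x00002000UL

/* Fallback mirroring the hunk at mm/slub.c:157: arches without a
 * dynamic cache_line_size() fall back to the static L1_CACHE_BYTES. */
#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES
#endif

#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

/* Same shape as the patched calculate_alignment(): align to the
 * (possibly boot-determined) cache line only for objects larger than
 * half a cache line, otherwise honour the requested/minimum alignment. */
static unsigned long calculate_alignment(unsigned long flags,
					 unsigned long align,
					 unsigned long size)
{
	if ((flags & SLAB_HWCACHE_ALIGN) && size > cache_line_size() / 2)
		return max_t(unsigned long, align, cache_line_size());

	if (align < ARCH_SLAB_MINALIGN)
		return ARCH_SLAB_MINALIGN;

	return align;
}

int main(void)
{
	/* A 96-byte object asking for hardware cache alignment gets the
	 * full cache line; a 16-byte one only gets the minimum. */
	printf("align(96) = %lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 96));
	printf("align(16) = %lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 16));
	return 0;
}

With a 64-byte stand-in cache line this prints align(96) = 64 and align(16) = 8, which is the behaviour the patch preserves while making the cache line size a runtime value rather than a compile-time constant.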