Merge branches 'slab/fixes', 'slab/kmemleak', 'slub/perf' and 'slub/stats' into for-linus
mm/slab.c | 146
@@ -604,67 +604,6 @@ static struct kmem_cache cache_cache = {

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node goes down and
 * then comes back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static inline void init_lock_keys(void)

{
        int q;
        struct cache_sizes *s = malloc_sizes;

        while (s->cs_size != ULONG_MAX) {
                for_each_node(q) {
                        struct array_cache **alc;
                        int r;
                        struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
                        if (!l3 || OFF_SLAB(s->cs_cachep))
                                continue;
                        lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
                        alc = l3->alien;
                        /*
                         * FIXME: This check for BAD_ALIEN_MAGIC
                         * should go away when common slab code is taught to
                         * work even without alien caches.
                         * Currently, non NUMA code returns BAD_ALIEN_MAGIC
                         * for alloc_alien_cache,
                         */
                        if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
                                continue;
                        for_each_node(r) {
                                if (alc[r])
                                        lockdep_set_class(&alc[r]->lock,
                                                          &on_slab_alc_key);
                        }
                }
                s++;
        }
}
#else
static inline void init_lock_keys(void)
{
}
#endif

/*
 * Guard access to the cache-chain.
 */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
@@ -685,6 +624,79 @@ int slab_is_available(void)
        return g_cpucache_up >= EARLY;
}

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node goes down and
 * then comes back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static void init_node_lock_keys(int q)
{
        struct cache_sizes *s = malloc_sizes;

        if (g_cpucache_up != FULL)
                return;

        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
                struct array_cache **alc;
                struct kmem_list3 *l3;
                int r;

                l3 = s->cs_cachep->nodelists[q];
                if (!l3 || OFF_SLAB(s->cs_cachep))
                        return;
                lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
                alc = l3->alien;
                /*
                 * FIXME: This check for BAD_ALIEN_MAGIC
                 * should go away when common slab code is taught to
                 * work even without alien caches.
                 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
                 * for alloc_alien_cache,
                 */
                if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
                        return;
                for_each_node(r) {
                        if (alc[r])
                                lockdep_set_class(&alc[r]->lock,
                                                  &on_slab_alc_key);
                }
        }
}

static inline void init_lock_keys(void)
{
        int node;

        for_each_node(node)
                init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}
#endif

/*
 * Guard access to the cache-chain.
 */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

static DEFINE_PER_CPU(struct delayed_work, reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
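The hunk above moves the lockdep annotations into init_node_lock_keys(), which re-keys the per-node list_lock and alien-cache locks of on-slab caches so the nesting described in the comment does not look like a self-deadlock to lockdep. The fragment below is a minimal, hypothetical sketch of that lockdep pattern only, not the slab code itself; all names in it (demo_locks, demo_nests_in_key, demo_init, demo_nested_use) are invented for illustration.

/*
 * Locks initialised from the same spin_lock_init() call site share one
 * lock class, so taking one while holding another of that class looks
 * like recursive locking to lockdep.  Giving the lock that legitimately
 * nests its own lock_class_key (as init_node_lock_keys() does with
 * on_slab_l3_key and on_slab_alc_key) silences the false positive.
 */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

#define DEMO_NR_LOCKS 2

static spinlock_t demo_locks[DEMO_NR_LOCKS];
static struct lock_class_key demo_nests_in_key;

static void demo_init(void)
{
        int i;

        /* One shared class for every element of the array. */
        for (i = 0; i < DEMO_NR_LOCKS; i++)
                spin_lock_init(&demo_locks[i]);

        /* Move the lock that is allowed to nest into its own class. */
        lockdep_set_class(&demo_locks[1], &demo_nests_in_key);
}

static void demo_nested_use(void)
{
        spin_lock(&demo_locks[0]);
        spin_lock(&demo_locks[1]);      /* distinct class: no lockdep report */
        spin_unlock(&demo_locks[1]);
        spin_unlock(&demo_locks[0]);
}

Without the lockdep_set_class() call in demo_init(), the nested acquisition in demo_nested_use() would be reported as a possible deadlock even though the two locks are different objects, which is exactly the situation the slab code runs into when off-slab headers live in kmalloc caches.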
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
                kfree(shared);
                free_alien_cache(alien);
        }
        init_node_lock_keys(node);

        return 0;
bad:
        cpuup_canceled(cpu);
@@ -3103,13 +3117,19 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
        } else {
                STATS_INC_ALLOCMISS(cachep);
                objp = cache_alloc_refill(cachep, flags);
                /*
                 * the 'ac' may be updated by cache_alloc_refill(),
                 * and kmemleak_erase() requires its correct value.
                 */
                ac = cpu_cache_get(cachep);
        }
        /*
         * To avoid a false negative, if an object that is in one of the
         * per-CPU caches is leaked, we need to make sure kmemleak doesn't
         * treat the array pointers as a reference to the object.
         */
        kmemleak_erase(&ac->entry[ac->avail]);
        if (objp)
                kmemleak_erase(&ac->entry[ac->avail]);
        return objp;
}
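The hunk above does two things: it re-reads 'ac' with cpu_cache_get() after cache_alloc_refill(), because the refill path can replace the per-CPU array_cache behind the cached pointer, and it calls kmemleak_erase() only when an object was actually returned. Below is a hypothetical plain-C sketch of the stale-pointer half of that fix; every name in it (demo_percpu, demo_cpu_cache_get, demo_refill, demo_alloc) is invented for illustration and it is not the slab code itself.

/*
 * A refill helper may install a brand-new per-CPU cache, so a pointer
 * fetched before the call must be re-fetched before it is dereferenced.
 */
#include <stdlib.h>

struct demo_percpu {
        int *entry;     /* stands in for ac->entry */
        int  avail;     /* stands in for ac->avail */
};

/* Stands in for the per-CPU array_cache pointer the real code caches. */
static struct demo_percpu *demo_cpu_cache;

static struct demo_percpu *demo_cpu_cache_get(void)    /* like cpu_cache_get() */
{
        return demo_cpu_cache;
}

/* Like cache_alloc_refill(): may replace the per-CPU cache entirely. */
static void demo_refill(void)
{
        struct demo_percpu *fresh = calloc(1, sizeof(*fresh));

        fresh->entry = calloc(16, sizeof(*fresh->entry));
        fresh->avail = 16;

        if (demo_cpu_cache) {
                free(demo_cpu_cache->entry);
                free(demo_cpu_cache);
        }
        demo_cpu_cache = fresh;
}

static int demo_alloc(void)
{
        struct demo_percpu *ac = demo_cpu_cache_get();

        if (!ac || !ac->avail)
                demo_refill();

        /*
         * Re-fetch: demo_refill() may have swapped the per-CPU cache,
         * which is why ____cache_alloc() calls cpu_cache_get() again
         * before touching ac->entry[ac->avail].
         */
        ac = demo_cpu_cache_get();
        return ac->entry[--ac->avail];
}

Calling demo_alloc() without the re-fetch would index through the freed, stale 'ac' after a refill, which is the same class of bug the kmemleak_erase() call would have hit.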
@@ -3306,7 +3326,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
        cache_alloc_debugcheck_before(cachep, flags);
        local_irq_save(save_flags);

        if (unlikely(nodeid == -1))
        if (nodeid == -1)
                nodeid = numa_node_id();

        if (unlikely(!cachep->nodelists[nodeid])) {