cpu hotplug: slab: cleanup cpuup_callback()
cpuup_callback() is too long. This patch factors the CPU_UP_CANCELED and CPU_UP_PREPARE handling out of cpuup_callback() into two helpers, cpuup_canceled() and cpuup_prepare().

Cc: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent 6c72ffaab9
commit fbf1e473bd

mm/slab.c | 313
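Viewed at a distance, the change follows the usual notifier-cleanup pattern: each oversized case body becomes its own helper, and the callback keeps only the dispatch and the error propagation, which is what lets CPU_UP_PREPARE report failure through the err -> NOTIFY_BAD mapping in the second hunk. Below is a minimal userspace sketch of that shape; the demo_* names and the integer action codes are made-up placeholders for illustration, not the kernel's API or the slab code itself.

/*
 * Sketch of the refactoring pattern only; demo_prepare()/demo_canceled()
 * stand in for cpuup_prepare()/cpuup_canceled(), and the action and
 * notify codes below are invented for this example.
 */
#include <stdio.h>

enum { DEMO_UP_PREPARE = 1, DEMO_UP_CANCELED = 2 };
enum { DEMO_NOTIFY_OK = 0, DEMO_NOTIFY_BAD = -1 };

/* Set up per-CPU state; return 0 on success or a negative error code. */
static int demo_prepare(long cpu)
{
	printf("prepare cpu %ld\n", cpu);
	return 0;
}

/* Undo whatever demo_prepare() managed to set up. */
static void demo_canceled(long cpu)
{
	printf("cancel cpu %ld\n", cpu);
}

/* After the refactoring, the callback only dispatches and maps errors. */
static int demo_callback(unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case DEMO_UP_PREPARE:
		err = demo_prepare(cpu);
		break;
	case DEMO_UP_CANCELED:
		demo_canceled(cpu);
		break;
	}
	return err ? DEMO_NOTIFY_BAD : DEMO_NOTIFY_OK;
}

int main(void)
{
	demo_callback(DEMO_UP_PREPARE, (void *)3L);
	demo_callback(DEMO_UP_CANCELED, (void *)3L);
	return 0;
}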
@@ -1156,105 +1156,181 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 }
 #endif
 
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
-				    unsigned long action, void *hcpu)
+static void __cpuinit cpuup_canceled(long cpu)
+{
+	struct kmem_cache *cachep;
+	struct kmem_list3 *l3 = NULL;
+	int node = cpu_to_node(cpu);
+
+	list_for_each_entry(cachep, &cache_chain, next) {
+		struct array_cache *nc;
+		struct array_cache *shared;
+		struct array_cache **alien;
+		cpumask_t mask;
+
+		mask = node_to_cpumask(node);
+		/* cpu is dead; no one can alloc from it. */
+		nc = cachep->array[cpu];
+		cachep->array[cpu] = NULL;
+		l3 = cachep->nodelists[node];
+
+		if (!l3)
+			goto free_array_cache;
+
+		spin_lock_irq(&l3->list_lock);
+
+		/* Free limit for this kmem_list3 */
+		l3->free_limit -= cachep->batchcount;
+		if (nc)
+			free_block(cachep, nc->entry, nc->avail, node);
+
+		if (!cpus_empty(mask)) {
+			spin_unlock_irq(&l3->list_lock);
+			goto free_array_cache;
+		}
+
+		shared = l3->shared;
+		if (shared) {
+			free_block(cachep, shared->entry,
+				   shared->avail, node);
+			l3->shared = NULL;
+		}
+
+		alien = l3->alien;
+		l3->alien = NULL;
+
+		spin_unlock_irq(&l3->list_lock);
+
+		kfree(shared);
+		if (alien) {
+			drain_alien_cache(cachep, alien);
+			free_alien_cache(alien);
+		}
+free_array_cache:
+		kfree(nc);
+	}
+	/*
+	 * In the previous loop, all the objects were freed to
+	 * the respective cache's slabs, now we can go ahead and
+	 * shrink each nodelist to its limit.
+	 */
+	list_for_each_entry(cachep, &cache_chain, next) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+		drain_freelist(cachep, l3, l3->free_objects);
+	}
+}
+
+static int __cpuinit cpuup_prepare(long cpu)
 {
-	long cpu = (long)hcpu;
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
 	const int memsize = sizeof(struct kmem_list3);
 
+	/*
+	 * We need to do this right in the beginning since
+	 * alloc_arraycache's are going to use this list.
+	 * kmalloc_node allows us to add the slab to the right
+	 * kmem_list3 and not this cpu's kmem_list3
+	 */
+
+	list_for_each_entry(cachep, &cache_chain, next) {
+		/*
+		 * Set up the size64 kmemlist for cpu before we can
+		 * begin anything. Make sure some other cpu on this
+		 * node has not already allocated this
+		 */
+		if (!cachep->nodelists[node]) {
+			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
+			if (!l3)
+				goto bad;
+			kmem_list3_init(l3);
+			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+
+			/*
+			 * The l3s don't come and go as CPUs come and
+			 * go. cache_chain_mutex is sufficient
+			 * protection here.
+			 */
+			cachep->nodelists[node] = l3;
+		}
+
+		spin_lock_irq(&cachep->nodelists[node]->list_lock);
+		cachep->nodelists[node]->free_limit =
+			(1 + nr_cpus_node(node)) *
+			cachep->batchcount + cachep->num;
+		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+	}
+
+	/*
+	 * Now we can go ahead with allocating the shared arrays and
+	 * array caches
+	 */
+	list_for_each_entry(cachep, &cache_chain, next) {
+		struct array_cache *nc;
+		struct array_cache *shared = NULL;
+		struct array_cache **alien = NULL;
+
+		nc = alloc_arraycache(node, cachep->limit,
+					cachep->batchcount);
+		if (!nc)
+			goto bad;
+		if (cachep->shared) {
+			shared = alloc_arraycache(node,
+				cachep->shared * cachep->batchcount,
+				0xbaadf00d);
+			if (!shared)
+				goto bad;
+		}
+		if (use_alien_caches) {
+			alien = alloc_alien_cache(node, cachep->limit);
+			if (!alien)
+				goto bad;
+		}
+		cachep->array[cpu] = nc;
+		l3 = cachep->nodelists[node];
+		BUG_ON(!l3);
+
+		spin_lock_irq(&l3->list_lock);
+		if (!l3->shared) {
+			/*
+			 * We are serialised from CPU_DEAD or
+			 * CPU_UP_CANCELLED by the cpucontrol lock
+			 */
+			l3->shared = shared;
+			shared = NULL;
+		}
+#ifdef CONFIG_NUMA
+		if (!l3->alien) {
+			l3->alien = alien;
+			alien = NULL;
+		}
+#endif
+		spin_unlock_irq(&l3->list_lock);
+		kfree(shared);
+		free_alien_cache(alien);
+	}
+	return 0;
+bad:
+	return -ENOMEM;
+}
+
+static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+				    unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+	int err = 0;
+
 	switch (action) {
 	case CPU_LOCK_ACQUIRE:
 		mutex_lock(&cache_chain_mutex);
 		break;
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		/*
-		 * We need to do this right in the beginning since
-		 * alloc_arraycache's are going to use this list.
-		 * kmalloc_node allows us to add the slab to the right
-		 * kmem_list3 and not this cpu's kmem_list3
-		 */
-
-		list_for_each_entry(cachep, &cache_chain, next) {
-			/*
-			 * Set up the size64 kmemlist for cpu before we can
-			 * begin anything. Make sure some other cpu on this
-			 * node has not already allocated this
-			 */
-			if (!cachep->nodelists[node]) {
-				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
-				if (!l3)
-					goto bad;
-				kmem_list3_init(l3);
-				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-
-				/*
-				 * The l3s don't come and go as CPUs come and
-				 * go. cache_chain_mutex is sufficient
-				 * protection here.
-				 */
-				cachep->nodelists[node] = l3;
-			}
-
-			spin_lock_irq(&cachep->nodelists[node]->list_lock);
-			cachep->nodelists[node]->free_limit =
-				(1 + nr_cpus_node(node)) *
-				cachep->batchcount + cachep->num;
-			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
-		}
-
-		/*
-		 * Now we can go ahead with allocating the shared arrays and
-		 * array caches
-		 */
-		list_for_each_entry(cachep, &cache_chain, next) {
-			struct array_cache *nc;
-			struct array_cache *shared = NULL;
-			struct array_cache **alien = NULL;
-
-			nc = alloc_arraycache(node, cachep->limit,
-						cachep->batchcount);
-			if (!nc)
-				goto bad;
-			if (cachep->shared) {
-				shared = alloc_arraycache(node,
-					cachep->shared * cachep->batchcount,
-					0xbaadf00d);
-				if (!shared)
-					goto bad;
-			}
-			if (use_alien_caches) {
-				alien = alloc_alien_cache(node, cachep->limit);
-				if (!alien)
-					goto bad;
-			}
-			cachep->array[cpu] = nc;
-			l3 = cachep->nodelists[node];
-			BUG_ON(!l3);
-
-			spin_lock_irq(&l3->list_lock);
-			if (!l3->shared) {
-				/*
-				 * We are serialised from CPU_DEAD or
-				 * CPU_UP_CANCELLED by the cpucontrol lock
-				 */
-				l3->shared = shared;
-				shared = NULL;
-			}
-#ifdef CONFIG_NUMA
-			if (!l3->alien) {
-				l3->alien = alien;
-				alien = NULL;
-			}
-#endif
-			spin_unlock_irq(&l3->list_lock);
-			kfree(shared);
-			free_alien_cache(alien);
-		}
+		err = cpuup_prepare(cpu);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -1291,72 +1367,13 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		list_for_each_entry(cachep, &cache_chain, next) {
-			struct array_cache *nc;
-			struct array_cache *shared;
-			struct array_cache **alien;
-			cpumask_t mask;
-
-			mask = node_to_cpumask(node);
-			/* cpu is dead; no one can alloc from it. */
-			nc = cachep->array[cpu];
-			cachep->array[cpu] = NULL;
-			l3 = cachep->nodelists[node];
-
-			if (!l3)
-				goto free_array_cache;
-
-			spin_lock_irq(&l3->list_lock);
-
-			/* Free limit for this kmem_list3 */
-			l3->free_limit -= cachep->batchcount;
-			if (nc)
-				free_block(cachep, nc->entry, nc->avail, node);
-
-			if (!cpus_empty(mask)) {
-				spin_unlock_irq(&l3->list_lock);
-				goto free_array_cache;
-			}
-
-			shared = l3->shared;
-			if (shared) {
-				free_block(cachep, shared->entry,
-					   shared->avail, node);
-				l3->shared = NULL;
-			}
-
-			alien = l3->alien;
-			l3->alien = NULL;
-
-			spin_unlock_irq(&l3->list_lock);
-
-			kfree(shared);
-			if (alien) {
-				drain_alien_cache(cachep, alien);
-				free_alien_cache(alien);
-			}
-free_array_cache:
-			kfree(nc);
-		}
-		/*
-		 * In the previous loop, all the objects were freed to
-		 * the respective cache's slabs, now we can go ahead and
-		 * shrink each nodelist to its limit.
-		 */
-		list_for_each_entry(cachep, &cache_chain, next) {
-			l3 = cachep->nodelists[node];
-			if (!l3)
-				continue;
-			drain_freelist(cachep, l3, l3->free_objects);
-		}
+		cpuup_canceled(cpu);
 		break;
 	case CPU_LOCK_RELEASE:
 		mutex_unlock(&cache_chain_mutex);
 		break;
 	}
-	return NOTIFY_OK;
-bad:
-	return NOTIFY_BAD;
+	return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
 static struct notifier_block __cpuinitdata cpucache_notifier = {