slub: free_list() cleanup
free_list looked a bit screwy so here is an attempt to clean it up. free_list is only used for freeing partial lists. We do not need to return a parameter if we decrement nr_partial within the function, which allows a simplification of the whole thing. The current version modifies nr_partial outside of the list_lock, which is technically not correct. It was only ok because we should be the only user of this slab cache at this point. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
This commit is contained in:
committed by
Pekka Enberg
parent
d629d81957
commit
599870b175
18
mm/slub.c
18
mm/slub.c
@@ -2372,25 +2372,21 @@ const char *kmem_cache_name(struct kmem_cache *s)
|
|||||||
EXPORT_SYMBOL(kmem_cache_name);
|
EXPORT_SYMBOL(kmem_cache_name);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Attempt to free all slabs on a node. Return the number of slabs we
|
* Attempt to free all partial slabs on a node.
|
||||||
* were unable to free.
|
|
||||||
*/
|
*/
|
||||||
static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
|
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
|
||||||
struct list_head *list)
|
|
||||||
{
|
{
|
||||||
int slabs_inuse = 0;
|
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
struct page *page, *h;
|
struct page *page, *h;
|
||||||
|
|
||||||
spin_lock_irqsave(&n->list_lock, flags);
|
spin_lock_irqsave(&n->list_lock, flags);
|
||||||
list_for_each_entry_safe(page, h, list, lru)
|
list_for_each_entry_safe(page, h, &n->partial, lru)
|
||||||
if (!page->inuse) {
|
if (!page->inuse) {
|
||||||
list_del(&page->lru);
|
list_del(&page->lru);
|
||||||
discard_slab(s, page);
|
discard_slab(s, page);
|
||||||
} else
|
n->nr_partial--;
|
||||||
slabs_inuse++;
|
}
|
||||||
spin_unlock_irqrestore(&n->list_lock, flags);
|
spin_unlock_irqrestore(&n->list_lock, flags);
|
||||||
return slabs_inuse;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -2407,8 +2403,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
|
|||||||
for_each_node_state(node, N_NORMAL_MEMORY) {
|
for_each_node_state(node, N_NORMAL_MEMORY) {
|
||||||
struct kmem_cache_node *n = get_node(s, node);
|
struct kmem_cache_node *n = get_node(s, node);
|
||||||
|
|
||||||
n->nr_partial -= free_list(s, n, &n->partial);
|
free_partial(s, n);
|
||||||
if (slabs_node(s, node))
|
if (n->nr_partial || slabs_node(s, node))
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
free_kmem_cache_nodes(s);
|
free_kmem_cache_nodes(s);
|
||||||
|
Reference in New Issue
Block a user