mm/memcg: apply add/del_page to lruvec
Take lruvec further: pass it instead of zone to add_page_to_lru_list() and
del_page_from_lru_list(); and have pagevec_lru_move_fn() pass lruvec down
to its target functions.

This cleanup eliminates a swathe of cruft in memcontrol.c, including
mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and
mem_cgroup_lru_move_lists() - which never actually touched the lists.

In their place, mem_cgroup_page_lruvec() to decide the lruvec, previously
a side-effect of add, and mem_cgroup_update_lru_size() to maintain the
lru_size stats.

Whilst these are simplifications in their own right, the goal is to bring
the evaluation of lruvec next to the spin_locking of the lrus, in
preparation for a future patch.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit fa9add641b (parent 75b00af77e)
Committed by: Linus Torvalds
 mm/memcontrol.c | 101
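
Only the memcontrol.c half of the change is shown below. For orientation, the
calling convention it serves ends up roughly as follows - a hedged sketch of
the post-patch add/del helpers, whose real bodies live in
include/linux/mm_inline.h rather than in this excerpt; hpage_nr_pages() and
lruvec_zone() are assumed from elsewhere in the tree at this point:

    /* Sketch: lru-list helpers now take a lruvec instead of a zone. */
    static __always_inline void add_page_to_lru_list(struct page *page,
                                    struct lruvec *lruvec, enum lru_list lru)
    {
            int nr_pages = hpage_nr_pages(page);

            /* accounting formerly buried in mem_cgroup_lru_add_list() */
            mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
            list_add(&page->lru, &lruvec->lists[lru]);
            __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru,
                                  nr_pages);
    }

    static __always_inline void del_page_from_lru_list(struct page *page,
                                    struct lruvec *lruvec, enum lru_list lru)
    {
            int nr_pages = hpage_nr_pages(page);

            mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
            list_del(&page->lru);
            __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru,
                                  -nr_pages);
    }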
@@ -1035,7 +1035,7 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
 /**
  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  * @zone: zone of the wanted lruvec
- * @mem: memcg of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
  *
  * Returns the lru list vector holding pages for the given @zone and
  * @mem. This can be the global zone lruvec, if the memory controller
@@ -1068,19 +1068,11 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  */
 
 /**
- * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
- * @zone: zone of the page
+ * mem_cgroup_page_lruvec - return lruvec for adding an lru page
  * @page: the page
- * @lru: current lru
- *
- * This function accounts for @page being added to @lru, and returns
- * the lruvec for the given @zone and the memcg @page is charged to.
- *
- * The callsite is then responsible for physically linking the page to
- * the returned lruvec->lists[@lru].
+ * @zone: zone of the page
  */
-struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
-                                       enum lru_list lru)
+struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup *memcg;
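
With the lru_size side-effect gone, mem_cgroup_page_lruvec() is just a lookup,
so callers can evaluate the lruvec right next to taking the lru_lock - the
stated goal of the series. Schematically (a sketch of a typical caller, not a
hunk of this patch):

    spin_lock_irq(&zone->lru_lock);
    lruvec = mem_cgroup_page_lruvec(page, zone); /* no accounting side-effect */
    SetPageLRU(page);
    add_page_to_lru_list(page, lruvec, page_lru(page));
    spin_unlock_irq(&zone->lru_lock);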
@@ -1093,7 +1085,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
        memcg = pc->mem_cgroup;
 
        /*
-        * Surreptitiously switch any uncharged page to root:
+        * Surreptitiously switch any uncharged offlist page to root:
         * an uncharged page off lru does nothing to secure
         * its former mem_cgroup from sudden removal.
         *
@@ -1101,65 +1093,35 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
         * under page_cgroup lock: between them, they make all uses
         * of pc->mem_cgroup safe.
         */
-       if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+       if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
                pc->mem_cgroup = memcg = root_mem_cgroup;
 
        mz = page_cgroup_zoneinfo(memcg, page);
-       /* compound_order() is stabilized through lru_lock */
-       mz->lru_size[lru] += 1 << compound_order(page);
        return &mz->lruvec;
 }
 
 /**
- * mem_cgroup_lru_del_list - account for removing an lru page
- * @page: the page
- * @lru: target lru
+ * mem_cgroup_update_lru_size - account for adding or removing an lru page
+ * @lruvec: mem_cgroup per zone lru vector
+ * @lru: index of lru list the page is sitting on
+ * @nr_pages: positive when adding or negative when removing
  *
- * This function accounts for @page being removed from @lru.
- *
- * The callsite is then responsible for physically unlinking
- * @page->lru.
+ * This function must be called when a page is added to or removed from an
+ * lru list.
  */
-void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+                               int nr_pages)
 {
        struct mem_cgroup_per_zone *mz;
-       struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
+       unsigned long *lru_size;
 
        if (mem_cgroup_disabled())
                return;
 
-       pc = lookup_page_cgroup(page);
-       memcg = pc->mem_cgroup;
-       VM_BUG_ON(!memcg);
-       mz = page_cgroup_zoneinfo(memcg, page);
-       /* huge page split is done under lru_lock. so, we have no races. */
-       VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
-       mz->lru_size[lru] -= 1 << compound_order(page);
-}
-
-/**
- * mem_cgroup_lru_move_lists - account for moving a page between lrus
- * @zone: zone of the page
- * @page: the page
- * @from: current lru
- * @to: target lru
- *
- * This function accounts for @page being moved between the lrus @from
- * and @to, and returns the lruvec for the given @zone and the memcg
- * @page is charged to.
- *
- * The callsite is then responsible for physically relinking
- * @page->lru to the returned lruvec->lists[@to].
- */
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-                                        struct page *page,
-                                        enum lru_list from,
-                                        enum lru_list to)
-{
-       /* XXX: Optimize this, especially for @from == @to */
-       mem_cgroup_lru_del_list(page, from);
-       return mem_cgroup_lru_add_list(zone, page, to);
+       mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+       lru_size = mz->lru_size + lru;
+       *lru_size += nr_pages;
+       VM_BUG_ON((long)(*lru_size) < 0);
 }
 
 /*
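
With the accounting folded into one signed delta, mem_cgroup_lru_move_lists()
has no remaining job: a move between lrus is just a del/add pair against the
lruvec the caller already holds under lru_lock. Roughly, in the style of the
mm/swap.c activation path (a sketch, not a hunk of this file):

    /* Sketch: moving a page between lrus after this patch. */
    int lru = page_lru_base_type(page);

    del_page_from_lru_list(page, lruvec, lru);  /* lru_size -= nr_pages */
    SetPageActive(page);
    add_page_to_lru_list(page, lruvec, lru + LRU_ACTIVE); /* lru_size += nr_pages */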
@@ -1252,24 +1214,6 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
        return (active > inactive);
 }
 
-struct zone_reclaim_stat *
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
-       struct page_cgroup *pc;
-       struct mem_cgroup_per_zone *mz;
-
-       if (mem_cgroup_disabled())
-               return NULL;
-
-       pc = lookup_page_cgroup(page);
-       if (!PageCgroupUsed(pc))
-               return NULL;
-       /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-       smp_rmb();
-       mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-       return &mz->lruvec.reclaim_stat;
-}
-
 #define mem_cgroup_from_res_counter(counter, member)  \
        container_of(counter, struct mem_cgroup, member)
 
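
mem_cgroup_get_reclaim_stat_from_page() can go because every remaining user
already holds a lruvec, and the reclaim stats live inside it. A sketch in the
shape of the mm/swap.c helper (assumed, not shown in this excerpt):

    static void update_page_reclaim_stat(struct lruvec *lruvec,
                                         int file, int rotated)
    {
            /* one dereference away once the caller has the lruvec */
            struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

            reclaim_stat->recent_scanned[file]++;
            if (rotated)
                    reclaim_stat->recent_rotated[file]++;
    }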
@@ -2509,6 +2453,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 {
        struct page_cgroup *pc = lookup_page_cgroup(page);
        struct zone *uninitialized_var(zone);
+       struct lruvec *lruvec;
        bool was_on_lru = false;
        bool anon;
 
@@ -2531,8 +2476,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                zone = page_zone(page);
                spin_lock_irq(&zone->lru_lock);
                if (PageLRU(page)) {
+                       lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
                        ClearPageLRU(page);
-                       del_page_from_lru_list(zone, page, page_lru(page));
+                       del_page_from_lru_list(page, lruvec, page_lru(page));
                        was_on_lru = true;
                }
        }
@@ -2550,9 +2496,10 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 
        if (lrucare) {
                if (was_on_lru) {
+                       lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
                        VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
-                       add_page_to_lru_list(zone, page, page_lru(page));
+                       add_page_to_lru_list(page, lruvec, page_lru(page));
                }
                spin_unlock_irq(&zone->lru_lock);
        }
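
The pagevec_lru_move_fn() change mentioned in the commit message lives in
mm/swap.c and is not part of this excerpt. Its post-patch shape is roughly the
following sketch: the lruvec is resolved once per page under the already-held
lru_lock and handed to the per-page callback, instead of each callback
re-deriving it:

    /* Sketch of the mm/swap.c side, under the same assumptions as above. */
    static void pagevec_lru_move_fn(struct pagevec *pvec,
            void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
            void *arg)
    {
            int i;
            struct zone *zone = NULL;
            struct lruvec *lruvec;
            unsigned long flags = 0;

            for (i = 0; i < pagevec_count(pvec); i++) {
                    struct page *page = pvec->pages[i];
                    struct zone *pagezone = page_zone(page);

                    /* batch the lru_lock across pages of the same zone */
                    if (pagezone != zone) {
                            if (zone)
                                    spin_unlock_irqrestore(&zone->lru_lock, flags);
                            zone = pagezone;
                            spin_lock_irqsave(&zone->lru_lock, flags);
                    }

                    lruvec = mem_cgroup_page_lruvec(page, zone);
                    (*move_fn)(page, lruvec, arg);
            }
            if (zone)
                    spin_unlock_irqrestore(&zone->lru_lock, flags);
            release_pages(pvec->pages, pvec->nr, pvec->cold);
            pagevec_reinit(pvec);
    }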