mm: memcg: remove optimization of keeping the root_mem_cgroup LRU lists empty
root_mem_cgroup, lacking a configurable limit, was never subject to limit reclaim, so the pages charged to it could be kept off its LRU lists. They would be found on the global per-zone LRU lists upon physical memory pressure, and it made sense to avoid uselessly linking them to both lists.

The global per-zone LRU lists are about to go away on memcg-enabled kernels, with all pages being exclusively linked to their respective per-memcg LRU lists. As a result, pages of the root_mem_cgroup must also be linked to its LRU lists again. This is purely about the LRU list; root_mem_cgroup is still not charged.

The overhead is temporary until the double-LRU scheme goes away completely.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit ad2b8e6010
parent 5660048cca
committed by Linus Torvalds
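To make the behavioural change easier to see than in the hunks below, here is a minimal, self-contained userspace sketch in plain C. It is not the kernel's code: the struct names, the is_root flag and the list helpers are simplified stand-ins; it only models how the removed root check kept root-charged pages off the per-memcg LRU list, and how they are now linked like everyone else's.

/*
 * Illustrative userspace model only -- not kernel code.  "memcg", "pc"
 * and the list helpers below are simplified stand-ins for the kernel's
 * mem_cgroup, page_cgroup and list_head machinery.
 */
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

/* Insert node right after head, like the kernel's list_add(). */
static void list_add(struct list_head *node, struct list_head *head)
{
	node->next = head->next;
	node->prev = head;
	head->next->prev = node;
	head->next = node;
}

struct memcg { bool is_root; struct list_head lru; };
struct pc    { struct memcg *memcg; struct list_head lru; };

/* Old behaviour: pages of the root cgroup were never linked. */
static void add_lru_old(struct pc *pc)
{
	if (pc->memcg->is_root)
		return;			/* the check this patch removes */
	list_add(&pc->lru, &pc->memcg->lru);
}

/* New behaviour: every used page goes on its memcg's LRU list. */
static void add_lru_new(struct pc *pc)
{
	list_add(&pc->lru, &pc->memcg->lru);
}

int main(void)
{
	struct memcg root = { .is_root = true };
	struct pc page = { .memcg = &root };

	list_init(&root.lru);
	list_init(&page.lru);

	add_lru_old(&page);
	printf("old: root LRU %s\n",
	       root.lru.next == &root.lru ? "empty" : "has page");

	add_lru_new(&page);
	printf("new: root LRU %s\n",
	       root.lru.next == &root.lru ? "empty" : "has page");
	return 0;
}

With the check gone, the add, delete and rotate paths in the hunks below treat the root cgroup like any other memcg, which is what allows the per-memcg LRU lists to stand in for the global per-zone ones.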
@@ -1031,8 +1031,6 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
-	if (mem_cgroup_is_root(pc->mem_cgroup))
-		return;
 	VM_BUG_ON(list_empty(&pc->lru));
 	list_del_init(&pc->lru);
 }
@@ -1057,13 +1055,11 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
 		return;
 
 	pc = lookup_page_cgroup(page);
-	/* unused or root page is not rotated. */
+	/* unused page is not rotated. */
 	if (!PageCgroupUsed(pc))
 		return;
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
-	if (mem_cgroup_is_root(pc->mem_cgroup))
-		return;
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	list_move_tail(&pc->lru, &mz->lists[lru]);
 }
@@ -1077,13 +1073,11 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 		return;
 
 	pc = lookup_page_cgroup(page);
-	/* unused or root page is not rotated. */
+	/* unused page is not rotated. */
 	if (!PageCgroupUsed(pc))
 		return;
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
-	if (mem_cgroup_is_root(pc->mem_cgroup))
-		return;
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	list_move(&pc->lru, &mz->lists[lru]);
 }
@@ -1115,8 +1109,6 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
-	if (mem_cgroup_is_root(pc->mem_cgroup))
-		return;
 	list_add(&pc->lru, &mz->lists[lru]);
 }
 