Memory controller: make page_referenced() cgroup aware

Make page_referenced() cgroup aware.  Without this patch, references from
other cgroups can cause a page to be skipped while reclaiming pages, so
other cgroups can hold pages in a particular cgroup hostage.  This patch
makes reclaim on behalf of a cgroup count only references from that
cgroup.  It is required to ensure that shared pages are freed from a
cgroup when they are not actively referenced from the cgroup that brought
them in.
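
For illustration only, a minimal sketch of how a reclaim path calls the
new signature; the scan_control field sc->mem_cgroup is an assumption
borrowed from the rest of the memory-controller series and is not part of
the hunks below:

	/* Sketch, not part of this diff: cgroup-targeted reclaim only
	 * counts references from the cgroup under reclaim. */
	referenced = page_referenced(page, 0, sc->mem_cgroup);

	/* Global reclaim passes NULL, so every mapping is counted,
	 * preserving the old behaviour. */
	referenced = page_referenced(page, 0, NULL);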

Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Balbir Singh
Date:      2008-02-07 00:14:01 -08:00
Committer: Linus Torvalds
Commit:    bed7161a51
Parent:    8697d33194
5 changed files with 40 additions and 10 deletions

mm/rmap.c

@@ -302,7 +302,8 @@ out:
 	return referenced;
 }
 
-static int page_referenced_anon(struct page *page)
+static int page_referenced_anon(struct page *page,
+				struct mem_cgroup *mem_cont)
 {
 	unsigned int mapcount;
 	struct anon_vma *anon_vma;
@@ -315,6 +316,13 @@ static int page_referenced_anon(struct page *page)
 
 	mapcount = page_mapcount(page);
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+		/*
+		 * If we are reclaiming on behalf of a cgroup, skip
+		 * counting on behalf of references from different
+		 * cgroups
+		 */
+		if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+			continue;
 		referenced += page_referenced_one(page, vma, &mapcount);
 		if (!mapcount)
 			break;
@@ -335,7 +343,8 @@ static int page_referenced_anon(struct page *page)
  *
  * This function is only called from page_referenced for object-based pages.
  */
-static int page_referenced_file(struct page *page)
+static int page_referenced_file(struct page *page,
+				struct mem_cgroup *mem_cont)
 {
 	unsigned int mapcount;
 	struct address_space *mapping = page->mapping;
@@ -368,6 +377,13 @@ static int page_referenced_file(struct page *page)
 
 	mapcount = page_mapcount(page);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+		/*
+		 * If we are reclaiming on behalf of a cgroup, skip
+		 * counting on behalf of references from different
+		 * cgroups
+		 */
+		if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+			continue;
 		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
 				  == (VM_LOCKED|VM_MAYSHARE)) {
 			referenced++;
@@ -390,7 +406,8 @@ static int page_referenced_file(struct page *page)
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked)
+int page_referenced(struct page *page, int is_locked,
+			struct mem_cgroup *mem_cont)
 {
 	int referenced = 0;
 
@@ -402,14 +419,15 @@ int page_referenced(struct page *page, int is_locked)
 	if (page_mapped(page) && page->mapping) {
 		if (PageAnon(page))
-			referenced += page_referenced_anon(page);
+			referenced += page_referenced_anon(page, mem_cont);
 		else if (is_locked)
-			referenced += page_referenced_file(page);
+			referenced += page_referenced_file(page, mem_cont);
 		else if (TestSetPageLocked(page))
 			referenced++;
 		else {
 			if (page->mapping)
-				referenced += page_referenced_file(page);
+				referenced +=
+					page_referenced_file(page, mem_cont);
 			unlock_page(page);
 		}
 	}
 
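
The checks above lean on mm_cgroup() to resolve which cgroup a VMA's mm
belongs to.  A rough sketch of that helper, assumed from elsewhere in this
series (include/linux/memcontrol.h) rather than from this diff:

	/* Assumed helper, defined in the wider series, not here:
	 * returns the mem_cgroup cached on the mm_struct, so the
	 * filtering compares a VMA's owning cgroup with mem_cont. */
	static inline struct mem_cgroup *mm_cgroup(struct mm_struct *mm)
	{
		return rcu_dereference(mm->mem_cgroup);
	}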