[PATCH] zoned vm counters: convert nr_mapped to per zone counter
nr_mapped is important because it allows a determination of how many pages of a zone are not mapped, which allows a more efficient means of determining when we need to reclaim memory in a zone.

We take the nr_mapped field out of the page state structure and define a new per zone counter named NR_FILE_MAPPED (the anonymous pages will be split off from NR_MAPPED in the next patch).

We replace the use of nr_mapped in various kernel locations. This avoids looping over all processors in try_to_free_pages(), writeback, and reclaim (swap + zone reclaim).

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
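To make the performance point concrete, here is a minimal user-space sketch (illustrative only; NR_CPUS_DEMO, NR_ZONES_DEMO, demo_zone and the counters are assumptions, not kernel code) contrasting a global counter assembled from per-CPU deltas, which must be summed over every processor on each read, with a per zone counter that can be read directly:

#include <stdio.h>

#define NR_CPUS_DEMO  4
#define NR_ZONES_DEMO 2

/* Old scheme: each CPU keeps its own delta; a reader must sum them all. */
static long per_cpu_nr_mapped[NR_CPUS_DEMO];

static long read_global_nr_mapped(void)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++)	/* O(nr_cpus) per read */
		sum += per_cpu_nr_mapped[cpu];
	return sum;
}

/* New scheme: one counter per zone; reclaim reads its own zone directly. */
struct demo_zone {
	long nr_file_mapped;
};

static struct demo_zone zones[NR_ZONES_DEMO];

static long read_zone_nr_file_mapped(int zone)
{
	return zones[zone].nr_file_mapped;		/* O(1) per zone */
}

int main(void)
{
	per_cpu_nr_mapped[0] = 10;
	per_cpu_nr_mapped[3] = 5;
	zones[0].nr_file_mapped = 12;
	zones[1].nr_file_mapped = 3;

	printf("global nr_mapped (summed over CPUs): %ld\n",
	       read_global_nr_mapped());
	printf("zone 0 NR_FILE_MAPPED: %ld\n", read_zone_nr_file_mapped(0));
	return 0;
}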
commit 65ba55f500
parent 2244b95a7b
committed by Linus Torvalds
mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_page_state(nr_mapped);
+	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -499,7 +499,7 @@ void page_add_new_anon_rmap(struct page *page,
 void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount))
-		__inc_page_state(nr_mapped);
+		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -531,7 +531,7 @@ void page_remove_rmap(struct page *page)
 		 */
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
-		__dec_page_state(nr_mapped);
+		__dec_zone_page_state(page, NR_FILE_MAPPED);
 	}
 }
 
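With NR_FILE_MAPPED maintained per zone, reclaim can ask how much of a zone is mapped without summing per-CPU page-state deltas across all processors. Below is a minimal sketch of the kind of check this enables, assuming the zone_page_state() accessor introduced by the parent ZVC patch; zone_unmapped_pages() is a hypothetical helper for illustration, not something added by this patch, and the actual heuristics in vmscan.c may differ:

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Hypothetical helper, for illustration only; not part of this patch. */
static inline unsigned long zone_unmapped_pages(struct zone *zone)
{
	/* Per zone read: no loop over all processors needed. */
	return zone->present_pages - zone_page_state(zone, NR_FILE_MAPPED);
}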