[PATCH] zoned vm counters: conversion of nr_pagecache to per zone counter
Currently a single atomic variable is used to establish the size of the page cache in the whole machine. The zoned VM counters have the same method of implementation as the nr_pagecache code but also allow the determination of the pagecache size per zone. Remove the special implementation for nr_pagecache and make it a zoned counter named NR_FILE_PAGES. Updates of the page cache counters are always performed with interrupts off. We can therefore use the __ variant here. Signed-off-by: Christoph Lameter <clameter@sgi.com> Cc: Trond Myklebust <trond.myklebust@fys.uio.no> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Committed by: Linus Torvalds
Parent: 65ba55f500
Commit: 347ce434d5
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,

extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/* Machine-wide page cache size, kept approximately via per-CPU deltas on SMP. */
extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

/* Spill a CPU's local delta into the global counter once it exceeds this. */
#define PAGECACHE_ACCT_THRESHOLD max(16, NR_CPUS * 2)

DECLARE_PER_CPU(long, nr_pagecache_local);

/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() do not need high accuracy. Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.
 *
 * MUST be protected from preemption.
 * current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
	long *delta = &__get_cpu_var(nr_pagecache_local);

	*delta += count;
	/* Flush the per-CPU delta to the global atomic once it grows large. */
	if (*delta > PAGECACHE_ACCT_THRESHOLD || *delta < -PAGECACHE_ACCT_THRESHOLD) {
		atomic_add(*delta, &nr_pagecache);
		*delta = 0;
	}
}

#else

/* UP: no per-CPU batching needed; update the global counter directly. */
static inline void pagecache_acct(int count)
{
	atomic_add(count, &nr_pagecache);
}

#endif
|
||||
|
||||
static inline unsigned long get_page_cache_size(void)
|
||||
{
|
||||
int ret = atomic_read(&nr_pagecache);
|
||||
if (unlikely(ret < 0))
|
||||
ret = 0;
|
||||
return ret;
|
||||
}
/*
 * Return byte-offset into filesystem object for page.
 */
Reference in New Issue
Block a user