memcg: helper function for reclaim from shmem.
A new call, mem_cgroup_shrink_usage(), is added for shmem handling, replacing non-standard usage of mem_cgroup_charge/uncharge. Previously, shmem called mem_cgroup_charge() just to reclaim some pages from a mem_cgroup. In general, shmem is used by some process group and not for a global resource (like file caches). So, it's reasonable to reclaim pages from the mem_cgroup where shmem is mainly used. [hugh@veritas.com: shmem_getpage release page sooner] [hugh@veritas.com: mem_cgroup_shrink_usage css_put] Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Balbir Singh <balbir@in.ibm.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Pavel Emelyanov <xemul@openvz.org> Cc: Li Zefan <lizf@cn.fujitsu.com> Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp> Cc: Paul Menage <menage@google.com> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
69029cd550
commit
c9b0ed5148
@@ -37,6 +37,8 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
|
|||||||
extern void mem_cgroup_uncharge_page(struct page *page);
|
extern void mem_cgroup_uncharge_page(struct page *page);
|
||||||
extern void mem_cgroup_uncharge_cache_page(struct page *page);
|
extern void mem_cgroup_uncharge_cache_page(struct page *page);
|
||||||
extern void mem_cgroup_move_lists(struct page *page, bool active);
|
extern void mem_cgroup_move_lists(struct page *page, bool active);
|
||||||
|
extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
|
||||||
|
|
||||||
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
|
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
|
||||||
struct list_head *dst,
|
struct list_head *dst,
|
||||||
unsigned long *scanned, int order,
|
unsigned long *scanned, int order,
|
||||||
@@ -102,6 +104,11 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
|
|||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * No-op fallback for mem_cgroup_shrink_usage().
 *
 * NOTE(review): this appears to be the stub used when the memory
 * controller is compiled out — the guarding #ifdef is not visible in
 * this hunk; confirm against include/linux/memcontrol.h. It reports
 * success (0) without reclaiming anything, so callers such as
 * shmem_getpage() proceed unchanged when memcg is disabled.
 */
static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}
|
||||||
|
|
||||||
static inline void mem_cgroup_move_lists(struct page *page, bool active)
|
static inline void mem_cgroup_move_lists(struct page *page, bool active)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
@@ -780,6 +780,32 @@ void mem_cgroup_end_migration(struct page *newpage)
|
|||||||
mem_cgroup_uncharge_page(newpage);
|
mem_cgroup_uncharge_page(newpage);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * A call to try to shrink memory usage under specified resource controller.
 * This is typically used for page reclaiming for shmem for reducing side
 * effect of page allocation from shmem, which is used by some mem_cgroup.
 *
 * Returns 0 if reclaim made progress within the retry budget, or
 * -ENOMEM if every attempt failed to free pages.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	/*
	 * Resolve the mem_cgroup of the mm's owner task under RCU, and pin
	 * it with a css reference so it cannot be freed while we reclaim
	 * outside the RCU read-side critical section.
	 */
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	css_get(&mem->css);
	rcu_read_unlock();

	/*
	 * Retry reclaim until some pages are freed or the retry budget is
	 * exhausted. Note the short-circuit: --retry is only evaluated when
	 * no progress was made, so retry == 0 here implies the final attempt
	 * also failed.
	 */
	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This routine traverse page_cgroup in given list and drop them all.
|
* This routine traverse page_cgroup in given list and drop them all.
|
||||||
* *And* this routine doesn't reclaim page itself, just removes page_cgroup.
|
* *And* this routine doesn't reclaim page itself, just removes page_cgroup.
|
||||||
|
11
mm/shmem.c
11
mm/shmem.c
@@ -1315,17 +1315,14 @@ repeat:
|
|||||||
shmem_swp_unmap(entry);
|
shmem_swp_unmap(entry);
|
||||||
spin_unlock(&info->lock);
|
spin_unlock(&info->lock);
|
||||||
unlock_page(swappage);
|
unlock_page(swappage);
|
||||||
|
page_cache_release(swappage);
|
||||||
if (error == -ENOMEM) {
|
if (error == -ENOMEM) {
|
||||||
/* allow reclaim from this memory cgroup */
|
/* allow reclaim from this memory cgroup */
|
||||||
error = mem_cgroup_cache_charge(swappage,
|
error = mem_cgroup_shrink_usage(current->mm,
|
||||||
current->mm, gfp & ~__GFP_HIGHMEM);
|
gfp);
|
||||||
if (error) {
|
if (error)
|
||||||
page_cache_release(swappage);
|
|
||||||
goto failed;
|
goto failed;
|
||||||
}
|
}
|
||||||
mem_cgroup_uncharge_cache_page(swappage);
|
|
||||||
}
|
|
||||||
page_cache_release(swappage);
|
|
||||||
goto repeat;
|
goto repeat;
|
||||||
}
|
}
|
||||||
} else if (sgp == SGP_READ && !filepage) {
|
} else if (sgp == SGP_READ && !filepage) {
|
||||||
|
Reference in New Issue
Block a user