memcg: fix memory migration of shmem swapcache
In the current implementation mem_cgroup_end_migration() decides whether the page migration has succeeded or not by checking "oldpage->mapping". But if we are trying to migrate a shmem swapcache, the page->mapping of it is NULL from the beginning, so the check would be invalid. As a result, mem_cgroup_end_migration() assumes the migration has succeeded even if it's not, so "newpage" would be freed while it's not uncharged. This patch fixes it by passing mem_cgroup_end_migration() the result of the page migration. Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com> Cc: Minchan Kim <minchan.kim@gmail.com> Reviewed-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
17295c88a1
commit
50de1dd967
@@ -98,7 +98,7 @@ extern int
|
|||||||
mem_cgroup_prepare_migration(struct page *page,
|
mem_cgroup_prepare_migration(struct page *page,
|
||||||
struct page *newpage, struct mem_cgroup **ptr);
|
struct page *newpage, struct mem_cgroup **ptr);
|
||||||
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
|
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
|
||||||
struct page *oldpage, struct page *newpage);
|
struct page *oldpage, struct page *newpage, bool migration_ok);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* For memory reclaim.
|
* For memory reclaim.
|
||||||
@@ -251,8 +251,7 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
|
static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
|
||||||
struct page *oldpage,
|
struct page *oldpage, struct page *newpage, bool migration_ok)
|
||||||
struct page *newpage)
|
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2896,7 +2896,7 @@ int mem_cgroup_prepare_migration(struct page *page,
|
|||||||
|
|
||||||
/* remove redundant charge if migration failed*/
|
/* remove redundant charge if migration failed*/
|
||||||
void mem_cgroup_end_migration(struct mem_cgroup *mem,
|
void mem_cgroup_end_migration(struct mem_cgroup *mem,
|
||||||
struct page *oldpage, struct page *newpage)
|
struct page *oldpage, struct page *newpage, bool migration_ok)
|
||||||
{
|
{
|
||||||
struct page *used, *unused;
|
struct page *used, *unused;
|
||||||
struct page_cgroup *pc;
|
struct page_cgroup *pc;
|
||||||
@@ -2905,8 +2905,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
|
|||||||
return;
|
return;
|
||||||
/* blocks rmdir() */
|
/* blocks rmdir() */
|
||||||
cgroup_exclude_rmdir(&mem->css);
|
cgroup_exclude_rmdir(&mem->css);
|
||||||
/* at migration success, oldpage->mapping is NULL. */
|
if (!migration_ok) {
|
||||||
if (oldpage->mapping) {
|
|
||||||
used = oldpage;
|
used = oldpage;
|
||||||
unused = newpage;
|
unused = newpage;
|
||||||
} else {
|
} else {
|
||||||
|
@@ -768,7 +768,7 @@ skip_unmap:
|
|||||||
|
|
||||||
uncharge:
|
uncharge:
|
||||||
if (!charge)
|
if (!charge)
|
||||||
mem_cgroup_end_migration(mem, page, newpage);
|
mem_cgroup_end_migration(mem, page, newpage, rc == 0);
|
||||||
unlock:
|
unlock:
|
||||||
unlock_page(page);
|
unlock_page(page);
|
||||||
|
|
||||||
|
Reference in New Issue
Block a user