hugetlb/cgroup: assign the page hugetlb cgroup when we move the page to active list.
A page's hugetlb cgroup assignment and movement to the active list should occur with hugetlb_lock held. Otherwise when we remove the hugetlb cgroup we will iterate the active list and find pages with NULL hugetlb cgroup values. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Reviewed-by: Michal Hocko <mhocko@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Commit 94ae8ba717 (parent 79dbb2368a), committed by Linus Torvalds.
@@ -213,6 +213,7 @@ done:
 	return ret;
 }
 
 /* Should be called with hugetlb_lock held */
 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
 				  struct page *page)
@@ -220,9 +221,7 @@ void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 	if (hugetlb_cgroup_disabled() || !h_cg)
 		return;
 
 	spin_lock(&hugetlb_lock);
 	set_hugetlb_cgroup(page, h_cg);
 	spin_unlock(&hugetlb_lock);
 	return;
 }
 
@@ -389,6 +388,7 @@ int __init hugetlb_cgroup_file_init(int idx)
 void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 {
 	struct hugetlb_cgroup *h_cg;
 	struct hstate *h = page_hstate(oldhpage);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -401,6 +401,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 
 	/* move the h_cg details to new cgroup */
 	set_hugetlb_cgroup(newhpage, h_cg);
 	list_move(&newhpage->lru, &h->hugepage_activelist);
 	spin_unlock(&hugetlb_lock);
 	cgroup_release_and_wakeup_rmdir(&h_cg->css);
 	return;
||||
Reference in New Issue
Block a user