memcg: fix init_page_cgroup nid with sparsemem
Commit 21a3c96468 ("memcg: allocate memory cgroup structures in local
nodes") made page_cgroup allocation NUMA aware, but it caused a
problem: https://bugzilla.kernel.org/show_bug.cgi?id=36192
The problem was getting a NID from an invalid struct page, one which
was never initialized because it is out-of-node, i.e. outside
[node_start_pfn, node_end_pfn).
Now, with sparsemem, page_cgroup_init() scans pfns from 0 to max_pfn,
so it may visit a pfn which is not on any node and access a memmap
which is not initialized.
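
As an illustration only, here is a minimal userspace model of that
failure mode (the node layout, array size and 0xdead poison value are
invented for the example; this is not kernel code): only pfns inside a
node's range ever get their node id written, yet a flat 0..max_pfn
scan trusts whatever the map happens to hold.

/* Userspace model of the bug: a flat pfn scan reads node ids that were
 * never written for pfns outside every node's [start, end) range. */
#include <stdio.h>

#define MAX_PFN 16

/* Made-up layout: node 0 owns [0, 6), node 1 owns [10, 16),
 * pfns 6..9 belong to no node and are never initialized. */
static const int node_start[2] = { 0, 10 };
static const int node_end[2]   = { 6, 16 };

static int memmap_nid[MAX_PFN];	/* stands in for page->flags node bits */

int main(void)
{
	int nid, pfn;

	for (pfn = 0; pfn < MAX_PFN; pfn++)
		memmap_nid[pfn] = 0xdead;	/* "uninitialized" struct page */

	for (nid = 0; nid < 2; nid++)
		for (pfn = node_start[nid]; pfn < node_end[nid]; pfn++)
			memmap_nid[pfn] = nid;

	/* Old behaviour: scan every pfn up to max_pfn and trust the map. */
	for (pfn = 0; pfn < MAX_PFN; pfn++)
		printf("pfn %2d -> nid %#x%s\n", pfn, memmap_nid[pfn],
		       memmap_nid[pfn] == 0xdead ? "  <- garbage" : "");
	return 0;
}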
This makes page_cgroup_init() node aware for SPARSEMEM and removes the
code that derived a nid from page->flags, so a valid NID is always
used.
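
The shape of the fix, again as a rough userspace sketch under the same
invented layout rather than the actual kernel source: walk each node's
own [start_pfn, end_pfn) range and confirm the pfn really belongs to
that node before using the nid, so no uninitialized entry is consulted
and the nid is passed down instead of read back from the page.

/* Userspace sketch of the node-aware scan: iterate each node's own pfn
 * range and verify ownership, so uninitialized entries are never read. */
#include <stdio.h>

#define MAX_PFN 16
static const int node_start[2] = { 0, 10 };	/* same made-up layout */
static const int node_end[2]   = { 6, 16 };

/* Plays the role of the pfn_to_nid()/pfn_valid() check.  Trivially true
 * with this layout; in the kernel node ranges can interleave, which is
 * exactly why the real loop re-checks ownership of every pfn. */
static int pfn_owned_by(int pfn, int nid)
{
	return pfn >= node_start[nid] && pfn < node_end[nid];
}

int main(void)
{
	int nid, pfn;

	for (nid = 0; nid < 2; nid++)			/* for_each_node_state() */
		for (pfn = node_start[nid]; pfn < node_end[nid]; pfn++) {
			if (!pfn_owned_by(pfn, nid))	/* pfn_to_nid(pfn) != nid */
				continue;
			/* init_section_page_cgroup(pfn, nid) would run here,
			 * with the loop's nid passed down instead of being
			 * read back out of a (possibly bogus) struct page. */
			printf("init pfn %2d on nid %d\n", pfn, nid);
		}
	return 0;
}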
[akpm@linux-foundation.org: try to fix up comments]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 37573e8c71
parent 8957712710
@@ -162,13 +162,13 @@ static void free_page_cgroup(void *addr)
 }
 #endif
 
-static int __meminit init_section_page_cgroup(unsigned long pfn)
+static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
 {
 	struct page_cgroup *base, *pc;
 	struct mem_section *section;
 	unsigned long table_size;
 	unsigned long nr;
-	int nid, index;
+	int index;
 
 	nr = pfn_to_section_nr(pfn);
 	section = __nr_to_section(nr);
@@ -176,7 +176,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
 	if (section->page_cgroup)
 		return 0;
 
-	nid = page_to_nid(pfn_to_page(pfn));
 	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
 	base = alloc_page_cgroup(table_size, nid);
 
@@ -196,7 +195,11 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
 		pc = base + index;
 		init_page_cgroup(pc, nr);
 	}
-
+	/*
+	 * The passed "pfn" may not be aligned to SECTION. For the calculation
+	 * we need to apply a mask.
+	 */
+	pfn &= PAGE_SECTION_MASK;
 	section->page_cgroup = base - pfn;
 	total_usage += table_size;
 	return 0;
@@ -225,10 +228,20 @@ int __meminit online_page_cgroup(unsigned long start_pfn,
 	start = start_pfn & ~(PAGES_PER_SECTION - 1);
 	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
 
+	if (nid == -1) {
+		/*
+		 * In this case, "nid" already exists and contains valid memory.
+		 * "start_pfn" passed to us is a pfn which is an arg for
+		 * online__pages(), and start_pfn should exist.
+		 */
+		nid = pfn_to_nid(start_pfn);
+		VM_BUG_ON(!node_state(nid, N_ONLINE));
+	}
+
 	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
 		if (!pfn_present(pfn))
 			continue;
-		fail = init_section_page_cgroup(pfn);
+		fail = init_section_page_cgroup(pfn, nid);
 	}
 	if (!fail)
 		return 0;
@@ -284,25 +297,47 @@ static int __meminit page_cgroup_callback(struct notifier_block *self,
 void __init page_cgroup_init(void)
 {
 	unsigned long pfn;
-	int fail = 0;
+	int nid;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
-		if (!pfn_present(pfn))
-			continue;
-		fail = init_section_page_cgroup(pfn);
-	}
-	if (fail) {
-		printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
-		panic("Out of memory");
-	} else {
-		hotplug_memory_notifier(page_cgroup_callback, 0);
+	for_each_node_state(nid, N_HIGH_MEMORY) {
+		unsigned long start_pfn, end_pfn;
+
+		start_pfn = node_start_pfn(nid);
+		end_pfn = node_end_pfn(nid);
+		/*
+		 * start_pfn and end_pfn may not be aligned to SECTION and the
+		 * page->flags of out of node pages are not initialized. So we
+		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
+		 */
+		for (pfn = start_pfn;
+		     pfn < end_pfn;
+		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
+
+			if (!pfn_valid(pfn))
+				continue;
+			/*
+			 * Nodes's pfns can be overlapping.
+			 * We know some arch can have a nodes layout such as
+			 * -------------pfn-------------->
+			 * N0 | N1 | N2 | N0 | N1 | N2|....
+			 */
+			if (pfn_to_nid(pfn) != nid)
+				continue;
+			if (init_section_page_cgroup(pfn, nid))
+				goto oom;
+		}
 	}
+	hotplug_memory_notifier(page_cgroup_callback, 0);
 	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
-	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
-	" want memory cgroups\n");
+	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
+	"don't want memory cgroups\n");
+	return;
+oom:
+	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
+	panic("Out of memory");
 }
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)