[PATCH] Fix sparsemem on Cell
Fix an oops experienced on the Cell architecture when init-time functions,
early_*(), are called at runtime. It alters the call paths to make sure
that the callers explicitly say whether the call is being made on behalf
of a hotplug event, or happening at boot-time.

It has been compile tested on ppc64, ia64, s390, i386 and x86_64.

Acked-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit a2f3aa0257
parent 47a4d5be7c
committed by Linus Torvalds
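
The hunks below pass around an enum memmap_context that the full patch
introduces in a header not shown in this excerpt; a minimal sketch of
that definition, reconstructed from how the diff uses it:

	/* Sketch only -- the real definition lives in a header
	 * (include/linux/mmzone.h in this era) outside these hunks. */
	enum memmap_context {
		MEMMAP_EARLY,	/* boot-time initialization */
		MEMMAP_HOTPLUG,	/* memory added at runtime */
	};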
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -67,11 +67,13 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	zone_type = zone - pgdat->node_zones;
 	if (!populated_zone(zone)) {
 		int ret = 0;
-		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+		ret = init_currently_empty_zone(zone, phys_start_pfn,
+						nr_pages, MEMMAP_HOTPLUG);
 		if (ret < 0)
 			return ret;
 	}
-	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
+	memmap_init_zone(nr_pages, nid, zone_type,
+			 phys_start_pfn, MEMMAP_HOTPLUG);
 	return 0;
 }
 
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1956,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn)
+		unsigned long start_pfn, enum memmap_context context)
 {
 	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		if (!early_pfn_valid(pfn))
-			continue;
-		if (!early_pfn_in_nid(pfn, nid))
-			continue;
+		/*
+		 * There can be holes in boot-time mem_map[]s
+		 * handed to this function.  They do not
+		 * exist on hotplugged memory.
+		 */
+		if (context == MEMMAP_EARLY) {
+			if (!early_pfn_valid(pfn))
+				continue;
+			if (!early_pfn_in_nid(pfn, nid))
+				continue;
+		}
 		page = pfn_to_page(pfn);
 		set_page_links(page, zone, nid, pfn);
 		init_page_count(page);
@@ -1993,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-	memmap_init_zone((size), (nid), (zone), (start_pfn))
+	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
 static int __cpuinit zone_batchsize(struct zone *zone)
@@ -2239,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
 					unsigned long zone_start_pfn,
-					unsigned long size)
+					unsigned long size,
+					enum memmap_context context)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int ret;
@@ -2683,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		if (!size)
 			continue;
 
-		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+		ret = init_currently_empty_zone(zone, zone_start_pfn,
+						size, MEMMAP_EARLY);
 		BUG_ON(ret);
 		zone_start_pfn += size;
 	}
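
To make the control flow concrete, here is a standalone toy model of the
pattern the patch establishes -- an assumed, simplified sketch, not kernel
code: the early_pfn_*() stand-ins represent init-time-only helpers that
must never run for hotplugged memory, and the context argument is what
keeps them confined to the boot path.

	#include <stdbool.h>
	#include <stdio.h>

	enum memmap_context { MEMMAP_EARLY, MEMMAP_HOTPLUG };

	/* Stand-ins for the init-time-only helpers; the "hole" rule
	 * here is invented purely for demonstration. */
	static bool early_pfn_valid(unsigned long pfn) { return pfn % 3 != 0; }
	static bool early_pfn_in_nid(unsigned long pfn, int nid)
	{ (void)pfn; (void)nid; return true; }

	static void memmap_init_zone(unsigned long size, int nid,
				     unsigned long start_pfn,
				     enum memmap_context context)
	{
		for (unsigned long pfn = start_pfn; pfn < start_pfn + size; pfn++) {
			if (context == MEMMAP_EARLY) {
				/* Boot-time mem_map[]s may have holes. */
				if (!early_pfn_valid(pfn))
					continue;
				if (!early_pfn_in_nid(pfn, nid))
					continue;
			}
			printf("init pfn %lu (%s)\n", pfn,
			       context == MEMMAP_EARLY ? "early" : "hotplug");
		}
	}

	int main(void)
	{
		memmap_init_zone(4, 0, 99, MEMMAP_EARLY);    /* skips "holes" */
		memmap_init_zone(4, 0, 200, MEMMAP_HOTPLUG); /* touches every pfn */
		return 0;
	}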