Merge branch 'memblock-kill-early_node_map' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/memblock

Ingo Molnar committed 2011-12-20 12:14:26 +01:00
75 changed files with 1073 additions and 1788 deletions

mm/page_alloc.c

@@ -181,39 +181,17 @@ static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
* MAX_ACTIVE_REGIONS determines the maximum number of distinct
* ranges of memory (RAM) that may be registered with add_active_range().
* Ranges passed to add_active_range() will be merged if possible
* so the number of times add_active_range() can be called is
* related to the number of nodes and the number of holes
*/
#ifdef CONFIG_MAX_ACTIVE_REGIONS
/* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
#define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
#else
#if MAX_NUMNODES >= 32
/* If there can be many nodes, allow up to 50 holes per node */
#define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
#else
/* By default, allow up to 256 distinct regions */
#define MAX_ACTIVE_REGIONS 256
#endif
#endif
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
static int __meminitdata nr_nodemap_entries;
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
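The hunk above is the heart of the series: the fixed-size early_node_map[]/MAX_ACTIVE_REGIONS table goes away because memblock.memory already records the same (nid, start_pfn, end_pfn) ranges. A minimal userspace sketch of the shape of the replacement data, with mock_region/mock_memory as hypothetical stand-ins for memblock's real types:

#include <stdio.h>

/* Hypothetical stand-ins for struct memblock_region / memblock.memory.
 * Real memblock grows its arrays on demand, so no fixed
 * MAX_ACTIVE_REGIONS bound is needed. */
struct mock_region { unsigned long start_pfn, end_pfn; int nid; };

static const struct mock_region mock_memory[] = {
	{ 0x00000, 0x10000, 0 },	/* node 0: pfns [0x00000, 0x10000) */
	{ 0x20000, 0x28000, 1 },	/* node 1: pfns [0x20000, 0x28000) */
};

int main(void)
{
	/* Roughly what for_each_mem_pfn_range(i, MAX_NUMNODES, ...) walks. */
	for (size_t i = 0; i < sizeof(mock_memory)/sizeof(mock_memory[0]); i++)
		printf("node %d: %#lx -> %#lx\n", mock_memory[i].nid,
		       mock_memory[i].start_pfn, mock_memory[i].end_pfn);
	return 0;
}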
@@ -706,10 +684,10 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
int loop;
prefetchw(page);
for (loop = 0; loop < BITS_PER_LONG; loop++) {
for (loop = 0; loop < (1 << order); loop++) {
struct page *p = &page[loop];
if (loop + 1 < BITS_PER_LONG)
if (loop + 1 < (1 << order))
prefetchw(p + 1);
__ClearPageReserved(p);
set_page_count(p, 0);
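The old loop freed exactly BITS_PER_LONG pages per call, which is only right when order happens to equal log2(BITS_PER_LONG); with memblock doing the freeing the order varies, so the bound becomes 1 << order. A standalone demo of the arithmetic, assuming a 64-bit BITS_PER_LONG for the comparison:

#include <stdio.h>

#define MOCK_BITS_PER_LONG 64	/* assumption for this demo only */

int main(void)
{
	/* An order-n block spans 2^n pages; the old hard-wired bound
	 * matches only at order 6 on a 64-bit build. */
	for (unsigned int order = 0; order <= 6; order++)
		printf("order %u: %3u pages (old loop always touched %d)\n",
		       order, 1u << order, MOCK_BITS_PER_LONG);
	return 0;
}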
@@ -3737,35 +3715,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
return 0;
}
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
* Basic iterator support. Return the first range of PFNs for a node
* Note: nid == MAX_NUMNODES returns first region regardless of node
*/
static int __meminit first_active_region_index_in_nid(int nid)
{
int i;
for (i = 0; i < nr_nodemap_entries; i++)
if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
return i;
return -1;
}
/*
* Basic iterator support. Return the next active range of PFNs for a node
* Note: nid == MAX_NUMNODES returns next region regardless of node
*/
static int __meminit next_active_region_index_in_nid(int index, int nid)
{
for (index = index + 1; index < nr_nodemap_entries; index++)
if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
return index;
return -1;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
* Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
@@ -3775,15 +3725,12 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
*/
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
int i;
for (i = 0; i < nr_nodemap_entries; i++) {
unsigned long start_pfn = early_node_map[i].start_pfn;
unsigned long end_pfn = early_node_map[i].end_pfn;
unsigned long start_pfn, end_pfn;
int i, nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
if (start_pfn <= pfn && pfn < end_pfn)
return early_node_map[i].nid;
}
return nid;
/* This is a memory hole */
return -1;
}
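__early_pfn_to_nid() now answers "which node owns this pfn?" by scanning the memblock ranges instead of early_node_map[]. A self-contained sketch of that lookup (the mock table from the earlier sketch is restated so this compiles on its own; -1 signals a memory hole, as in the kernel code):

#include <stddef.h>

struct mock_region { unsigned long start_pfn, end_pfn; int nid; };

static const struct mock_region mock_memory[] = {
	{ 0x00000, 0x10000, 0 },
	{ 0x20000, 0x28000, 1 },
};

/* Mirrors the rewritten __early_pfn_to_nid(): scan the ranges and
 * report the owning node, or -1 for a hole. */
static int mock_early_pfn_to_nid(unsigned long pfn)
{
	for (size_t i = 0; i < sizeof(mock_memory)/sizeof(*mock_memory); i++)
		if (mock_memory[i].start_pfn <= pfn &&
		    pfn < mock_memory[i].end_pfn)
			return mock_memory[i].nid;
	return -1;	/* pfn falls in a memory hole */
}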
@@ -3812,11 +3759,6 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
}
#endif
/* Basic iterator support to walk early_node_map[] */
#define for_each_active_range_index_in_nid(i, nid) \
for (i = first_active_region_index_in_nid(nid); i != -1; \
i = next_active_region_index_in_nid(i, nid))
/**
* free_bootmem_with_active_regions - Call free_bootmem_node for each active range
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
@@ -3826,122 +3768,34 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
* add_active_ranges() contain no holes and may be freed, this
* function may be used instead of calling free_bootmem() manually.
*/
void __init free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn)
void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
int i;
unsigned long start_pfn, end_pfn;
int i, this_nid;
for_each_active_range_index_in_nid(i, nid) {
unsigned long size_pages = 0;
unsigned long end_pfn = early_node_map[i].end_pfn;
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
start_pfn = min(start_pfn, max_low_pfn);
end_pfn = min(end_pfn, max_low_pfn);
if (early_node_map[i].start_pfn >= max_low_pfn)
continue;
if (end_pfn > max_low_pfn)
end_pfn = max_low_pfn;
size_pages = end_pfn - early_node_map[i].start_pfn;
free_bootmem_node(NODE_DATA(early_node_map[i].nid),
PFN_PHYS(early_node_map[i].start_pfn),
size_pages << PAGE_SHIFT);
if (start_pfn < end_pfn)
free_bootmem_node(NODE_DATA(this_nid),
PFN_PHYS(start_pfn),
(end_pfn - start_pfn) << PAGE_SHIFT);
}
}
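The rewrite above collapses three special-case branches into two min() clamps plus one emptiness check. A tiny demonstration that clamping both ends to max_low_pfn subsumes the old "starts above the limit" and "ends above the limit" cases:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	const unsigned long max_low_pfn = 0x100;
	/* The three cases the old code handled with separate branches. */
	unsigned long ranges[][2] = {
		{ 0x000, 0x080 },	/* fully below the limit */
		{ 0x0c0, 0x140 },	/* straddles the limit */
		{ 0x180, 0x200 },	/* fully above: clamps to empty */
	};

	for (int i = 0; i < 3; i++) {
		unsigned long s = min_ul(ranges[i][0], max_low_pfn);
		unsigned long e = min_ul(ranges[i][1], max_low_pfn);

		if (s < e)	/* the single check left in the new code */
			printf("free %#lx..%#lx\n", s, e);
		else
			printf("skip %#lx..%#lx\n", ranges[i][0], ranges[i][1]);
	}
	return 0;
}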
#ifdef CONFIG_HAVE_MEMBLOCK
/*
* Basic iterator support. Return the last range of PFNs for a node
* Note: nid == MAX_NUMNODES returns last region regardless of node
*/
static int __meminit last_active_region_index_in_nid(int nid)
{
int i;
for (i = nr_nodemap_entries - 1; i >= 0; i--)
if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
return i;
return -1;
}
/*
* Basic iterator support. Return the previous active range of PFNs for a node
* Note: nid == MAX_NUMNODES returns previous region regardless of node
*/
static int __meminit previous_active_region_index_in_nid(int index, int nid)
{
for (index = index - 1; index >= 0; index--)
if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
return index;
return -1;
}
#define for_each_active_range_index_in_nid_reverse(i, nid) \
for (i = last_active_region_index_in_nid(nid); i != -1; \
i = previous_active_region_index_in_nid(i, nid))
u64 __init find_memory_core_early(int nid, u64 size, u64 align,
u64 goal, u64 limit)
{
int i;
/* Need to go over early_node_map to find a good range for the node */
for_each_active_range_index_in_nid_reverse(i, nid) {
u64 addr;
u64 ei_start, ei_last;
u64 final_start, final_end;
ei_last = early_node_map[i].end_pfn;
ei_last <<= PAGE_SHIFT;
ei_start = early_node_map[i].start_pfn;
ei_start <<= PAGE_SHIFT;
final_start = max(ei_start, goal);
final_end = min(ei_last, limit);
if (final_start >= final_end)
continue;
addr = memblock_find_in_range(final_start, final_end, size, align);
if (addr == MEMBLOCK_ERROR)
continue;
return addr;
}
return MEMBLOCK_ERROR;
}
#endif
int __init add_from_early_node_map(struct range *range, int az,
int nr_range, int nid)
{
unsigned long start_pfn, end_pfn;
int i;
u64 start, end;
/* need to go over early_node_map to find a good range for the node */
for_each_active_range_index_in_nid(i, nid) {
start = early_node_map[i].start_pfn;
end = early_node_map[i].end_pfn;
nr_range = add_range(range, az, nr_range, start, end);
}
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
return nr_range;
}
void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
{
int i;
int ret;
for_each_active_range_index_in_nid(i, nid) {
ret = work_fn(early_node_map[i].start_pfn,
early_node_map[i].end_pfn, data);
if (ret)
break;
}
}
/**
* sparse_memory_present_with_active_regions - Call memory_present for each active range
* @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -3952,12 +3806,11 @@ void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
*/
void __init sparse_memory_present_with_active_regions(int nid)
{
int i;
unsigned long start_pfn, end_pfn;
int i, this_nid;
for_each_active_range_index_in_nid(i, nid)
memory_present(early_node_map[i].nid,
early_node_map[i].start_pfn,
early_node_map[i].end_pfn);
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
memory_present(this_nid, start_pfn, end_pfn);
}
/**
@@ -3974,13 +3827,15 @@ void __init sparse_memory_present_with_active_regions(int nid)
void __meminit get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn)
{
unsigned long this_start_pfn, this_end_pfn;
int i;
*start_pfn = -1UL;
*end_pfn = 0;
for_each_active_range_index_in_nid(i, nid) {
*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
*start_pfn = min(*start_pfn, this_start_pfn);
*end_pfn = max(*end_pfn, this_end_pfn);
}
if (*start_pfn == -1UL)
@@ -4083,46 +3938,16 @@ unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
int i = 0;
unsigned long prev_end_pfn = 0, hole_pages = 0;
unsigned long start_pfn;
unsigned long nr_absent = range_end_pfn - range_start_pfn;
unsigned long start_pfn, end_pfn;
int i;
/* Find the end_pfn of the first active range of pfns in the node */
i = first_active_region_index_in_nid(nid);
if (i == -1)
return 0;
prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
/* Account for ranges before physical memory on this node */
if (early_node_map[i].start_pfn > range_start_pfn)
hole_pages = prev_end_pfn - range_start_pfn;
/* Find all holes for the zone within the node */
for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
/* No need to continue if prev_end_pfn is outside the zone */
if (prev_end_pfn >= range_end_pfn)
break;
/* Make sure the end of the zone is not within the hole */
start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
prev_end_pfn = max(prev_end_pfn, range_start_pfn);
/* Update the hole size count and move on */
if (start_pfn > range_start_pfn) {
BUG_ON(prev_end_pfn > start_pfn);
hole_pages += start_pfn - prev_end_pfn;
}
prev_end_pfn = early_node_map[i].end_pfn;
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
nr_absent -= end_pfn - start_pfn;
}
/* Account for ranges past physical memory on this node */
if (range_end_pfn > prev_end_pfn)
hole_pages += range_end_pfn -
max(range_start_pfn, prev_end_pfn);
return hole_pages;
return nr_absent;
}
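The new __absent_pages_in_range() inverts the old hole-walking: start from "the whole window is absent", then subtract each present range clamped to the window. A worked standalone example, two present ranges inside a 100-page window:

#include <stdio.h>

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
			      unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	const unsigned long range_start = 100, range_end = 200;
	/* Present memory, possibly overhanging the query window. */
	unsigned long present[][2] = { { 120, 150 }, { 170, 210 } };
	unsigned long nr_absent = range_end - range_start;	/* 100 */

	for (int i = 0; i < 2; i++) {
		unsigned long s = clamp_ul(present[i][0], range_start, range_end);
		unsigned long e = clamp_ul(present[i][1], range_start, range_end);

		nr_absent -= e - s;	/* subtract the covered part */
	}
	/* [120,150) covers 30, [170,200) covers 30, so 40 pages absent. */
	printf("absent pages: %lu\n", nr_absent);
	return 0;
}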
/**
@@ -4143,14 +3968,14 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *ignored)
{
unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
unsigned long node_start_pfn, node_end_pfn;
unsigned long zone_start_pfn, zone_end_pfn;
get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
node_start_pfn);
zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
node_end_pfn);
zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
adjust_zone_range_for_zone_movable(nid, zone_type,
node_start_pfn, node_end_pfn,
@@ -4158,7 +3983,7 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
#else
#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *zones_size)
@@ -4176,7 +4001,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
return zholes_size[zone_type];
}
#endif
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
@@ -4399,10 +4224,10 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
*/
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
@@ -4427,7 +4252,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
free_area_init_core(pgdat, zones_size, zholes_size);
}
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#if MAX_NUMNODES > 1
/*
@@ -4448,170 +4273,6 @@ static inline void setup_nr_node_ids(void)
}
#endif
/**
* add_active_range - Register a range of PFNs backed by physical memory
* @nid: The node ID the range resides on
* @start_pfn: The start PFN of the available physical memory
* @end_pfn: The end PFN of the available physical memory
*
* These ranges are stored in an early_node_map[] and later used by
* free_area_init_nodes() to calculate zone sizes and holes. If the
* range spans a memory hole, it is up to the architecture to ensure
* the memory is not freed by the bootmem allocator. If possible
* the range being registered will be merged with existing ranges.
*/
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn)
{
int i;
mminit_dprintk(MMINIT_TRACE, "memory_register",
"Entering add_active_range(%d, %#lx, %#lx) "
"%d entries of %d used\n",
nid, start_pfn, end_pfn,
nr_nodemap_entries, MAX_ACTIVE_REGIONS);
mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
/* Merge with existing active regions if possible */
for (i = 0; i < nr_nodemap_entries; i++) {
if (early_node_map[i].nid != nid)
continue;
/* Skip if an existing region covers this new one */
if (start_pfn >= early_node_map[i].start_pfn &&
end_pfn <= early_node_map[i].end_pfn)
return;
/* Merge forward if suitable */
if (start_pfn <= early_node_map[i].end_pfn &&
end_pfn > early_node_map[i].end_pfn) {
early_node_map[i].end_pfn = end_pfn;
return;
}
/* Merge backward if suitable */
if (start_pfn < early_node_map[i].start_pfn &&
end_pfn >= early_node_map[i].start_pfn) {
early_node_map[i].start_pfn = start_pfn;
return;
}
}
/* Check that early_node_map is large enough */
if (i >= MAX_ACTIVE_REGIONS) {
printk(KERN_CRIT "More than %d memory regions, truncating\n",
MAX_ACTIVE_REGIONS);
return;
}
early_node_map[i].nid = nid;
early_node_map[i].start_pfn = start_pfn;
early_node_map[i].end_pfn = end_pfn;
nr_nodemap_entries = i + 1;
}
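add_active_range() is deleted outright; memblock_add()/memblock_set_node() now do the equivalent bookkeeping. For reference, a compact userspace rendering of the same merge rules (already covered / extend forward / extend backward / append), under the hypothetical name mock_add_range():

#include <stdio.h>

struct pfn_range { unsigned long start, end; };

static int mock_add_range(struct pfn_range *map, int n,
			  unsigned long s, unsigned long e)
{
	for (int i = 0; i < n; i++) {
		if (s >= map[i].start && e <= map[i].end)
			return n;		/* already covered */
		if (s <= map[i].end && e > map[i].end) {
			map[i].end = e;		/* merge forward */
			return n;
		}
		if (s < map[i].start && e >= map[i].start) {
			map[i].start = s;	/* merge backward */
			return n;
		}
	}
	map[n].start = s;			/* append a new region */
	map[n].end = e;
	return n + 1;
}

int main(void)
{
	struct pfn_range map[8];
	int n = 0;

	n = mock_add_range(map, n, 0x100, 0x200);
	n = mock_add_range(map, n, 0x200, 0x300);	/* merges forward */
	printf("%d region(s): %#lx..%#lx\n", n, map[0].start, map[0].end);
	return 0;
}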
/**
* remove_active_range - Shrink an existing registered range of PFNs
* @nid: The node id the range is on that should be shrunk
* @start_pfn: The new PFN of the range
* @end_pfn: The new PFN of the range
*
* i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
* The map is kept near the end physical page range that has already been
* registered. This function allows an arch to shrink an existing registered
* range.
*/
void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn)
{
int i, j;
int removed = 0;
printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
nid, start_pfn, end_pfn);
/* Find the old active region end and shrink */
for_each_active_range_index_in_nid(i, nid) {
if (early_node_map[i].start_pfn >= start_pfn &&
early_node_map[i].end_pfn <= end_pfn) {
/* clear it */
early_node_map[i].start_pfn = 0;
early_node_map[i].end_pfn = 0;
removed = 1;
continue;
}
if (early_node_map[i].start_pfn < start_pfn &&
early_node_map[i].end_pfn > start_pfn) {
unsigned long temp_end_pfn = early_node_map[i].end_pfn;
early_node_map[i].end_pfn = start_pfn;
if (temp_end_pfn > end_pfn)
add_active_range(nid, end_pfn, temp_end_pfn);
continue;
}
if (early_node_map[i].start_pfn >= start_pfn &&
early_node_map[i].end_pfn > end_pfn &&
early_node_map[i].start_pfn < end_pfn) {
early_node_map[i].start_pfn = end_pfn;
continue;
}
}
if (!removed)
return;
/* remove the blank ones */
for (i = nr_nodemap_entries - 1; i > 0; i--) {
if (early_node_map[i].nid != nid)
continue;
if (early_node_map[i].end_pfn)
continue;
/* we found it, get rid of it */
for (j = i; j < nr_nodemap_entries - 1; j++)
memcpy(&early_node_map[j], &early_node_map[j+1],
sizeof(early_node_map[j]));
j = nr_nodemap_entries - 1;
memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
nr_nodemap_entries--;
}
}
/**
* remove_all_active_ranges - Remove all currently registered regions
*
* During discovery, it may be found that a table like SRAT is invalid
* and an alternative discovery method must be used. This function removes
* all currently registered regions.
*/
void __init remove_all_active_ranges(void)
{
memset(early_node_map, 0, sizeof(early_node_map));
nr_nodemap_entries = 0;
}
/* Compare two active node_active_regions */
static int __init cmp_node_active_region(const void *a, const void *b)
{
struct node_active_region *arange = (struct node_active_region *)a;
struct node_active_region *brange = (struct node_active_region *)b;
/* Done this way to avoid overflows */
if (arange->start_pfn > brange->start_pfn)
return 1;
if (arange->start_pfn < brange->start_pfn)
return -1;
return 0;
}
/* sort the node_map by start_pfn */
void __init sort_node_map(void)
{
sort(early_node_map, (size_t)nr_nodemap_entries,
sizeof(struct node_active_region),
cmp_node_active_region, NULL);
}
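sort_node_map() can disappear because memblock keeps its region array ordered by base address at insertion time, so consumers never observe an unsorted map. A sketch of that invariant with the same hypothetical mock types as above:

#include <string.h>

struct mock_region { unsigned long start_pfn, end_pfn; int nid; };

/* Keep the array ordered by start_pfn when inserting; this is the
 * invariant memblock maintains, making a later sort pass unnecessary. */
static int insert_sorted(struct mock_region *map, int n, struct mock_region r)
{
	int i = 0;

	while (i < n && map[i].start_pfn < r.start_pfn)
		i++;
	memmove(&map[i + 1], &map[i], (size_t)(n - i) * sizeof(*map));
	map[i] = r;
	return n + 1;
}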
/**
* node_map_pfn_alignment - determine the maximum internode alignment
*
@@ -4634,15 +4295,11 @@ void __init sort_node_map(void)
unsigned long __init node_map_pfn_alignment(void)
{
unsigned long accl_mask = 0, last_end = 0;
unsigned long start, end, mask;
int last_nid = -1;
int i;
for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
int nid = early_node_map[i].nid;
unsigned long start = early_node_map[i].start_pfn;
unsigned long end = early_node_map[i].end_pfn;
unsigned long mask;
int i, nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
if (!start || last_nid < 0 || last_nid == nid) {
last_nid = nid;
last_end = end;
@@ -4669,12 +4326,12 @@ unsigned long __init node_map_pfn_alignment(void)
/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
int i;
unsigned long min_pfn = ULONG_MAX;
unsigned long start_pfn;
int i;
/* Assuming a sorted map, the first range found has the starting pfn */
for_each_active_range_index_in_nid(i, nid)
min_pfn = min(min_pfn, early_node_map[i].start_pfn);
for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
min_pfn = min(min_pfn, start_pfn);
if (min_pfn == ULONG_MAX) {
printk(KERN_WARNING
@@ -4703,15 +4360,16 @@ unsigned long __init find_min_pfn_with_active_regions(void)
*/
static unsigned long __init early_calculate_totalpages(void)
{
int i;
unsigned long totalpages = 0;
unsigned long start_pfn, end_pfn;
int i, nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
unsigned long pages = end_pfn - start_pfn;
for (i = 0; i < nr_nodemap_entries; i++) {
unsigned long pages = early_node_map[i].end_pfn -
early_node_map[i].start_pfn;
totalpages += pages;
if (pages)
node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
node_set_state(nid, N_HIGH_MEMORY);
}
return totalpages;
}
@@ -4766,6 +4424,8 @@ restart:
/* Spread kernelcore memory as evenly as possible throughout nodes */
kernelcore_node = required_kernelcore / usable_nodes;
for_each_node_state(nid, N_HIGH_MEMORY) {
unsigned long start_pfn, end_pfn;
/*
* Recalculate kernelcore_node if the division per node
* now exceeds what is necessary to satisfy the requested
@@ -4782,13 +4442,10 @@ restart:
kernelcore_remaining = kernelcore_node;
/* Go through each range of PFNs within this node */
for_each_active_range_index_in_nid(i, nid) {
unsigned long start_pfn, end_pfn;
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
unsigned long size_pages;
start_pfn = max(early_node_map[i].start_pfn,
zone_movable_pfn[nid]);
end_pfn = early_node_map[i].end_pfn;
start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;
@@ -4890,11 +4547,8 @@ static void check_for_regular_memory(pg_data_t *pgdat)
*/
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
unsigned long nid;
int i;
/* Sort early_node_map as initialisation assumes it is sorted */
sort_node_map();
unsigned long start_pfn, end_pfn;
int i, nid;
/* Record where the zone boundaries are */
memset(arch_zone_lowest_possible_pfn, 0,
@@ -4941,11 +4595,9 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
}
/* Print out the early_node_map[] */
printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
for (i = 0; i < nr_nodemap_entries; i++)
printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
early_node_map[i].start_pfn,
early_node_map[i].end_pfn);
printk("Early memory PFN ranges\n");
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
/* Initialise every node */
mminit_verify_pageflags_layout();
@@ -4998,7 +4650,7 @@ static int __init cmdline_parse_movablecore(char *p)
early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
* set_dma_reserve - set the specified number of pages reserved in the first zone