memblock: Kill early_node_map[]
Now all ARCH_POPULATES_NODE_MAP archs select HAVE_MEMBLOCK_NODE_MAP - there's
no user of early_node_map[] left. Kill early_node_map[] and replace
ARCH_POPULATES_NODE_MAP with HAVE_MEMBLOCK_NODE_MAP. Also, relocate
for_each_mem_pfn_range() and its helper from mm.h to memblock.h, as
page_alloc.c no longer hosts an alternative implementation.

This change is ultimately a one-to-one mapping and shouldn't cause any
observable difference; however, after the recent changes there are some
functions which would now fit memblock.c better than page_alloc.c, and
depending on HAVE_MEMBLOCK_NODE_MAP instead of HAVE_MEMBLOCK doesn't make
much sense for some of them. Further cleanup of the functions inside the
HAVE_MEMBLOCK_NODE_MAP block in mm.h would be nice.

-v2: Fix compile bug introduced by mis-spelling CONFIG_HAVE_MEMBLOCK_NODE_MAP
as CONFIG_MEMBLOCK_HAVE_NODE_MAP in mmzone.h. Reported by Stephen Rothwell.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Chen Liqin <liqin.chen@sunplusct.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
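For context, a minimal sketch (not part of this patch) of the registration
sequence the updated mm.h comment below describes. The function name,
physical ranges, and node ids are hypothetical; memblock_add_node() and
free_area_init_nodes() are the interfaces the diff refers to.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>

/* Hypothetical arch setup: register memory ranges with node ids, then
 * let the core mm build the node/zone data from the memblock node map. */
static void __init example_arch_mem_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	/* Register each physical range together with its node id. */
	memblock_add_node(0x00000000, 0x10000000, 0);	/* node 0, 256MiB */
	memblock_add_node(0x40000000, 0x10000000, 1);	/* node 1, 256MiB */

	/* Pass in the PFN each zone ends at; one zone here for brevity. */
	max_zone_pfns[ZONE_NORMAL] = 0x50000000UL >> PAGE_SHIFT;

	free_area_init_nodes(max_zone_pfns);
}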
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1252,43 +1252,34 @@ static inline void pgtable_page_dtor(struct page *page)
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
  * zones, allocate the backing mem_map and account for memory holes in a more
  * architecture independent manner. This is a substitute for creating the
  * zone_sizes[] and zholes_size[] arrays and passing them to
  * free_area_init_node()
  *
  * An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
  * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
  * usage, an architecture is expected to do something like
  *
  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
  * 						max_highmem_pfn};
  * for_each_valid_physical_page_range()
- * 	add_active_range(node_id, start_pfn, end_pfn)
+ * 	memblock_add_node(base, size, nid)
  * free_area_init_nodes(max_zone_pfns);
  *
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range.  Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
  *
  * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
  */
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-#ifndef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
-#endif
 unsigned long node_map_pfn_alignment(void);
 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
 						unsigned long end_pfn);
@@ -1303,28 +1294,9 @@ int add_from_early_node_map(struct range *range, int az,
 					int nr_range, int nid);
 extern void sparse_memory_present_with_active_regions(int nid);
 
-extern void __next_mem_pfn_range(int *idx, int nid,
-				 unsigned long *out_start_pfn,
-				 unsigned long *out_end_pfn, int *out_nid);
-
-/**
- * for_each_mem_pfn_range - early memory pfn range iterator
- * @i: an integer used as loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to ulong for start pfn of the range, can be %NULL
- * @p_end: ptr to ulong for end pfn of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
- *
- * Walks over configured memory ranges.  Available after early_node_map is
- * populated.
- */
-#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
-	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
-	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
 static inline int __early_pfn_to_nid(unsigned long pfn)
 {
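A similarly hedged sketch of the relocated iterator, now used via
<linux/memblock.h>: the dump function is illustrative, while
for_each_mem_pfn_range() and __next_mem_pfn_range() are the symbols this
patch moves.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/nodemask.h>

/* Illustrative only: print every registered early pfn range. */
static void __init example_dump_early_ranges(void)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	/* MAX_NUMNODES selects ranges on all nodes. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		pr_info("node %d: pfns [%#lx-%#lx)\n",
			nid, start_pfn, end_pfn);
}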