mm/page_alloc.c: cleanups
This patch contains the following cleanups:

- make the following needlessly global variables static:
  - required_kernelcore
  - zone_movable_pfn[]

- make the following needlessly global functions static:
  - move_freepages()
  - move_freepages_block()
  - setup_pageset()
  - find_usable_zone_for_movable()
  - adjust_zone_range_for_zone_movable()
  - __absent_pages_in_range()
  - find_min_pfn_for_node()
  - find_zone_movable_pfns_for_nodes()

Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b69a7288ea
parent 2be0ffe2b2
committed by Linus Torvalds
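The pattern is identical throughout the diff below: a symbol with no users outside page_alloc.c is given internal linkage. As a minimal standalone sketch of why this matters (hypothetical names, not code from this patch):

/* Only referenced inside this file, so internal linkage is enough:
 * "static" keeps the name out of the kernel-wide symbol namespace,
 * avoids clashes with identically named symbols elsewhere, and lets
 * the compiler inline or discard the symbol entirely if unused.
 */
static unsigned long example_reserved_pages;

static int example_pages_available(unsigned long want)
{
	return want <= example_reserved_pages;
}

/* Still global: assumed to be declared in a header and called from
 * other files, so it must keep external linkage.
 */
int example_reserve(unsigned long nr)
{
	example_reserved_pages = nr;
	return example_pages_available(nr);
}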
@@ -153,9 +153,9 @@ static unsigned long __meminitdata dma_reserve;
   static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
   static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
-  unsigned long __initdata required_kernelcore;
+  static unsigned long __initdata required_kernelcore;
   static unsigned long __initdata required_movablecore;
-  unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 
   /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
   int movable_zone;
@@ -674,9 +674,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
+static int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
 			  int migratetype)
 {
 	struct page *page;
 	unsigned long order;
@@ -715,7 +715,8 @@ int move_freepages(struct zone *zone,
 	return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
@@ -2652,7 +2653,7 @@ static int zone_batchsize(struct zone *zone)
 	return batch;
 }
 
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
 	struct per_cpu_pages *pcp;
 
@@ -3099,7 +3100,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
  * assumption is made that zones within a node are ordered in monotonic
  * increasing memory addresses so that the "highest" populated zone is used
  */
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
 {
 	int zone_index;
 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3125,7 +3126,7 @@ void __init find_usable_zone_for_movable(void)
  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
  * zones within a node are in order of monotonic increases memory addresses
  */
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
 					unsigned long zone_type,
 					unsigned long node_start_pfn,
 					unsigned long node_end_pfn,
@@ -3186,7 +3187,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
@@ -3723,7 +3724,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(int nid)
+static unsigned long __init find_min_pfn_for_node(int nid)
 {
 	int i;
 	unsigned long min_pfn = ULONG_MAX;
@@ -3795,7 +3796,7 @@ static unsigned long __init early_calculate_totalpages(void)
  * memory. When they don't, some nodes will have more kernelcore than
  * others
  */
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 {
 	int i, nid;
 	unsigned long usable_startpfn;
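For what it's worth, needlessly global symbols like the ones above are usually found with sparse (run over the tree via make C=1), which warns "symbol 'x' was not declared. Should it be static?" for any file-scope definition that has no extern declaration in a header.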