x86: Use absent_pages_in_range() instead of memblock_x86_hole_size()
memblock_x86_hole_size() calculates the total size of holes in a given
range according to memblock and is used by numa emulation code and
numa_meminfo_cover_memory().  Since conversion to MEMBLOCK_NODE_MAP,
absent_pages_in_range() also uses memblock and gives the same result.

This patch replaces memblock_x86_hole_size() uses with
absent_pages_in_range().  After the conversion the x86 function doesn't
have any user left and is killed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310462166-31469-12-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
commit 474b881bf4 (parent 6b5d41a1b9), committed by H. Peter Anvin
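The equivalence the patch relies on is easy to model outside the kernel: the hole size of a physical range is just the part of the range not covered by any present memory region, which is what both the removed memblock_x86_hole_size() and absent_pages_in_range() report once both walk the same memblock data. The sketch below is a minimal user-space model of that computation, not kernel code; the toy region table, absent_pages() and mem_hole_size() here are illustrative stand-ins for the real helpers touched by this patch.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Toy "memblock" memory map: physical ranges that contain RAM. */
struct region { uint64_t base, size; };
static const struct region memory[] = {
        { 0x00000000, 0x0009f000 },     /* low RAM        */
        { 0x00100000, 0x3ff00000 },     /* 1 MiB .. 1 GiB */
};

/* Model of absent_pages_in_range(): pages in [start_pfn, end_pfn) that
 * no memory region covers. */
static uint64_t absent_pages(uint64_t start_pfn, uint64_t end_pfn)
{
        uint64_t present = 0;

        for (size_t i = 0; i < sizeof(memory) / sizeof(memory[0]); i++) {
                uint64_t r_start = (memory[i].base + PAGE_SIZE - 1) >> PAGE_SHIFT;
                uint64_t r_end = (memory[i].base + memory[i].size) >> PAGE_SHIFT;

                if (r_start < start_pfn)
                        r_start = start_pfn;
                if (r_end > end_pfn)
                        r_end = end_pfn;
                if (r_start < r_end)
                        present += r_end - r_start;
        }
        return (end_pfn - start_pfn) - present;
}

/* Hole size in bytes of [start, end): the quantity the removed
 * memblock_x86_hole_size() computed by summing active regions. */
static uint64_t mem_hole_size(uint64_t start, uint64_t end)
{
        uint64_t start_pfn = (start + PAGE_SIZE - 1) >> PAGE_SHIFT;    /* PFN_UP   */
        uint64_t end_pfn = end >> PAGE_SHIFT;                          /* PFN_DOWN */

        if (start_pfn < end_pfn)
                return absent_pages(start_pfn, end_pfn) << PAGE_SHIFT; /* PFN_PHYS */
        return 0;
}

int main(void)
{
        /* Reports the 0x9f000..0x100000 gap of the toy map as hole bytes. */
        printf("hole bytes in first 1 GiB: %llu\n",
               (unsigned long long)mem_hole_size(0, 1ULL << 30));
        return 0;
}

The same subtraction is what the old x86 code did by summing active regions and what the generic code does by counting absent pages, which is why the two can be swapped without changing behaviour.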
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -6,6 +6,4 @@
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
 
-u64 memblock_x86_hole_size(u64 start, u64 end);
-
 #endif
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -32,55 +32,3 @@ void __init memblock_x86_free_range(u64 start, u64 end)
 
         memblock_free(start, end - start);
 }
-
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
- */
-static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
-                                                  unsigned long start_pfn,
-                                                  unsigned long last_pfn,
-                                                  unsigned long *ei_startpfn,
-                                                  unsigned long *ei_endpfn)
-{
-        u64 align = PAGE_SIZE;
-
-        *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
-        *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
-
-        /* Skip map entries smaller than a page */
-        if (*ei_startpfn >= *ei_endpfn)
-                return 0;
-
-        /* Skip if map is outside the node */
-        if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
-                return 0;
-
-        /* Check for overlaps */
-        if (*ei_startpfn < start_pfn)
-                *ei_startpfn = start_pfn;
-        if (*ei_endpfn > last_pfn)
-                *ei_endpfn = last_pfn;
-
-        return 1;
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init memblock_x86_hole_size(u64 start, u64 end)
-{
-        unsigned long start_pfn = start >> PAGE_SHIFT;
-        unsigned long last_pfn = end >> PAGE_SHIFT;
-        unsigned long ei_startpfn, ei_endpfn, ram = 0;
-        struct memblock_region *r;
-
-        for_each_memblock(memory, r)
-                if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
-                                                    &ei_startpfn, &ei_endpfn))
-                        ram += ei_endpfn - ei_startpfn;
-
-        return end - start - ((u64)ram << PAGE_SHIFT);
-}
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -475,8 +475,8 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
                         numaram = 0;
         }
 
-        e820ram = max_pfn - (memblock_x86_hole_size(0,
-                                        PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
+        e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
+
         /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
         if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
                 printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -28,6 +28,16 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
         return -ENOENT;
 }
 
+static u64 mem_hole_size(u64 start, u64 end)
+{
+        unsigned long start_pfn = PFN_UP(start);
+        unsigned long end_pfn = PFN_DOWN(end);
+
+        if (start_pfn < end_pfn)
+                return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
+        return 0;
+}
+
 /*
  * Sets up nid to range from @start to @end. The return value is -errno if
  * something went wrong, 0 otherwise.
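One detail of the new helper is worth noting: PFN_UP() on the start and PFN_DOWN() on the end round the unaligned edges inward, so partial pages at either boundary are never counted as hole space, matching the round_up()/round_down() pair in the removed memblock_x86_find_active_region(). A small user-space model of that rounding follows; the macro definitions are illustrative stand-ins for the kernel's PFN_* helpers.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Illustrative stand-ins for the kernel's PFN_UP()/PFN_DOWN(). */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
        uint64_t start = 0x1800;        /* mid-page start: rounds up to PFN 2 */
        uint64_t end = 0x4800;          /* mid-page end: rounds down to PFN 4 */

        printf("PFN_UP(start)=%llu PFN_DOWN(end)=%llu\n",
               (unsigned long long)PFN_UP(start),
               (unsigned long long)PFN_DOWN(end));
        return 0;
}

So a range like [0x1800, 0x4800) only counts PFNs 2 and 3, and a range narrower than one page makes start_pfn >= end_pfn, which is why mem_hole_size() falls back to returning 0.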
@@ -89,7 +99,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
          * Calculate target node size. x86_32 freaks on __udivdi3() so do
          * the division in ulong number of pages and convert back.
          */
-        size = max_addr - addr - memblock_x86_hole_size(addr, max_addr);
+        size = max_addr - addr - mem_hole_size(addr, max_addr);
         size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);
 
         /*
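The __udivdi3() remark in the comment above refers to 64-bit division on 32-bit x86: dividing a u64 directly would pull in a libgcc helper the kernel doesn't link against, so the byte count is shifted down to pages (which fits in an unsigned long), divided, and shifted back. A user-space sketch of the same trick; per_node_size() is an illustrative name, not a function from this patch.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Divide a 64-bit byte count by nr_nodes without a 64-bit division:
 * convert to pages, divide as unsigned long, convert back to bytes. */
static uint64_t per_node_size(uint64_t bytes, int nr_nodes)
{
        unsigned long pages = (unsigned long)(bytes >> PAGE_SHIFT);

        return (uint64_t)(pages / nr_nodes) << PAGE_SHIFT;
}

int main(void)
{
        /* 16 GiB split three ways, rounded down to a whole number of pages. */
        printf("%llu\n", (unsigned long long)per_node_size(1ULL << 34, 3));
        return 0;
}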
@@ -135,8 +145,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
                  * Continue to add memory to this fake node if its
                  * non-reserved memory is less than the per-node size.
                  */
-                while (end - start -
-                       memblock_x86_hole_size(start, end) < size) {
+                while (end - start - mem_hole_size(start, end) < size) {
                         end += FAKE_NODE_MIN_SIZE;
                         if (end > limit) {
                                 end = limit;
@@ -150,7 +159,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
                  * this one must extend to the boundary.
                  */
                 if (end < dma32_end && dma32_end - end -
-                    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+                    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                         end = dma32_end;
 
                 /*
@@ -158,8 +167,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
                  * next node, this one must extend to the end of the
                  * physical node.
                  */
-                if (limit - end -
-                    memblock_x86_hole_size(end, limit) < size)
+                if (limit - end - mem_hole_size(end, limit) < size)
                         end = limit;
 
                 ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
@@ -180,7 +188,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
 {
         u64 end = start + size;
 
-        while (end - start - memblock_x86_hole_size(start, end) < size) {
+        while (end - start - mem_hole_size(start, end) < size) {
                 end += FAKE_NODE_MIN_SIZE;
                 if (end > max_addr) {
                         end = max_addr;
@@ -211,8 +219,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
          * creates a uniform distribution of node sizes across the entire
          * machine (but not necessarily over physical nodes).
          */
-        min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
-                                                MAX_NUMNODES;
+        min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
         min_size = max(min_size, FAKE_NODE_MIN_SIZE);
         if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
                 min_size = (min_size + FAKE_NODE_MIN_SIZE) &
@@ -252,7 +259,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                  * this one must extend to the boundary.
                  */
                 if (end < dma32_end && dma32_end - end -
-                    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+                    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                         end = dma32_end;
 
                 /*
@@ -260,8 +267,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                  * next node, this one must extend to the end of the
                  * physical node.
                  */
-                if (limit - end -
-                    memblock_x86_hole_size(end, limit) < size)
+                if (limit - end - mem_hole_size(end, limit) < size)
                         end = limit;
 
                 ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,