x86-64, NUMA: consolidate and improve memblk sanity checks
memblk sanity check was scattered around and incomplete. Consolidate and improve. * Conflict detection and cutoff_node() logic are moved to numa_cleanup_meminfo(). * numa_cleanup_meminfo() clears the unused memblks before returning. * Check and warn about invalid input parameters in numa_add_memblk(). * Check that the maximum number of memblks isn't exceeded in numa_add_memblk(). * numa_cleanup_meminfo() is now called before numa_emulation() so that the emulation code also uses the cleaned up version. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Yinghai Lu <yinghai@kernel.org> Cc: Brian Gerst <brgerst@gmail.com> Cc: Cyrill Gorcunov <gorcunov@gmail.com> Cc: Shaohui Zheng <shaohui.zheng@intel.com> Cc: David Rientjes <rientjes@google.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: H. Peter Anvin <hpa@linux.intel.com>
This commit is contained in:
@@ -189,37 +189,23 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static __init int conflicting_memblks(unsigned long start, unsigned long end)
|
|
||||||
{
|
|
||||||
struct numa_meminfo *mi = &numa_meminfo;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
for (i = 0; i < mi->nr_blks; i++) {
|
|
||||||
struct numa_memblk *blk = &mi->blk[i];
|
|
||||||
|
|
||||||
if (blk->start == blk->end)
|
|
||||||
continue;
|
|
||||||
if (blk->end > start && blk->start < end)
|
|
||||||
return blk->nid;
|
|
||||||
if (blk->end == end && blk->start == start)
|
|
||||||
return blk->nid;
|
|
||||||
}
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int __init numa_add_memblk(int nid, u64 start, u64 end)
|
int __init numa_add_memblk(int nid, u64 start, u64 end)
|
||||||
{
|
{
|
||||||
struct numa_meminfo *mi = &numa_meminfo;
|
struct numa_meminfo *mi = &numa_meminfo;
|
||||||
int i;
|
|
||||||
|
|
||||||
i = conflicting_memblks(start, end);
|
/* ignore zero length blks */
|
||||||
if (i == nid) {
|
if (start == end)
|
||||||
printk(KERN_WARNING "NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
|
return 0;
|
||||||
nid, start, end, numa_nodes[i].start, numa_nodes[i].end);
|
|
||||||
} else if (i >= 0) {
|
/* whine about and ignore invalid blks */
|
||||||
printk(KERN_ERR "NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
|
if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
|
||||||
nid, start, end, i,
|
pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
|
||||||
numa_nodes[i].start, numa_nodes[i].end);
|
nid, start, end);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mi->nr_blks >= NR_NODE_MEMBLKS) {
|
||||||
|
pr_err("NUMA: too many memblk ranges\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -237,22 +223,6 @@ static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
|
|||||||
(mi->nr_blks - idx) * sizeof(mi->blk[0]));
|
(mi->nr_blks - idx) * sizeof(mi->blk[0]));
|
||||||
}
|
}
|
||||||
|
|
||||||
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
|
|
||||||
{
|
|
||||||
struct bootnode *nd = &numa_nodes[i];
|
|
||||||
|
|
||||||
if (nd->start < start) {
|
|
||||||
nd->start = start;
|
|
||||||
if (nd->end < nd->start)
|
|
||||||
nd->start = nd->end;
|
|
||||||
}
|
|
||||||
if (nd->end > end) {
|
|
||||||
nd->end = end;
|
|
||||||
if (nd->start > nd->end)
|
|
||||||
nd->start = nd->end;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Initialize bootmem allocator for a node */
|
/* Initialize bootmem allocator for a node */
|
||||||
void __init
|
void __init
|
||||||
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
|
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
|
||||||
@@ -301,15 +271,44 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
|
|||||||
|
|
||||||
static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
|
static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
|
||||||
{
|
{
|
||||||
|
const u64 low = 0;
|
||||||
|
const u64 high = (u64)max_pfn << PAGE_SHIFT;
|
||||||
int i, j, k;
|
int i, j, k;
|
||||||
|
|
||||||
for (i = 0; i < mi->nr_blks; i++) {
|
for (i = 0; i < mi->nr_blks; i++) {
|
||||||
struct numa_memblk *bi = &mi->blk[i];
|
struct numa_memblk *bi = &mi->blk[i];
|
||||||
|
|
||||||
|
/* make sure all blocks are inside the limits */
|
||||||
|
bi->start = max(bi->start, low);
|
||||||
|
bi->end = min(bi->end, high);
|
||||||
|
|
||||||
|
/* and there's no empty block */
|
||||||
|
if (bi->start == bi->end) {
|
||||||
|
numa_remove_memblk_from(i--, mi);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
for (j = i + 1; j < mi->nr_blks; j++) {
|
for (j = i + 1; j < mi->nr_blks; j++) {
|
||||||
struct numa_memblk *bj = &mi->blk[j];
|
struct numa_memblk *bj = &mi->blk[j];
|
||||||
unsigned long start, end;
|
unsigned long start, end;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* See whether there are overlapping blocks. Whine
|
||||||
|
* about but allow overlaps of the same nid. They
|
||||||
|
* will be merged below.
|
||||||
|
*/
|
||||||
|
if (bi->end > bj->start && bi->start < bj->end) {
|
||||||
|
if (bi->nid != bj->nid) {
|
||||||
|
pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
|
||||||
|
bi->nid, bi->start, bi->end,
|
||||||
|
bj->nid, bj->start, bj->end);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
|
||||||
|
bi->nid, bi->start, bi->end,
|
||||||
|
bj->start, bj->end);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Join together blocks on the same node, holes
|
* Join together blocks on the same node, holes
|
||||||
* between which don't overlap with memory on other
|
* between which don't overlap with memory on other
|
||||||
@@ -317,8 +316,8 @@ static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
|
|||||||
*/
|
*/
|
||||||
if (bi->nid != bj->nid)
|
if (bi->nid != bj->nid)
|
||||||
continue;
|
continue;
|
||||||
start = min(bi->start, bj->start);
|
start = max(min(bi->start, bj->start), low);
|
||||||
end = max(bi->end, bj->end);
|
end = min(max(bi->end, bj->end), high);
|
||||||
for (k = 0; k < mi->nr_blks; k++) {
|
for (k = 0; k < mi->nr_blks; k++) {
|
||||||
struct numa_memblk *bk = &mi->blk[k];
|
struct numa_memblk *bk = &mi->blk[k];
|
||||||
|
|
||||||
@@ -338,6 +337,11 @@ static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
|
||||||
|
mi->blk[i].start = mi->blk[i].end = 0;
|
||||||
|
mi->blk[i].nid = NUMA_NO_NODE;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -824,10 +828,8 @@ void __init initmem_init(void)
|
|||||||
if (numa_init[i]() < 0)
|
if (numa_init[i]() < 0)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* clean up the node list */
|
if (numa_cleanup_meminfo(&numa_meminfo) < 0)
|
||||||
for (j = 0; j < MAX_NUMNODES; j++)
|
continue;
|
||||||
cutoff_node(j, 0, max_pfn << PAGE_SHIFT);
|
|
||||||
|
|
||||||
#ifdef CONFIG_NUMA_EMU
|
#ifdef CONFIG_NUMA_EMU
|
||||||
setup_physnodes(0, max_pfn << PAGE_SHIFT);
|
setup_physnodes(0, max_pfn << PAGE_SHIFT);
|
||||||
if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
|
if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
|
||||||
@@ -836,9 +838,6 @@ void __init initmem_init(void)
|
|||||||
nodes_clear(node_possible_map);
|
nodes_clear(node_possible_map);
|
||||||
nodes_clear(node_online_map);
|
nodes_clear(node_online_map);
|
||||||
#endif
|
#endif
|
||||||
if (numa_cleanup_meminfo(&numa_meminfo) < 0)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (numa_register_memblks(&numa_meminfo) < 0)
|
if (numa_register_memblks(&numa_meminfo) < 0)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
Reference in New Issue
Block a user