sched, numa: replace MAX_NUMNODES with nr_node_ids in kernel/sched.c
  * Replace usages of MAX_NUMNODES with nr_node_ids in kernel/sched.c,
    where appropriate. This saves some allocated space as well as many
    wasted cycles going through node entries that are non-existent.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
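For context: MAX_NUMNODES is the compile-time maximum number of nodes (1 << CONFIG_NODES_SHIFT), while nr_node_ids is the run-time count of possible node IDs on the booted machine, which is usually far smaller. The userspace sketch below is not kernel code; the values and variable names merely mirror the kernel symbols to illustrate why sizing allocations and loop bounds by the run-time value saves memory and iterations:

/*
 * Userspace sketch only -- not kernel code.  The two "node count"
 * values stand in for the kernel's MAX_NUMNODES (compile-time maximum)
 * and nr_node_ids (run-time count of possible node ids); both values
 * here are made up for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES 512			/* e.g. 1 << CONFIG_NODES_SHIFT */
static unsigned int nr_node_ids = 4;		/* e.g. a 4-node machine */

int main(void)
{
	/* Before: array sized for every node the kernel could ever support. */
	void **per_node_max = calloc(MAX_NUMNODES, sizeof(void *));
	/* After: array sized only for node ids that can exist on this boot. */
	void **per_node_ids = calloc(nr_node_ids, sizeof(void *));

	unsigned int i, visited = 0;

	/* Loops bounded by nr_node_ids skip the non-existent node entries. */
	for (i = 0; i < nr_node_ids; i++)
		visited++;

	printf("bytes: %zu -> %zu, loop iterations: %u (was %u)\n",
	       (size_t)MAX_NUMNODES * sizeof(void *),
	       (size_t)nr_node_ids * sizeof(void *),
	       visited, (unsigned int)MAX_NUMNODES);

	free(per_node_max);
	free(per_node_ids);
	return 0;
}

Since every possible node ID is below nr_node_ids, wrap-around expressions such as (node + i) % nr_node_ids in the diff below still visit every candidate node; only the non-existent entries are skipped.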
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6538,9 +6538,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -6734,7 +6734,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -6927,7 +6927,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7066,7 +7066,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7090,7 +7090,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7129,9 +7129,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7184,7 +7184,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {