cpumask: IA64: Introduce cpumask_of_{node,pcibus} to replace {node,pcibus}_to_cpumask
Impact: New APIs

The old node_to_cpumask/node_to_pcibus returned a cpumask_t: these
return a pointer to a struct cpumask.  Part of removing cpumasks from
the stack.

We can also use the new for_each_cpu_and() to avoid a temporary
cpumask, and drop a gratuitous test in sn_topology_show.

(Includes fix from KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>)

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
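For illustration, a minimal kernel-style sketch of the conversion pattern this commit applies. The function names (old_count_usable_cpus, new_count_usable_cpus) and their parameters are hypothetical, chosen only to contrast the old stack-allocated cpumask_t usage with the new const struct cpumask * interface; both API families are assumed to be available, as they were in kernels of this era.

#include <linux/cpumask.h>
#include <linux/topology.h>

/*
 * Old style: node_to_cpumask() returns a whole cpumask_t by value, so a
 * temporary mask lives on the stack and is narrowed with cpus_and().
 */
static int old_count_usable_cpus(int node, cpumask_t domain)
{
	cpumask_t mask = node_to_cpumask(node);	/* on-stack copy */
	int cpu, count = 0;

	cpus_and(mask, mask, domain);
	for_each_cpu_mask(cpu, mask)
		if (cpu_online(cpu))
			count++;
	return count;
}

/*
 * New style: cpumask_of_node() returns a pointer to an existing mask, and
 * for_each_cpu_and() walks the intersection directly, so no on-stack
 * temporary is needed.
 */
static int new_count_usable_cpus(int node, const struct cpumask *domain)
{
	int cpu, count = 0;

	for_each_cpu_and(cpu, cpumask_of_node(node), domain)
		if (cpu_online(cpu))
			count++;
	return count;
}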
@@ -1001,7 +1001,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
 
 	/* We know a gsi to node mapping! */
@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
-		cpumask_t cpu_mask;
+		const struct cpumask *cpu_mask;
 
 		iosapic_index = find_iosapic(gsi);
 		if (iosapic_index < 0 ||
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;
 
-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
-		for_each_cpu_mask(numa_cpu, cpu_mask) {
-			if (!cpu_online(numa_cpu))
-				cpu_clear(numa_cpu, cpu_mask);
+		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
+		num_cpus = 0;
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
+			if (cpu_online(numa_cpu))
+				num_cpus++;
 		}
 
-		num_cpus = cpus_weight(cpu_mask);
-
 		if (!num_cpus)
 			goto skip_numa_setup;
 
 		/* Use irq assignment to distribute across cpus in node */
 		cpu_index = irq % num_cpus;
 
-		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-			numa_cpu = next_cpu(numa_cpu, cpu_mask);
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
+			if (cpu_online(numa_cpu) && i++ >= cpu_index)
+				break;
 
-		if (numa_cpu != NR_CPUS)
+		if (numa_cpu < nr_cpu_ids)
 			return cpu_physical_id(numa_cpu);
 	}
 skip_numa_setup:
@@ -731,7 +730,7 @@ skip_numa_setup:
 	 * case of NUMA.)
 	 */
 	do {
-		if (++cpu >= NR_CPUS)
+		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
 