cpumask: use new cpumask functions throughout x86
Impact: cleanup

1) &cpu_online_map -> cpu_online_mask
2) first_cpu/next_cpu_nr -> cpumask_first/cpumask_next
3) cpu_*_map manipulation -> init_cpu_* / set_cpu_*

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
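The conversions all follow one pattern: the old API passed cpumask_t values and wrote to the maps directly (&cpu_online_map, first_cpu(), cpu_set()), while the new API works through const struct cpumask pointers and accessor functions, which later allows the masks to move off-stack. A minimal sketch of the equivalences, using a hypothetical helper that is not part of this patch:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Hypothetical helper illustrating the old -> new cpumask API mapping. */
    static void cpumask_api_sketch(int cpu, struct cpumask *retmask)
    {
    	/* 1) &cpu_online_map -> cpu_online_mask (value -> const pointer) */
    	const struct cpumask *online = cpu_online_mask;

    	/* 2) first_cpu()/next_cpu_nr() -> cpumask_first()/cpumask_next() */
    	pr_info("first online: %d, next after %d: %d\n",
    		cpumask_first(online), cpu, cpumask_next(cpu, online));

    	/* old: cpus_clear(*retmask); cpu_set(cpu, *retmask); */
    	cpumask_clear(retmask);
    	cpumask_set_cpu(cpu, retmask);

    	/* 3) direct cpu_*_map writes -> set_cpu_*() accessors */
    	set_cpu_possible(cpu, true);
    }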
@@ -174,11 +174,11 @@ static inline int early_cpu_to_node(int cpu)
 
 static inline const cpumask_t *cpumask_of_node(int node)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
 static inline int node_to_first_cpu(int node)
 {
-	return first_cpu(cpu_online_map);
+	return cpumask_first(cpu_online_mask);
 }
 
 static inline void setup_node_to_cpumask_map(void) { }
@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void)
 	return 1;
 }
 
-static const cpumask_t *bigsmp_target_cpus(void)
+static const struct cpumask *bigsmp_target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return &cpu_online_map;
+	return cpu_online_mask;
 #else
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+	return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
 }
 
 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ } /* NULL entry stops DMI scanning */
 };
 
-static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static int probe_bigsmp(void)
@@ -460,9 +460,9 @@ static const cpumask_t *target_cpus_cluster(void)
 	return cpu_all_mask;
 }
 
-static const cpumask_t *es7000_target_cpus(void)
+static const struct cpumask *es7000_target_cpus(void)
 {
-	return &cpumask_of_cpu(smp_processor_id());
+	return cpumask_of(smp_processor_id());
 }
 
 static unsigned long
@@ -517,7 +517,7 @@ static void es7000_setup_apic_routing(void)
 		"Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
 		"Physical Cluster" : "Logical Cluster",
-		nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+		nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
 }
 
 static int es7000_apicid_to_node(int logical_apicid)
@@ -192,7 +192,7 @@ static const cpumask_t *summit_target_cpus(void)
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0. IRQ balancing will spread load
 	 */
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
@@ -249,7 +249,7 @@ void cmci_rediscover(int dying)
 	for_each_online_cpu (cpu) {
 		if (cpu == dying)
 			continue;
-		if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
 			continue;
 		/* Recheck banks in case CPUs don't all have the same */
 		if (cmci_supported(&banks))
@@ -143,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
 	if (*pos == 0)	/* just in case, cpu 0 is not the first */
-		*pos = first_cpu(cpu_online_map);
+		*pos = cpumask_first(cpu_online_mask);
 	else
-		*pos = next_cpu_nr(*pos - 1, cpu_online_map);
+		*pos = cpumask_next(*pos - 1, cpu_online_mask);
 	if ((*pos) < nr_cpu_ids)
 		return &cpu_data(*pos);
 	return NULL;
@@ -324,7 +324,7 @@ void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
 	for (;;) {
@@ -296,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	__flush_tlb_all();
 #endif
 
-	/* This must be done before setting cpu_online_map */
+	/* This must be done before setting cpu_online_mask */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
 
@@ -904,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
  */
 static __init void disable_smp(void)
 {
-	/* use the read/write pointers to the present and possible maps */
-	cpumask_copy(&cpu_present_map, cpumask_of(0));
-	cpumask_copy(&cpu_possible_map, cpumask_of(0));
+	init_cpu_present(cpumask_of(0));
+	init_cpu_possible(cpumask_of(0));
 	smpboot_clear_io_apic_irqs();
 
 	if (smp_found_config)
@@ -1149,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus);
 
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_mask should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
  * - Ashok Raj
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
 			num_processors++;
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 		}
 	}
 }
@@ -197,7 +197,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
 		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
 			continue;
-		cpu_clear(cpu, cpu_possible_map);
+		set_cpu_possible(cpu, false);
 	}
 
 	for_each_possible_cpu (cpu) {
@@ -210,7 +210,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 		if (IS_ERR(idle))
 			panic("failed fork for CPU %d", cpu);
 
-		cpu_set(cpu, cpu_present_map);
+		set_cpu_present(cpu, true);
 	}
 }