Merge branch 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (66 commits)
  x86: export vector_used_by_percpu_irq
  x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()
  sched: nominate preferred wakeup cpu, fix
  x86: fix lguest used_vectors breakage, -v2
  x86: fix warning in arch/x86/kernel/io_apic.c
  sched: fix warning in kernel/sched.c
  sched: move test_sd_parent() to an SMP section of sched.h
  sched: add SD_BALANCE_NEWIDLE at MC and CPU level for sched_mc>0
  sched: activate active load balancing in new idle cpus
  sched: bias task wakeups to preferred semi-idle packages
  sched: nominate preferred wakeup cpu
  sched: favour lower logical cpu number for sched_mc balance
  sched: framework for sched_mc/smt_power_savings=N
  sched: convert BALANCE_FOR_xx_POWER to inline functions
  x86: use possible_cpus=NUM to extend the possible cpus allowed
  x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
  x86: update io_apic.c to the new cpumask code
  x86: Introduce topology_core_cpumask()/topology_thread_cpumask()
  x86: xen: use smp_call_function_many()
  x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c
  ...

Fixed up trivial conflict in kernel/time/tick-sched.c manually
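The sparc fallout resolved below is the core cpus4096 conversion: cpumask arguments stop being passed by value as cpumask_t (a full NR_CPUS-bit bitmap, 512 bytes at NR_CPUS=4096) and are passed as const struct cpumask * instead. A minimal sketch of the before/after pattern, with hypothetical demo_set_affinity_*() handlers and a demo_enable() helper standing in for the sun4u/sun4v ones (not code from this merge):

	#include <linux/cpumask.h>

	/* Before: each call copies the whole bitmap onto the stack. */
	static void demo_set_affinity_old(unsigned int virt_irq, cpumask_t mask)
	{
		if (cpu_isset(smp_processor_id(), mask))
			demo_enable(virt_irq);	/* hypothetical helper */
	}

	/* After: callers pass a const pointer; no copy is made, and the
	 * signature keeps working if masks ever move off-stack entirely. */
	static void demo_set_affinity_new(unsigned int virt_irq,
					  const struct cpumask *mask)
	{
		if (cpumask_test_cpu(smp_processor_id(), mask))
			demo_enable(virt_irq);	/* hypothetical helper */
	}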
@@ -312,7 +312,8 @@ static void sun4u_irq_enable(unsigned int virt_irq)
 	}
 }
 
-static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
+static void sun4u_set_affinity(unsigned int virt_irq,
+			       const struct cpumask *mask)
 {
 	sun4u_irq_enable(virt_irq);
 }
@@ -362,7 +363,8 @@ static void sun4v_irq_enable(unsigned int virt_irq)
 		       ino, err);
 }
 
-static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
+static void sun4v_set_affinity(unsigned int virt_irq,
+			       const struct cpumask *mask)
 {
 	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
 	unsigned long cpuid = irq_choose_cpu(virt_irq);
@@ -429,7 +431,8 @@ static void sun4v_virq_enable(unsigned int virt_irq)
 		       dev_handle, dev_ino, err);
 }
 
-static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
+static void sun4v_virt_set_affinity(unsigned int virt_irq,
+				    const struct cpumask *mask)
 {
 	unsigned long cpuid, dev_handle, dev_ino;
 	int err;
@@ -851,7 +854,7 @@ void fixup_irqs(void)
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
-					irq_desc[irq].affinity);
+					&irq_desc[irq].affinity);
 		}
 		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}
@@ -780,7 +780,7 @@ out:
 	if (nid != -1) {
 		cpumask_t numa_mask = node_to_cpumask(nid);
 
-		irq_set_affinity(irq, numa_mask);
+		irq_set_affinity(irq, &numa_mask);
 	}
 
 	return irq;
@@ -288,7 +288,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
 	if (nid != -1) {
 		cpumask_t numa_mask = node_to_cpumask(nid);
 
-		irq_set_affinity(irq, numa_mask);
+		irq_set_affinity(irq, &numa_mask);
 	}
 	err = request_irq(irq, sparc64_msiq_interrupt, 0,
 			  "MSIQ",
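In the two hunks above, irq_set_affinity() now takes const struct cpumask *, so the on-stack cpumask_t produced by node_to_cpumask() is passed by address. A sketch of the call-site pattern under the 2.6.28-era node_to_cpumask() API (demo_bind_irq_to_node() is illustrative, not from this diff):

	static int demo_bind_irq_to_node(unsigned int irq, int nid)
	{
		if (nid != -1) {
			cpumask_t numa_mask = node_to_cpumask(nid);

			return irq_set_affinity(irq, &numa_mask);
		}
		return 0;
	}

Passing the address of a local is safe here because irq_set_affinity() copies the mask into the irq descriptor rather than retaining the pointer.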
@@ -39,8 +39,6 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
 unsigned char boot_cpu_id = 0;
 unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
 
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* The only guaranteed locking primitive available on all Sparc
@@ -334,7 +332,7 @@ void __init smp_setup_cpu_possible_map(void)
 	instance = 0;
 	while (!cpu_find_by_instance(instance, NULL, &mid)) {
 		if (mid < NR_CPUS) {
-			cpu_set(mid, phys_cpu_present_map);
+			cpu_set(mid, cpu_possible_map);
 			cpu_set(mid, cpu_present_map);
 		}
 		instance++;
@@ -354,7 +352,7 @@ void __init smp_prepare_boot_cpu(void)
 
 	current_thread_info()->cpu = cpuid;
 	cpu_set(cpuid, cpu_online_map);
-	cpu_set(cpuid, phys_cpu_present_map);
+	cpu_set(cpuid, cpu_possible_map);
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
@@ -49,14 +49,10 @@
 
 int sparc64_multi_core __read_mostly;
 
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
 	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_online_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 
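This hunk (and the sparc32 one at -39,8 above) can drop the architecture's private cpu_possible_map / cpu_online_map definitions because the cpus4096 series centralizes them in common code. Roughly what the shared side provides, reconstructed from memory of the 2.6.28-era kernel/cpu.c change and not part of this diff (the initializers vary with CONFIG_INIT_ALL_POSSIBLE):

	/* kernel/cpu.c (sketch) */
	cpumask_t cpu_online_map __read_mostly;
	EXPORT_SYMBOL(cpu_online_map);

	cpumask_t cpu_possible_map __read_mostly;
	EXPORT_SYMBOL(cpu_possible_map);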
@@ -112,10 +112,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 #ifdef CONFIG_SMP
 /* IRQ implementation. */
 EXPORT_SYMBOL(synchronize_irq);
-
-/* CPU online map and active count. */
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(phys_cpu_present_map);
 #endif
 
 EXPORT_SYMBOL(__udelay);
@@ -763,7 +763,7 @@ void __devinit setup_sparc64_timer(void)
 	sevt = &__get_cpu_var(sparc64_events);
 
 	memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
-	sevt->cpumask = cpumask_of_cpu(smp_processor_id());
+	sevt->cpumask = cpumask_of(smp_processor_id());
 
 	clockevents_register_device(sevt);
 }
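The final hunk swaps cpumask_of_cpu(), which constructs a full cpumask_t by value, for cpumask_of(), which returns a const struct cpumask * backed by shared constant data, matching the clockevent device's cpumask field becoming a pointer in this series. A small illustrative contrast (demo() is hypothetical, not from this diff):

	#include <linux/clockchips.h>
	#include <linux/cpumask.h>

	static void demo(struct clock_event_device *evt, int cpu)
	{
		/* old API: cpumask_of_cpu() yields a cpumask_t value (full copy) */
		cpumask_t m = cpumask_of_cpu(cpu);

		/* new API: cpumask_of() yields a read-only pointer (no copy) */
		evt->cpumask = cpumask_of(cpu);

		WARN_ON(!cpus_equal(m, *evt->cpumask));	/* both name the same CPU */
	}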