x86/irq: change irq_desc_alloc() to take node instead of cpu
This simplifies the node awareness of the code. All our allocators only deal with a NUMA node ID locality, not with CPU ids anyway - so there's no need to maintain (and transform) a CPU id all across the IRQ layer. v2: keep move_irq_desc related [ Impact: cleanup, prepare IRQ code to be NUMA-aware ] Signed-off-by: Yinghai Lu <yinghai@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Suresh Siddha <suresh.b.siddha@intel.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Jeremy Fitzhardinge <jeremy@goop.org> LKML-Reference: <49F65536.2020300@kernel.org> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
@ -187,7 +187,7 @@ struct irq_desc {
|
||||
spinlock_t lock;
|
||||
#ifdef CONFIG_SMP
|
||||
cpumask_var_t affinity;
|
||||
unsigned int cpu;
|
||||
unsigned int node;
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
cpumask_var_t pending_mask;
|
||||
#endif
|
||||
@ -201,16 +201,16 @@ struct irq_desc {
|
||||
} ____cacheline_internodealigned_in_smp;
|
||||
|
||||
extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
|
||||
struct irq_desc *desc, int cpu);
|
||||
struct irq_desc *desc, int node);
|
||||
extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
|
||||
|
||||
#ifndef CONFIG_SPARSE_IRQ
|
||||
extern struct irq_desc irq_desc[NR_IRQS];
|
||||
#else /* CONFIG_SPARSE_IRQ */
|
||||
extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
|
||||
extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
|
||||
#endif /* CONFIG_SPARSE_IRQ */
|
||||
|
||||
extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
|
||||
extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
|
||||
|
||||
/*
|
||||
* Migration helpers for obsolete names, they will go away:
|
||||
@ -422,12 +422,10 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
|
||||
* Allocates affinity and pending_mask cpumask if required.
|
||||
* Returns true if successful (or not required).
|
||||
*/
|
||||
static inline bool alloc_desc_masks(struct irq_desc *desc, int cpu,
|
||||
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
|
||||
bool boot)
|
||||
{
|
||||
#ifdef CONFIG_CPUMASK_OFFSTACK
|
||||
int node;
|
||||
|
||||
if (boot) {
|
||||
alloc_bootmem_cpumask_var(&desc->affinity);
|
||||
|
||||
@ -437,8 +435,6 @@ static inline bool alloc_desc_masks(struct irq_desc *desc, int cpu,
|
||||
return true;
|
||||
}
|
||||
|
||||
node = cpu_to_node(cpu);
|
||||
|
||||
if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
|
||||
return false;
|
||||
|
||||
@ -494,7 +490,7 @@ static inline void free_desc_masks(struct irq_desc *old_desc,
|
||||
|
||||
#else /* !CONFIG_SMP */
|
||||
|
||||
static inline bool alloc_desc_masks(struct irq_desc *desc, int cpu,
|
||||
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
|
||||
bool boot)
|
||||
{
|
||||
return true;
|
||||
|
Reference in New Issue
Block a user