[PATCH] more for_each_cpu() conversions
When we stop allocating percpu memory for not-possible CPUs we must not touch the percpu data for not-possible CPUs at all.  The correct way of doing this is to test cpu_possible() or to use for_each_cpu().

This patch is a kernel-wide sweep of all instances of NR_CPUS.  I found very few instances of this bug, if any.  But the patch converts lots of open-coded tests to use the preferred helper macros.

Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
committed by Linus Torvalds
parent 63872f87a1
commit 394e3902c5
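For reference, the conversion pattern applied in the hunks below, as a minimal sketch.  The DEFINE_PER_CPU counter and reset_counts() function are hypothetical, for illustration only; note that in this kernel era for_each_cpu() walks cpu_possible_map (it was later renamed for_each_possible_cpu()):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/threads.h>

/* Hypothetical per-CPU counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, example_count);

static void reset_counts(void)
{
	int i;

	/* Open-coded sweep: touches all NR_CPUS slots, including CPUs
	 * that can never be present on this system. */
	for (i = 0; i < NR_CPUS; i++)
		per_cpu(example_count, i) = 0;

	/* Preferred: walk only the possible CPUs ... */
	for_each_cpu(i)
		per_cpu(example_count, i) = 0;

	/* ... or only the CPUs that are currently online ... */
	for_each_online_cpu(i)
		per_cpu(example_count, i) = 0;

	/* ... or test cpu_possible() explicitly before touching
	 * the per-CPU data inside an open-coded loop. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		per_cpu(example_count, i) = 0;
	}
}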
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
 	int i, j;
 	Dprintk("Rotating IRQs among CPUs.\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+	for_each_online_cpu(i) {
+		for (j = 0; j < NR_IRQS; j++) {
 			if (!irq_desc[j].action)
 				continue;
 			/* Is it a significant load ? */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
 		}
 	}
 	/* Find the least loaded processor package */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
 	 */
 	tmp_cpu_irq = 0;
 	tmp_loaded = -1;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (max_cpu_irq <= CPU_IRQ(i))
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
 	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
 	else
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
+		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
+		irq_cpu_data[i].last_irq = NULL;
 	}
 	return 0;
 }