From: Anton Blanchard Another decent optimisation found in the ia64 port, don't take the irq descriptor lock or do the note_interrupt stuff on PER_CPU irqs (i.e. IPIs). --- arch/ppc64/kernel/irq.c | 18 ++++++++++++------ 1 files changed, 12 insertions(+), 6 deletions(-) diff -puN arch/ppc64/kernel/irq.c~ppc64-irq_per_cpu arch/ppc64/kernel/irq.c --- 25/arch/ppc64/kernel/irq.c~ppc64-irq_per_cpu 2004-02-21 20:58:30.000000000 -0800 +++ 25-akpm/arch/ppc64/kernel/irq.c 2004-02-21 20:58:30.000000000 -0800 @@ -476,8 +476,18 @@ void ppc_irq_dispatch_handler(struct pt_ struct irqaction *action; int cpu = smp_processor_id(); irq_desc_t *desc = irq_desc + irq; + irqreturn_t action_ret; kstat_cpu(cpu).irqs[irq]++; + + if (desc->status & IRQ_PER_CPU) { + /* no locking required for CPU-local interrupts: */ + ack_irq(irq); + action_ret = handle_irq_event(irq, regs, desc->action); + desc->handler->end(irq); + return; + } + spin_lock(&desc->lock); ack_irq(irq); /* @@ -485,8 +495,7 @@ void ppc_irq_dispatch_handler(struct pt_ WAITING is used by probe to mark irqs that are being tested */ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING); - if (!(status & IRQ_PER_CPU)) - status |= IRQ_PENDING; /* we _want_ to handle it */ + status |= IRQ_PENDING; /* we _want_ to handle it */ /* * If the IRQ is disabled for whatever reason, we cannot @@ -509,8 +518,7 @@ void ppc_irq_dispatch_handler(struct pt_ goto out; } status &= ~IRQ_PENDING; /* we commit to handling */ - if (!(status & IRQ_PER_CPU)) - status |= IRQ_INPROGRESS; /* we are handling it */ + status |= IRQ_INPROGRESS; /* we are handling it */ } desc->status = status; @@ -534,8 +542,6 @@ void ppc_irq_dispatch_handler(struct pt_ * SMP environment. */ for (;;) { - irqreturn_t action_ret; - spin_unlock(&desc->lock); action_ret = handle_irq_event(irq, regs, action); spin_lock(&desc->lock); _