@@ -49,8 +49,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
/*
* Per-cpu stacks for handling hard and soft interrupts.
*/
-extern void *hardirq_ctx[NR_CPUS];
-extern void *softirq_ctx[NR_CPUS];
+extern void *normirq_ctx[NR_CPUS];
void __do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
@@ -683,17 +683,16 @@ void __do_irq(struct pt_regs *regs)
void __do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- void *cursp, *irqsp, *sirqsp;
+ void *cursp, *irqsp;
/* Switch to the irq stack to handle this */
cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
- irqsp = hardirq_ctx[raw_smp_processor_id()];
- sirqsp = softirq_ctx[raw_smp_processor_id()];
+ irqsp = normirq_ctx[raw_smp_processor_id()];
check_stack_overflow();
/* Already there ? */
- if (unlikely(cursp == irqsp || cursp == sirqsp)) {
+ if (unlikely(cursp == irqsp)) {
__do_irq(regs);
set_irq_regs(old_regs);
return;
@@ -719,10 +718,8 @@ static void __init vmap_irqstack_init(void)
{
int i;
- for_each_possible_cpu(i) {
- softirq_ctx[i] = alloc_vm_stack();
- hardirq_ctx[i] = alloc_vm_stack();
- }
+ for_each_possible_cpu(i)
+ normirq_ctx[i] = alloc_vm_stack();
}
@@ -744,12 +741,11 @@ void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif
-void *softirq_ctx[NR_CPUS] __read_mostly;
-void *hardirq_ctx[NR_CPUS] __read_mostly;
+void *normirq_ctx[NR_CPUS] __read_mostly;
void do_softirq_own_stack(void)
{
- call_do_softirq(softirq_ctx[smp_processor_id()]);
+ call_do_softirq(normirq_ctx[smp_processor_id()]);
}
irq_hw_number_t virq_to_hw(unsigned int virq)
@@ -2089,11 +2089,7 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
unsigned long stack_page;
unsigned long cpu = task_cpu(p);
- stack_page = (unsigned long)hardirq_ctx[cpu];
- if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
- return 1;
-
- stack_page = (unsigned long)softirq_ctx[cpu];
+ stack_page = (unsigned long)normirq_ctx[cpu];
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
@@ -158,10 +158,8 @@ void __init irqstack_early_init(void)
/* interrupt stacks must be in lowmem, we get that for free on ppc32
* as the memblock is limited to lowmem by default */
- for_each_possible_cpu(i) {
- softirq_ctx[i] = alloc_stack();
- hardirq_ctx[i] = alloc_stack();
- }
+ for_each_possible_cpu(i)
+ normirq_ctx[i] = alloc_stack();
}
#ifdef CONFIG_VMAP_STACK
@@ -718,10 +718,8 @@ void __init irqstack_early_init(void)
* cannot afford to take SLB misses on them. They are not
* accessed in realmode.
*/
- for_each_possible_cpu(i) {
- softirq_ctx[i] = alloc_stack(limit, i);
- hardirq_ctx[i] = alloc_stack(limit, i);
- }
+ for_each_possible_cpu(i)
+ normirq_ctx[i] = alloc_stack(limit, i);
}
#ifdef CONFIG_PPC_BOOK3E
__do_IRQ() doesn't switch to the hardirq stack if we are on the softirq stack. do_softirq() bails out early without doing anything when already in an interrupt. invoke_softirq() is on the task stack when it calls do_softirq_own_stack(). So there is neither a situation where we switch from the hardirq stack to the softirq stack, nor one where we switch from the softirq stack to the hardirq stack. It is therefore not necessary to have two stacks, because they are never used at the same time. Merge both stacks into a new one called normirq_ctx. Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu> --- arch/powerpc/include/asm/irq.h | 3 +-- arch/powerpc/kernel/irq.c | 18 +++++++----------- arch/powerpc/kernel/process.c | 6 +----- arch/powerpc/kernel/setup_32.c | 6 ++---- arch/powerpc/kernel/setup_64.c | 6 ++---- 5 files changed, 13 insertions(+), 26 deletions(-)