diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 7c5008892ac9c..9684a0d22d975 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -633,21 +633,48 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
 
+static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
 static void arch_dump_stack(void *info)
 {
 	struct pt_regs *regs;
+	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
+	arch_spin_lock(&lock);
 	regs = get_irq_regs();
 
 	if (regs)
 		show_regs(regs);
 	else
 		dump_stack();
+	arch_spin_unlock(&lock);
+
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
 }
 
 void arch_trigger_all_cpu_backtrace(bool include_self)
 {
-	smp_call_function(arch_dump_stack, NULL, 1);
+	struct call_single_data *csd;
+	int cpu;
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
+
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = arch_dump_stack;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
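
For context, the pattern this hunk relies on is a per-target "busy" bit: the sender test-and-sets it before reusing the per-CPU call_single_data slot, and the receiver clears it only after it has finished with the slot, so a slot is never overwritten while an earlier request is still in flight. Below is a minimal user-space analogue sketching that handshake with C11 atomics; it is not kernel code, and the names (slot, busy, send_request, handle_request) are invented for illustration only.

/* User-space sketch of the per-CPU csd + busy-mask handshake.
 * Only the test-and-set / clear ordering mirrors the patch;
 * everything else here is a made-up stand-in.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NTARGETS 4

struct slot {
	void (*func)(int);		/* request payload, like csd->func */
};

static struct slot slots[NTARGETS];
static atomic_bool busy[NTARGETS];	/* plays the role of backtrace_csd_busy */

/* Sender side: analogous to arch_trigger_all_cpu_backtrace(). */
static bool send_request(int target, void (*func)(int))
{
	/* If the previous request was never acknowledged, the slot may
	 * still be in use, so refuse to overwrite it. */
	if (atomic_exchange(&busy[target], true)) {
		fprintf(stderr, "target %d still busy - skipping\n", target);
		return false;
	}
	slots[target].func = func;
	/* ...this is where the kernel would fire the async IPI... */
	return true;
}

/* Receiver side: analogous to arch_dump_stack() running on the target CPU. */
static void handle_request(int target)
{
	slots[target].func(target);
	/* Clearing the bit last tells the sender the slot is reusable. */
	atomic_store(&busy[target], false);
}

static void dump(int t) { printf("backtrace for target %d\n", t); }

int main(void)
{
	if (send_request(1, dump))
		handle_request(1);
	/* The second send succeeds only because handle_request() cleared
	 * the busy bit; if the receiver had hung, it would be skipped
	 * with a warning, just as the patch skips a hung CPU. */
	send_request(1, dump);
	handle_request(1);
	return 0;
}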