Subject: printk: rename printk cpulock API and always disable interrupts
From: John Ogness
Date: Thu Jul 15 09:34:45 2021 +0206

From: John Ogness

The printk cpulock functions use local_irq_disable(). This means that
hardware interrupts are also disabled on PREEMPT_RT. To make this clear,
rename the functions to use the raw_ prefix:

  raw_printk_cpu_lock_irqsave(flags);
  raw_printk_cpu_unlock_irqrestore(flags);

Also, these functions were a NOP for !CONFIG_SMP. But for !CONFIG_SMP
they still need to disable hardware interrupts. So modify them
appropriately for this.

Signed-off-by: John Ogness
Signed-off-by: Thomas Gleixner
---
 include/linux/printk.h |   30 ++++++++++++++----------------
 lib/dump_stack.c       |    4 ++--
 lib/nmi_backtrace.c    |    4 ++--
 3 files changed, 18 insertions(+), 20 deletions(-)
---
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -280,17 +280,22 @@ static inline void dump_stack(void)
 extern int __printk_cpu_trylock(void);
 extern void __printk_wait_on_cpu_lock(void);
 extern void __printk_cpu_unlock(void);
+#else
+#define __printk_cpu_trylock() 1
+#define __printk_wait_on_cpu_lock()
+#define __printk_cpu_unlock()
+#endif /* CONFIG_SMP */
 
 /**
- * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
- *                             lock and disable interrupts.
+ * raw_printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
+ *                                 lock and disable interrupts.
  * @flags: Stack-allocated storage for saving local interrupt state,
- *         to be passed to printk_cpu_unlock_irqrestore().
+ *         to be passed to raw_printk_cpu_unlock_irqrestore().
  *
  * If the lock is owned by another CPU, spin until it becomes available.
  * Interrupts are restored while spinning.
  */
-#define printk_cpu_lock_irqsave(flags)		\
+#define raw_printk_cpu_lock_irqsave(flags)	\
 	for (;;) {				\
 		local_irq_save(flags);		\
 		if (__printk_cpu_trylock())	\
@@ -300,22 +305,15 @@ extern void __printk_cpu_unlock(void);
 	}
 
 /**
- * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
- *                                  lock and restore interrupts.
- * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
+ * raw_printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant
+ *                                      spinning lock and restore interrupts.
+ * @flags: Caller's saved interrupt state from raw_printk_cpu_lock_irqsave().
  */
-#define printk_cpu_unlock_irqrestore(flags)	\
+#define raw_printk_cpu_unlock_irqrestore(flags)	\
 	do {					\
 		__printk_cpu_unlock();		\
 		local_irq_restore(flags);	\
-	} while (0)				\
-
-#else
-
-#define printk_cpu_lock_irqsave(flags) ((void)flags)
-#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
-
-#endif /* CONFIG_SMP */
+	} while (0)
 
 extern int kptr_restrict;
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-	printk_cpu_lock_irqsave(flags);
+	raw_printk_cpu_lock_irqsave(flags);
 	__dump_stack(log_lvl);
-	printk_cpu_unlock_irqrestore(flags);
+	raw_printk_cpu_unlock_irqrestore(flags);
 }
 EXPORT_SYMBOL(dump_stack_lvl);
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -93,7 +93,7 @@ bool nmi_cpu_backtrace(struct pt_regs *r
 		 * Allow nested NMI backtraces while serializing
 		 * against other CPUs.
 		 */
-		printk_cpu_lock_irqsave(flags);
+		raw_printk_cpu_lock_irqsave(flags);
 		if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
 				cpu, (void *)instruction_pointer(regs));
@@ -104,7 +104,7 @@ bool nmi_cpu_backtrace(struct pt_regs *r
 			else
 				dump_stack();
 		}
-		printk_cpu_unlock_irqrestore(flags);
+		raw_printk_cpu_unlock_irqrestore(flags);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
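
[ Usage note, not part of the patch: a minimal sketch of how a caller would
  use the renamed API, following the same pattern as the dump_stack_lvl()
  hunk above. The function name emit_serialized_output() is hypothetical. ]

#include <linux/printk.h>

/* Hypothetical caller: serialize diagnostic output against other CPUs. */
static void emit_serialized_output(void)
{
	unsigned long flags;

	/*
	 * Spin until the printk cpu lock is acquired. Hardware interrupts
	 * are disabled while the lock is held, including on PREEMPT_RT and
	 * for !CONFIG_SMP, and are briefly re-enabled while spinning.
	 */
	raw_printk_cpu_lock_irqsave(flags);
	pr_info("serialized diagnostic output\n");
	raw_printk_cpu_unlock_irqrestore(flags);
}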