From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 19 May 2023 16:57:31 +0200
Subject: [PATCH 5/5] ARM: vfp: Use vfp_lock() in vfp_entry().

vfp_entry() is invoked from the exception handler and is fully
preemptible. It uses local_bh_disable() to remain uninterrupted while
checking the VFP state.
This does not work on PREEMPT_RT because local_bh_disable()
synchronizes the relevant section but the context remains fully
preemptible.

Use vfp_lock() for uninterrupted access.
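
For reference, a minimal sketch of the vfp_lock()/vfp_unlock() helpers
this patch relies on (assumed to be introduced earlier in this series;
shown here only for illustration, not part of this patch):

	static void vfp_lock(void)
	{
		if (!IS_ENABLED(CONFIG_PREEMPT_RT))
			local_bh_disable();	/* !RT: also disables preemption */
		else
			preempt_disable();	/* RT: BH-off stays preemptible, so disable preemption */
	}

	static void vfp_unlock(void)
	{
		if (!IS_ENABLED(CONFIG_PREEMPT_RT))
			local_bh_enable();
		else
			preempt_enable();
	}

On !PREEMPT_RT this behaves like the previous local_bh_disable() based
protection; on PREEMPT_RT preemption is disabled instead so the VFP
state cannot change underneath the owner.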

VFP_bounce() is invoked from within vfp_entry() and may send a signal.
Sending a signal uses spinlock_t, which becomes a sleeping lock on
PREEMPT_RT and must not be acquired within a preempt-disabled section.
Move the vfp_raise_sigfpe() invocation outside of the preempt-disabled
section.
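
The resulting pattern in VFP_bounce() (a condensed view of the hunks
below, not a verbatim copy) is: collect the si_code while the VFP state
is protected, raise the signal afterwards:

	/* inside the vfp_lock() section (taken in vfp_entry()): */
	if (exceptions)
		si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
	...
 exit:
	vfp_unlock();
	/* now fully preemptible, safe to take the signal-delivery spinlock_t */
	if (si_code)
		vfp_raise_sigfpe(si_code, regs);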

Link: https://lore.kernel.org/r/20230519145731.574867-4-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/arm/vfp/vfphw.S     |    7 ++-----
 arch/arm/vfp/vfpmodule.c |   30 ++++++++++++++++++++----------
 2 files changed, 22 insertions(+), 15 deletions(-)

Index: linux-6.3.1-rt13/arch/arm/vfp/vfphw.S
===================================================================
--- linux-6.3.1-rt13.orig/arch/arm/vfp/vfphw.S
+++ linux-6.3.1-rt13/arch/arm/vfp/vfphw.S
@ linux-6.3.1-rt13/arch/arm/vfp/vfphw.S:183 @ vfp_hw_state_valid:
 					@ always subtract 4 from the following
 					@ instruction address.
 
-local_bh_enable_and_ret:
-	adr	r0, .
-	mov	r1, #SOFTIRQ_DISABLE_OFFSET
-	b	__local_bh_enable_ip	@ tail call
+	b	vfp_exit	@ tail call
 
 look_for_VFP_exceptions:
 	@ Check for synchronous or asynchronous exception
@ linux-6.3.1-rt13/arch/arm/vfp/vfphw.S:206 @ skip:
 	@ not recognised by VFP
 
 	DBGSTR	"not VFP"
-	b	local_bh_enable_and_ret
+	b	vfp_exit	@ tail call
 
 process_exception:
 	DBGSTR	"bounce"
Index: linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c
===================================================================
--- linux-6.3.1-rt13.orig/arch/arm/vfp/vfpmodule.c
+++ linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c
@ linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c:270 @ static void vfp_panic(char *reason, u32
 /*
  * Process bitmask of exception conditions.
  */
-static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
+static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
 {
 	int si_code = 0;
 
@ linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c:278 @ static void vfp_raise_exceptions(u32 exc
 
 	if (exceptions == VFP_EXCEPTION_ERROR) {
 		vfp_panic("unhandled bounce", inst);
-		vfp_raise_sigfpe(FPE_FLTINV, regs);
-		return;
+		return FPE_FLTINV;
 	}
 
 	/*
@ linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c:306 @ static void vfp_raise_exceptions(u32 exc
 	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
 	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
 
-	if (si_code)
-		vfp_raise_sigfpe(si_code, regs);
+	return si_code;
 }
 
 /*
@ linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c:351 @ static u32 vfp_emulate_instruction(u32 i
 void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 {
 	u32 fpscr, orig_fpscr, fpsid, exceptions;
+	int si_code2 = 0;
+	int si_code = 0;
 
 	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
 
@ linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c:400 @ void VFP_bounce(u32 trigger, u32 fpexc,
 		 * unallocated VFP instruction but with FPSCR.IXE set and not
 		 * on VFP subarch 1.
 		 */
-		 vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
+		si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
 		goto exit;
 	}
 
@ linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c:425 @ void VFP_bounce(u32 trigger, u32 fpexc,
 	 */
 	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
 	if (exceptions)
-		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+		si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
 
 	/*
 	 * If there isn't a second FP instruction, exit now. Note that
@ linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c:444 @ void VFP_bounce(u32 trigger, u32 fpexc,
  emulate:
 	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
 	if (exceptions)
-		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+		si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
+
  exit:
-	local_bh_enable();
+	vfp_unlock();
+	if (si_code2)
+		vfp_raise_sigfpe(si_code2, regs);
+	if (si_code)
+		vfp_raise_sigfpe(si_code, regs);
 }
 
 static void vfp_enable(void *unused)
@ linux-6.3.1-rt13/arch/arm/vfp/vfpmodule.c:692 @ asmlinkage void vfp_entry(u32 trigger, s
 	if (unlikely(!have_vfp))
 		return;
 
-	local_bh_disable();
+	vfp_lock();
 	vfp_support_entry(trigger, ti, resume_pc, resume_return_address);
 }
 
+asmlinkage void vfp_exit(void)
+{
+	vfp_unlock();
+}
+
 #ifdef CONFIG_KERNEL_MODE_NEON
 
 static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)