diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/common/mcpm_entry.c linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:209 @ int mcpm_cpu_power_up(unsigned int cpu,
 	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
 	 * variant exists, we need to disable IRQs manually here.
 	 */
-	local_irq_disable();
+	hard_local_irq_disable();
 	arch_spin_lock(&mcpm_lock);
 
 	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:233 @ int mcpm_cpu_power_up(unsigned int cpu,
 		ret = platform_ops->cpu_powerup(cpu, cluster);
 
 	arch_spin_unlock(&mcpm_lock);
-	local_irq_enable();
+	hard_local_irq_enable();
 	return ret;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:352 @ int mcpm_cpu_powered_up(void)
 	mpidr = read_cpuid_mpidr();
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	arch_spin_lock(&mcpm_lock);
 
 	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:366 @ int mcpm_cpu_powered_up(void)
 		platform_ops->cpu_is_up(cpu, cluster);
 
 	arch_spin_unlock(&mcpm_lock);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return 0;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:405 @ int __init mcpm_loopback(void (*cache_di
 	 * infrastructure. Let's play it safe by using cpu_pm_enter()
 	 * in case the CPU init code path resets the VFP or similar.
 	 */
-	local_irq_disable();
+	hard_local_irq_disable();
 	local_fiq_disable();
 	ret = cpu_pm_enter();
 	if (!ret) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:413 @ int __init mcpm_loopback(void (*cache_di
 		cpu_pm_exit();
 	}
 	local_fiq_enable();
-	local_irq_enable();
+	hard_local_irq_enable();
 	if (ret)
 		pr_err("%s returned %d\n", __func__, ret);
 	return ret;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/assembler.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/assembler.h
--- linux-5.15.26/arch/arm/include/asm/assembler.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/assembler.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:110 @
 	.endm
 #endif
 
+	.macro  disable_irq_if_pipelined
+#ifdef CONFIG_IRQ_PIPELINE
+	disable_irq_notrace
+#endif
+	.endm
+
+	.macro  enable_irq_if_pipelined
+#ifdef CONFIG_IRQ_PIPELINE
+	enable_irq_notrace
+#endif
+	.endm
+
 	.macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
 	.endif
-	bl	trace_hardirqs_off
+	bl	trace_hardirqs_off_pipelined
 	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
 	.endif
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:143 @
 	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
 	.endif
-	bl\cond	trace_hardirqs_on
+	bl\cond	trace_hardirqs_on_pipelined
 	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
 	.endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/atomic.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/atomic.h
--- linux-5.15.26/arch/arm/include/asm/atomic.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/atomic.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:167 @ static inline void arch_atomic_##op(int
 {									\
 	unsigned long flags;						\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	v->counter c_op i;						\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:178 @ static inline int arch_atomic_##op##_ret
 	unsigned long flags;						\
 	int val;							\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	v->counter c_op i;						\
 	val = v->counter;						\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 									\
 	return val;							\
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:192 @ static inline int arch_atomic_fetch_##op
 	unsigned long flags;						\
 	int val;							\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	val = v->counter;						\
 	v->counter c_op i;						\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 									\
 	return val;							\
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:205 @ static inline int arch_atomic_cmpxchg(at
 	int ret;
 	unsigned long flags;
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	ret = v->counter;
 	if (likely(ret == old))
 		v->counter = new;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return ret;
 }
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/bitops.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/bitops.h
--- linux-5.15.26/arch/arm/include/asm/bitops.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/bitops.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:43 @ static inline void ____atomic_set_bit(un
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	*p |= mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:55 @ static inline void ____atomic_clear_bit(
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	*p &= ~mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:67 @ static inline void ____atomic_change_bit
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	*p ^= mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline int
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:81 @ ____atomic_test_and_set_bit(unsigned int
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	res = *p;
 	*p = res | mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return (res & mask) != 0;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:98 @ ____atomic_test_and_clear_bit(unsigned i
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	res = *p;
 	*p = res & ~mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return (res & mask) != 0;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:115 @ ____atomic_test_and_change_bit(unsigned
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	res = *p;
 	*p = res ^ mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return (res & mask) != 0;
 }
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/cmpxchg.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/cmpxchg.h
--- linux-5.15.26/arch/arm/include/asm/cmpxchg.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/cmpxchg.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:80 @ static inline unsigned long __xchg(unsig
 #error SMP is not supported on this platform
 #endif
 	case 1:
-		raw_local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile unsigned char *)ptr;
 		*(volatile unsigned char *)ptr = x;
-		raw_local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		break;
 
 	case 4:
-		raw_local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile unsigned long *)ptr;
 		*(volatile unsigned long *)ptr = x;
-		raw_local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		break;
 #else
 	case 1:
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/dovetail.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/dovetail.h
--- linux-5.15.26/arch/arm/include/asm/dovetail.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/dovetail.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2016 Philippe Gerum.
+ */
+#ifndef _ASM_ARM_DOVETAIL_H
+#define _ASM_ARM_DOVETAIL_H
+
+/* ARM traps */
+#define ARM_TRAP_ACCESS		0	/* Data or instruction access exception */
+#define ARM_TRAP_SECTION	1	/* Section fault */
+#define ARM_TRAP_DABT		2	/* Generic data abort */
+#define ARM_TRAP_PABT		3	/* Prefetch abort */
+#define ARM_TRAP_BREAK		4	/* Instruction breakpoint */
+#define ARM_TRAP_FPU		5	/* Floating point exception */
+#define ARM_TRAP_VFP		6	/* VFP floating point exception */
+#define ARM_TRAP_UNDEFINSTR	7	/* Undefined instruction */
+#define ARM_TRAP_ALIGNMENT	8	/* Unaligned access exception */
+
+#if !defined(__ASSEMBLY__) && defined(CONFIG_DOVETAIL)
+
+static inline void arch_dovetail_exec_prepare(void)
+{ }
+
+static inline void arch_dovetail_switch_prepare(bool leave_inband)
+{ }
+
+static inline void arch_dovetail_switch_finish(bool enter_inband)
+{ }
+
+#endif
+
+#endif /* _ASM_ARM_DOVETAIL_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/efi.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/efi.h
--- linux-5.15.26/arch/arm/include/asm/efi.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/efi.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:40 @ int efi_set_mapping_permissions(struct m
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
+	unsigned long flags;
+
+	protect_inband_mm(flags);
 	check_and_switch_context(mm, NULL);
+	unprotect_inband_mm(flags);
 }
 
 void efi_virtmap_load(void);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/irqflags.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/irqflags.h
--- linux-5.15.26/arch/arm/include/asm/irqflags.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/irqflags.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:8 @
 #ifdef __KERNEL__
 
 #include <asm/ptrace.h>
+#include <asm/barrier.h>
 
 /*
  * CPU interrupt mask handling.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:17 @
 #define IRQMASK_REG_NAME_R "primask"
 #define IRQMASK_REG_NAME_W "primask"
 #define IRQMASK_I_BIT	1
+#define IRQMASK_I_POS	0
 #else
 #define IRQMASK_REG_NAME_R "cpsr"
 #define IRQMASK_REG_NAME_W "cpsr_c"
 #define IRQMASK_I_BIT	PSR_I_BIT
+#define IRQMASK_I_POS	7
 #endif
+#define IRQMASK_i_POS	31
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 #define arch_local_irq_save arch_local_irq_save
-static inline unsigned long arch_local_irq_save(void)
+static inline unsigned long native_irq_save(void)
 {
 	unsigned long flags;
 
 	asm volatile(
-		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ arch_local_irq_save\n"
+		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ native_irq_save\n"
 		"	cpsid	i"
 		: "=r" (flags) : : "memory", "cc");
 	return flags;
 }
 
 #define arch_local_irq_enable arch_local_irq_enable
-static inline void arch_local_irq_enable(void)
+static inline void native_irq_enable(void)
 {
 	asm volatile(
-		"	cpsie i			@ arch_local_irq_enable"
+		"	cpsie i			@ native_irq_enable"
 		:
 		:
 		: "memory", "cc");
 }
 
 #define arch_local_irq_disable arch_local_irq_disable
-static inline void arch_local_irq_disable(void)
+static inline void native_irq_disable(void)
 {
 	asm volatile(
-		"	cpsid i			@ arch_local_irq_disable"
+		"	cpsid i			@ native_irq_disable"
 		:
 		:
 		: "memory", "cc");
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:76 @ static inline void arch_local_irq_disabl
  * Save the current interrupt enable state & disable IRQs
  */
 #define arch_local_irq_save arch_local_irq_save
-static inline unsigned long arch_local_irq_save(void)
+static inline unsigned long native_irq_save(void)
 {
 	unsigned long flags, temp;
 
 	asm volatile(
-		"	mrs	%0, cpsr	@ arch_local_irq_save\n"
+		"	mrs	%0, cpsr	@ native_irq_save\n"
 		"	orr	%1, %0, #128\n"
 		"	msr	cpsr_c, %1"
 		: "=r" (flags), "=r" (temp)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:94 @ static inline unsigned long arch_local_i
  * Enable IRQs
  */
 #define arch_local_irq_enable arch_local_irq_enable
-static inline void arch_local_irq_enable(void)
+static inline void native_irq_enable(void)
 {
 	unsigned long temp;
 	asm volatile(
-		"	mrs	%0, cpsr	@ arch_local_irq_enable\n"
+		"	mrs	%0, cpsr	@ native_irq_enable\n"
 		"	bic	%0, %0, #128\n"
 		"	msr	cpsr_c, %0"
 		: "=r" (temp)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:110 @ static inline void arch_local_irq_enable
  * Disable IRQs
  */
 #define arch_local_irq_disable arch_local_irq_disable
-static inline void arch_local_irq_disable(void)
+static inline void native_irq_disable(void)
 {
 	unsigned long temp;
 	asm volatile(
-		"	mrs	%0, cpsr	@ arch_local_irq_disable\n"
+		"	mrs	%0, cpsr	@ native_irq_disable\n"
 		"	orr	%0, %0, #128\n"
 		"	msr	cpsr_c, %0"
 		: "=r" (temp)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:156 @ static inline void arch_local_irq_disabl
 #define local_abt_disable()	do { } while (0)
 #endif
 
+static inline void native_irq_sync(void)
+{
+	native_irq_enable();
+	isb();
+	native_irq_disable();
+}
+
 /*
  * Save the current interrupt enable state.
  */
 #define arch_local_save_flags arch_local_save_flags
-static inline unsigned long arch_local_save_flags(void)
+static inline unsigned long native_save_flags(void)
 {
 	unsigned long flags;
 	asm volatile(
-		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ local_save_flags"
+		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ native_save_flags"
 		: "=r" (flags) : : "memory", "cc");
 	return flags;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:180 @ static inline unsigned long arch_local_s
  * restore saved IRQ & FIQ state
  */
 #define arch_local_irq_restore arch_local_irq_restore
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline void native_irq_restore(unsigned long flags)
 {
 	asm volatile(
-		"	msr	" IRQMASK_REG_NAME_W ", %0	@ local_irq_restore"
+		"	msr	" IRQMASK_REG_NAME_W ", %0	@ native_irq_restore"
 		:
 		: "r" (flags)
 		: "memory", "cc");
 }
 
 #define arch_irqs_disabled_flags arch_irqs_disabled_flags
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline int native_irqs_disabled_flags(unsigned long flags)
 {
 	return flags & IRQMASK_I_BIT;
 }
 
+static inline bool native_irqs_disabled(void)
+{
+	unsigned long flags = native_save_flags();
+	return native_irqs_disabled_flags(flags);
+}
+
+#include <asm/irq_pipeline.h>
 #include <asm-generic/irqflags.h>
 
 #endif /* ifdef __KERNEL__ */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/irq_pipeline.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/irq_pipeline.h
--- linux-5.15.26/arch/arm/include/asm/irq_pipeline.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/irq_pipeline.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _ASM_ARM_IRQ_PIPELINE_H
+#define _ASM_ARM_IRQ_PIPELINE_H
+
+#include <asm-generic/irq_pipeline.h>
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+/*
+ * In order to cope with the limited number of SGIs available to us,
+ * In-band IPI messages are multiplexed over SGI0, whereas out-of-band
+ * IPIs are directly mapped to SGI1-2.
+ */
+#define OOB_NR_IPI		2
+#define OOB_IPI_OFFSET		1 /* SGI1 */
+#define TIMER_OOB_IPI		(ipi_irq_base + OOB_IPI_OFFSET)
+#define RESCHEDULE_OOB_IPI	(TIMER_OOB_IPI + 1)
+
+extern int ipi_irq_base;
+
+static inline notrace
+unsigned long arch_irqs_virtual_to_native_flags(int stalled)
+{
+	return (!!stalled) << IRQMASK_I_POS;
+}
+
+static inline notrace
+unsigned long arch_irqs_native_to_virtual_flags(unsigned long flags)
+{
+	return (!!hard_irqs_disabled_flags(flags)) << IRQMASK_i_POS;
+}
+
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+	int stalled = inband_irq_save();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+	barrier();
+	inband_irq_enable();
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+	inband_irq_disable();
+	barrier();
+}
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+	int stalled = inband_irqs_disabled();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return native_irqs_disabled_flags(flags);
+}
+
+static inline notrace void arch_local_irq_restore(unsigned long flags)
+{
+	inband_irq_restore(arch_irqs_disabled_flags(flags));
+	barrier();
+}
+
+static inline
+void arch_save_timer_regs(struct pt_regs *dst, struct pt_regs *src)
+{
+	dst->ARM_cpsr = src->ARM_cpsr;
+	dst->ARM_pc = src->ARM_pc;
+}
+
+static inline bool arch_steal_pipelined_tick(struct pt_regs *regs)
+{
+	return !!(regs->ARM_cpsr & IRQMASK_I_BIT);
+}
+
+static inline int arch_enable_oob_stage(void)
+{
+	return 0;
+}
+
+extern void (*handle_arch_irq)(struct pt_regs *);
+
+static inline void arch_handle_irq_pipelined(struct pt_regs *regs)
+{
+	handle_arch_irq(regs);
+}
+
+#define arch_kentry_get_irqstate(__regs)		\
+	({						\
+		to_svc_pt_regs(__regs)->irqstate;	\
+	})
+
+#define arch_kentry_set_irqstate(__regs, __irqstate)		\
+	do {							\
+		to_svc_pt_regs(__regs)->irqstate = __irqstate;	\
+	} while (0)
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	return native_irq_save();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	native_irq_enable();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	native_irq_disable();
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	return native_save_flags();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	native_irq_restore(flags);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return native_irqs_disabled_flags(flags);
+}
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+#endif /* _ASM_ARM_IRQ_PIPELINE_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/mmu_context.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/mmu_context.h
--- linux-5.15.26/arch/arm/include/asm/mmu_context.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/mmu_context.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:17 @
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/preempt.h>
+#include <linux/irq_pipeline.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:78 @ static inline void check_and_switch_cont
 static inline void finish_arch_post_lock_switch(void)
 {
 	struct mm_struct *mm = current->mm;
+	unsigned long flags;
 
 	if (mm && mm->context.switch_pending) {
 		/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:90 @ static inline void finish_arch_post_lock
 		preempt_disable();
 		if (mm->context.switch_pending) {
 			mm->context.switch_pending = 0;
+			protect_inband_mm(flags);
 			cpu_switch_mm(mm->pgd, mm);
+			unprotect_inband_mm(flags);
 		}
 		preempt_enable_no_resched();
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:103 @ static inline void finish_arch_post_lock
 
 #endif	/* CONFIG_CPU_HAS_ASID */
 
-#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
+#define activate_mm(prev,next)		__switch_mm(prev, next, NULL)
 
 /*
  * This is the actual mm switch as far as the scheduler
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:112 @ static inline void finish_arch_post_lock
  * actually changed.
  */
 static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+__switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	    struct task_struct *tsk)
 {
 #ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:138 @ switch_mm(struct mm_struct *prev, struct
 
 #include <asm-generic/mmu_context.h>
 
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned.  No registers are touched.  We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	protect_inband_mm(flags);
+	__switch_mm(prev, next, tsk);
+	unprotect_inband_mm(flags);
+}
+
+static inline void
+switch_oob_mm(struct mm_struct *prev, struct mm_struct *next,
+	      struct task_struct *tsk)
+{
+	__switch_mm(prev, next, tsk);
+}
+
 #endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/outercache.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/outercache.h
--- linux-5.15.26/arch/arm/include/asm/outercache.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/outercache.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:81 @ static inline void outer_flush_range(phy
  */
 static inline void outer_flush_all(void)
 {
-	if (outer_cache.flush_all)
+	unsigned long flags;
+
+	if (outer_cache.flush_all) {
+		flags = hard_cond_local_irq_save();
 		outer_cache.flush_all();
+		hard_cond_local_irq_restore(flags);
+	}
 }
 
 /**
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/ptrace.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/ptrace.h
--- linux-5.15.26/arch/arm/include/asm/ptrace.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/ptrace.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:22 @ struct pt_regs {
 struct svc_pt_regs {
 	struct pt_regs regs;
 	u32 dacr;
+#ifdef CONFIG_IRQ_PIPELINE
+	long irqstate;
+#endif
 };
 
 #define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/syscall.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/syscall.h
--- linux-5.15.26/arch/arm/include/asm/syscall.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/syscall.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:80 @ static inline void syscall_get_arguments
 	memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
 }
 
+static inline unsigned long syscall_get_arg0(struct pt_regs *regs)
+{
+	return regs->ARM_ORIG_r0;
+}
+
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
 					 const unsigned long *args)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/thread_info.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/thread_info.h
--- linux-5.15.26/arch/arm/include/asm/thread_info.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/thread_info.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:32 @
 
 struct task_struct;
 
+#include <dovetail/thread_info.h>
 #include <asm/types.h>
 
 struct cpu_context_save {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:55 @ struct cpu_context_save {
  */
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
+	__u32			local_flags;	/* local (synchronous) flags */
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
 	struct task_struct	*task;		/* main task structure */
 	__u32			cpu;		/* cpu */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:72 @ struct thread_info {
 #ifdef CONFIG_ARM_THUMBEE
 	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
 #endif
+	struct oob_thread_state	oob_state; /* co-kernel thread state */
 };
 
 #define INIT_THREAD_INFO(tsk)						\
 {									\
 	.task		= &tsk,						\
 	.flags		= 0,						\
+	.local_flags	= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 }
 
+#define ti_local_flags(__ti)	((__ti)->local_flags)
+
 /*
  * how to get the thread information struct from C
  */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:143 @ extern int vfp_restore_user_hwstate(stru
 #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
 #define TIF_SECCOMP		7	/* seccomp syscall filtering active */
 #define TIF_NOTIFY_SIGNAL	8	/* signal notifications exist */
+#define TIF_RETUSER		9	/* INBAND_TASK_RETUSER is pending */
 
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
+#define TIF_MAYDAY		21	/* emergency trap pending */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:159 @ extern int vfp_restore_user_hwstate(stru
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_RETUSER		(1 << TIF_RETUSER)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
+#define _TIF_MAYDAY		(1 << TIF_MAYDAY)
 
-/* Checks for any syscall work in entry-common.S */
+/*
+ * Checks for any syscall work in entry-common.S.
+ * CAUTION: Only bit0-bit15 are tested there.
+ */
 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:175 @ extern int vfp_restore_user_hwstate(stru
  */
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-				 _TIF_NOTIFY_SIGNAL)
+				 _TIF_NOTIFY_SIGNAL | _TIF_RETUSER)
+
+/*
+ * Local (synchronous) thread flags.
+ */
+#define _TLF_OOB		0x0001
+#define _TLF_DOVETAIL		0x0002
+#define _TLF_OFFSTAGE		0x0004
+#define _TLF_OOBTRAP		0x0008
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/trace/exceptions.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/trace/exceptions.h
--- linux-5.15.26/arch/arm/include/asm/trace/exceptions.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/trace/exceptions.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exceptions
+
+#if !defined(_TRACE_EXCEPTIONS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EXCEPTIONS_H
+
+#include <linux/tracepoint.h>
+#include <asm/ptrace.h>
+#include <asm/dovetail.h>
+
+#define __trace_trap(__sym)	{ __sym, #__sym }
+
+#define trace_trap_symbolic(__trapnr)				\
+	__print_symbolic(__trapnr,				\
+			__trace_trap(ARM_TRAP_ACCESS),		\
+			__trace_trap(ARM_TRAP_SECTION),		\
+			__trace_trap(ARM_TRAP_DABT),		\
+			__trace_trap(ARM_TRAP_PABT),		\
+			__trace_trap(ARM_TRAP_BREAK),		\
+			__trace_trap(ARM_TRAP_FPU),		\
+			__trace_trap(ARM_TRAP_VFP),		\
+			__trace_trap(ARM_TRAP_UNDEFINSTR),	\
+			__trace_trap(ARM_TRAP_ALIGNMENT))
+
+DECLARE_EVENT_CLASS(ARM_trap_event,
+	TP_PROTO(int trapnr, struct pt_regs *regs),
+	TP_ARGS(trapnr, regs),
+
+	TP_STRUCT__entry(
+		__field(int, trapnr)
+		__field(struct pt_regs *, regs)
+		),
+
+	TP_fast_assign(
+		__entry->trapnr = trapnr;
+		__entry->regs = regs;
+		),
+
+	TP_printk("%s mode trap: %s",
+		user_mode(__entry->regs) ? "user" : "kernel",
+		trace_trap_symbolic(__entry->trapnr))
+);
+
+DEFINE_EVENT(ARM_trap_event, ARM_trap_entry,
+	TP_PROTO(int trapnr, struct pt_regs *regs),
+	TP_ARGS(trapnr, regs)
+);
+
+DEFINE_EVENT(ARM_trap_event, ARM_trap_exit,
+	TP_PROTO(int trapnr, struct pt_regs *regs),
+	TP_ARGS(trapnr, regs)
+);
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE exceptions
+#endif /*  _TRACE_EXCEPTIONS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/include/asm/vdso/gettimeofday.h linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/vdso/gettimeofday.h
--- linux-5.15.26/arch/arm/include/asm/vdso/gettimeofday.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/include/asm/vdso/gettimeofday.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:145 @ static __always_inline const struct vdso
 	return __get_datapage();
 }
 
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+
+extern struct vdso_priv *__get_privpage(void);
+
+static __always_inline struct vdso_priv *__arch_get_vdso_priv(void)
+{
+	return __get_privpage();
+}
+
+static __always_inline long clock_open_device(const char *path, int mode)
+{
+	register u32 r0 asm("r0") = (u32)path;
+	register u32 r1 asm("r1") = (u32)mode;
+	register long ret asm ("r0");
+	register long nr asm("r7") = __NR_open;
+
+	asm volatile(
+		"	swi #0\n"
+		: "=r" (ret)
+		: "r"(r0), "r"(r1), "r"(nr)
+		: "memory");
+
+	return ret;
+}
+
+static __always_inline
+long clock_ioctl_device(int fd, unsigned int cmd, long arg)
+{
+	register u32 r0 asm("r0") = (u32)fd;
+	register u32 r1 asm("r1") = (u32)cmd;
+	register u32 r2 asm("r2") = (u32)arg;
+	register long ret asm ("r0");
+	register long nr asm("r7") = __NR_ioctl;
+
+	asm volatile(
+		"	swi #0\n"
+		: "=r" (ret)
+		: "r"(r0), "r"(r1), "r"(r2), "r"(nr)
+		: "memory");
+
+	return ret;
+}
+
+static __always_inline long clock_close_device(int fd)
+{
+	register u32 r0 asm("r0") = (u32)fd;
+	register long ret asm ("r0");
+	register long nr asm("r7") = __NR_close;
+
+	asm volatile(
+		"	swi #0\n"
+		: "=r" (ret)
+		: "r"(r0), "r"(nr)
+		: "memory");
+
+	return ret;
+}
+
+#endif	/* CONFIG_GENERIC_CLOCKSOURCE_VDSO */
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/Kconfig linux-dovetail-v5.15.y-dovetail/arch/arm/Kconfig
--- linux-5.15.26/arch/arm/Kconfig	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/Kconfig	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:234 @ config NEED_RET_TO_USER
 config ARCH_MTD_XIP
 	bool
 
+# Limited I-pipe compat (syscall routing only).
+config IPIPE_COMPAT
+	bool
+	select DOVETAIL_LEGACY_SYSCALL_RANGE
+
 config ARM_PATCH_PHYS_VIRT
 	bool "Patch physical to virtual translations at runtime" if EMBEDDED
 	default y
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:559 @ config ARCH_MULTI_V7
 config ARCH_MULTI_V6_V7
 	bool
 	select MIGHT_HAVE_CACHE_L2X0
+	select HAVE_IRQ_PIPELINE
+	select HAVE_DOVETAIL if CPU_HAS_ASID
 
 config ARCH_MULTI_CPU_AUTO
 	def_bool !(ARCH_MULTI_V4 || ARCH_MULTI_V4T || ARCH_MULTI_V6_V7)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1193 @ config SCHED_SMT
 	  MultiThreading at a cost of slightly increased overhead in some
 	  places. If unsure say N here.
 
+source "kernel/Kconfig.dovetail"
+
 config HAVE_ARM_SCU
 	bool
 	help
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/asm-offsets.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/asm-offsets.c
--- linux-5.15.26/arch/arm/kernel/asm-offsets.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/asm-offsets.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:45 @ int main(void)
 #endif
   BLANK();
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
+  DEFINE(TI_LOCAL_FLAGS,	offsetof(struct thread_info, local_flags));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:55 @ int main(void)
   DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
   DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
+  DEFINE(TI_OOB_MASK,		STAGE_MASK);
 #ifdef CONFIG_VFP
   DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
 #ifdef CONFIG_SMP
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:162 @ int main(void)
   BLANK();
 #ifdef CONFIG_VDSO
   DEFINE(VDSO_DATA_SIZE,	sizeof(union vdso_data_store));
+  DEFINE(VDSO_PRIV_SIZE,	PAGE_SIZE);
 #endif
   BLANK();
 #ifdef CONFIG_ARM_MPU
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/entry-armv.S linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/entry-armv.S
--- linux-5.15.26/arch/arm/kernel/entry-armv.S	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/entry-armv.S	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:8 @
  *  Copyright (C) 1996,1997,1998 Russell King.
  *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
  *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
+ *  Copyright (C) 2005 Stelian Pop.
  *
  *  Low-level vector interface routines
  *
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:36 @
 #include "entry-header.S"
 #include <asm/entry-macro-multi.S>
 #include <asm/probes.h>
+#include <asm/dovetail.h>
 
 /*
  * Interrupt handling.
  */
 	.macro	irq_handler
 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
-	ldr	r1, =handle_arch_irq
 	mov	r0, sp
 	badr	lr, 9997f
+#ifdef CONFIG_IRQ_PIPELINE
+	ldr	r1, =handle_arch_irq_pipelined
+	mov	pc, r1
+#else
+	ldr	r1, =handle_arch_irq
 	ldr	pc, [r1]
+#endif
+#elif defined(CONFIG_IRQ_PIPELINE)
+#error "Legacy IRQ handling not pipelined"
 #else
 	arch_irq_handler_default
 #endif
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:195 @ ENDPROC(__und_invalid)
 	uaccess_entry tsk, r0, r1, r2, \uaccess
 
 	.if \trace
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQ_PIPELINE
+	mov	r0, sp
+	bl	kentry_enter_pipelined
+#elif defined(CONFIG_TRACE_IRQFLAGS)
 	bl	trace_hardirqs_off
 #endif
 	.endif
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:218 @ ENDPROC(__dabt_svc)
 __irq_svc:
 	svc_entry
 	irq_handler
+#ifdef CONFIG_IRQ_PIPELINE
+	tst	r0, r0				@ skip epilogue if oob or in-band stalled
+	beq	1f
+#endif
 
 #ifdef CONFIG_PREEMPTION
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:232 @ __irq_svc:
 	blne	svc_preempt
 #endif
 
+1:
 	svc_exit r5, irq = 1			@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__irq_svc)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:242 @ ENDPROC(__irq_svc)
 #ifdef CONFIG_PREEMPTION
 svc_preempt:
 	mov	r8, lr
-1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
+1:	bl	arm_preempt_schedule_irq	@ irq en/disable is done inside
 	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
 	reteq	r8				@ go again
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:272 @ __und_svc:
 #else
 	svc_entry
 #endif
+#ifdef CONFIG_DOVETAIL
+	get_thread_info tsk
+	ldr	r0, [tsk, #TI_PREEMPT]		@ get preempt count
+	tst	r0, #TI_OOB_MASK		@ oob stage?
+	beq	1f
+	mov	r0, #ARM_TRAP_UNDEFINSTR
+	mov	r1, sp				@ r1 = &regs
+	bl	__oob_trap_notify
+1:
+#endif
 
 	mov	r1, #4				@ PC correction to apply
  THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:291 @ __und_svc:
 
 __und_svc_finish:
 	get_thread_info tsk
+#ifdef CONFIG_DOVETAIL
+	ldr	r0, [tsk, #TI_PREEMPT]		@ get preempt count
+	tst	r0, #TI_OOB_MASK		@ oob stage?
+	beq	1f
+	mov	r0, #ARM_TRAP_UNDEFINSTR
+	mov	r1, sp				@ r1 = &regs
+	bl	__oob_trap_unwind
+1:
+#endif
 	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
 	svc_exit r5				@ return from exception
  UNWIND(.fnend		)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:430 @ ENDPROC(__fiq_abt)
 
 	.if	\trace
 #ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
+	bl	trace_hardirqs_off_pipelined
 #endif
 	ct_user_exit save = 0
 	.endif
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:467 @ __irq_usr:
 	usr_entry
 	kuser_cmpxchg_check
 	irq_handler
-	get_thread_info tsk
 	mov	why, #0
+#ifdef CONFIG_IRQ_PIPELINE
+THUMB(	it ne)
+	tst	r0, r0
+	beq	fast_ret_to_user	@ skip epilogue if oob (in-band cannot be stalled)
+#endif
+	get_thread_info tsk
 	b	ret_to_user_from_irq
  UNWIND(.fnend		)
 ENDPROC(__irq_usr)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:760 @ ENTRY(ret_from_exception)
  UNWIND(.cantunwind	)
 	get_thread_info tsk
 	mov	why, #0
-	b	ret_to_user
+	ret_to_user_pipelined r1
  UNWIND(.fnend		)
 ENDPROC(__pabt_usr)
 ENDPROC(ret_from_exception)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/entry-common.S linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/entry-common.S
--- linux-5.15.26/arch/arm/kernel/entry-common.S	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/entry-common.S	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:6 @
  *  linux/arch/arm/kernel/entry-common.S
  *
  *  Copyright (C) 2000 Russell King
+ *  Copyright (C) 2005 Stelian Pop.
  */
 
 #include <asm/assembler.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:16 @
 #include <asm/memory.h>
 #ifdef CONFIG_AEABI
 #include <asm/unistd-oabi.h>
+#include <uapi/asm-generic/dovetail.h>
 #endif
 
 	.equ	NR_syscalls, __NR_syscalls
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:139 @ no_work_pending:
 	restore_user_regs fast = 0, offset = 0
 ENDPROC(ret_to_user_from_irq)
 ENDPROC(ret_to_user)
+ENTRY(fast_ret_to_user)
+	disable_irq_notrace			@ disable interrupts
+	b	no_work_pending
+ENDPROC(fast_ret_to_user)
 
 /*
  * This is how we return from a fork.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:253 @ ENTRY(vector_swi)
  TRACE(	ldmia	sp, {r0 - r3}		)
 
 local_restart:
+#ifdef CONFIG_DOVETAIL
+	ldr	r10, [tsk, #TI_LOCAL_FLAGS]	@ tsk(r10) is callee-saved
+#ifdef CONFIG_IPIPE_COMPAT
+	ldr	r0, =#0xf0042			@ old syscall signature
+	cmp	scno, r0
+	bne	1f
+	add	scno, scno, #__OOB_SYSCALL_BIT	@ force in oob marker
+	b	fastcall_try
+1:
+#endif
+#ifdef CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE
+	ldr	r0, =#__OOB_SYSCALL_BIT
+	ands	r0, scno, r0
+	bne	fastcall_try
+#endif
+	cmp	scno, #__NR_prctl
+	bne	slow_path
+	ldr	r0, [sp, #S_OLD_R0]
+	tst	r0, #__OOB_SYSCALL_BIT
+	beq	slow_path
+fastcall_try:
+	tst	r10, #_TLF_OOB
+	beq	slow_path
+	mov	r0, sp				@ regs
+	bl	handle_oob_syscall
+	ldr	r10, [tsk, #TI_LOCAL_FLAGS]
+	tst	r0, r0
+	beq	slow_path
+	tst	r10, #_TLF_OOB
+	bne	fastcall_exit_check		@ check for MAYDAY
+	bl	sync_inband_irqs
+	b	ret_slow_syscall
+fastcall_exit_check:
+	ldr	r10, [tsk, #TI_FLAGS]
+	tst	r10, #_TIF_MAYDAY
+	beq	fast_ret_to_user
+	mov	r0, sp				@ regs
+	bl	dovetail_call_mayday
+	b	fast_ret_to_user
+slow_path:
+	tst	r10, #_TLF_DOVETAIL
+	bne	pipeline_syscall
+#ifdef CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE
+	ldr	r0, =#__OOB_SYSCALL_BIT
+	ands	r0, scno, r0
+	bne	pipeline_syscall
+#endif
+	cmp	scno, #__NR_prctl
+	bne	root_syscall
+	ldr	r0, [sp, #S_OLD_R0]
+	tst	r0, #__OOB_SYSCALL_BIT
+	beq	root_syscall
+pipeline_syscall:
+	mov	r0, sp				@ regs
+	bl	__pipeline_syscall
+	ldr	r10, [tsk, #TI_LOCAL_FLAGS]
+	tst	r10, #_TLF_OOB
+	bne	fast_ret_to_user
+	cmp	r0, #0
+	bgt	ret_slow_syscall
+root_syscall:
+	ldmia	sp, { r0 - r3 }
+#endif /* CONFIG_DOVETAIL */
+
 	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/entry-header.S linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/entry-header.S
--- linux-5.15.26/arch/arm/kernel/entry-header.S	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/entry-header.S	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:206 @
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
 	@ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
 	@ The parent context IRQs must have been enabled to get here in
 	@ the first place, so there's no point checking the PSR I bit.
+#ifdef CONFIG_IRQ_PIPELINE
+	mov	r0, sp
+	bl	kentry_exit_pipelined
+#elif defined(CONFIG_TRACE_IRQFLAGS)
 	bl	trace_hardirqs_on
 #endif
 	.else
 	@ IRQs off again before pulling preserved data off the stack
 	disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQ_PIPELINE
+	mov	r0, sp
+	bl	kentry_exit_pipelined
+#elif defined(CONFIG_TRACE_IRQFLAGS)
 	tst	\rpsr, #PSR_I_BIT
 	bleq	trace_hardirqs_on
 	tst	\rpsr, #PSR_I_BIT
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:411 @
 	.endm
 
 /*
+ * Branch to the exception epilogue, skipping the in-band work
+ * if running over the out-of-band interrupt stage.
+ */
+	.macro ret_to_user_pipelined, tmp
+#ifdef CONFIG_IRQ_PIPELINE
+	ldr	\tmp, [tsk, #TI_LOCAL_FLAGS]
+THUMB(	it ne)
+	tst	\tmp, #_TLF_OOB
+	bne	fast_ret_to_user
+#endif
+	b	ret_to_user
+	.endm
+
+/*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
  *
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/irq.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/irq.c
--- linux-5.15.26/arch/arm/kernel/irq.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/irq.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:26 @
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
+#include <linux/irq_pipeline.h>
 #include <linux/random.h>
 #include <linux/smp.h>
 #include <linux/init.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:121 @ void __init init_IRQ(void)
 	uniphier_cache_init();
 }
 
+#ifdef CONFIG_IRQ_PIPELINE
+asmlinkage int __exception_irq_entry
+handle_arch_irq_pipelined(struct pt_regs *regs)
+{
+	return handle_irq_pipelined(regs);
+}
+#endif
+
 #ifdef CONFIG_SPARSE_IRQ
 int __init arch_probe_nr_irqs(void)
 {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/irq_pipeline.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/irq_pipeline.c
--- linux-5.15.26/arch/arm/kernel/irq_pipeline.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/irq_pipeline.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/irq.h>
+#include <linux/irq_pipeline.h>
+
+void arch_do_IRQ_pipelined(struct irq_desc *desc)
+{
+	struct pt_regs *regs = raw_cpu_ptr(&irq_pipeline.tick_regs);
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	handle_irq_desc(desc);
+	irq_exit();
+
+	set_irq_regs(old_regs);
+}
+
+void __init arch_irq_pipeline_init(void)
+{
+	/* no per-arch init. */
+}
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/Makefile linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/Makefile
--- linux-5.15.26/arch/arm/kernel/Makefile	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/Makefile	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:95 @ head-y			:= head$(MMUEXT).o
 obj-$(CONFIG_DEBUG_LL)	+= debug.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_ARM_PATCH_PHYS_VIRT)	+= phys2virt.o
+ifeq ($(CONFIG_DEBUG_LL),y)
+obj-$(CONFIG_RAW_PRINTK)	+= raw_printk.o
+endif
+
+obj-$(CONFIG_IRQ_PIPELINE)	+= irq_pipeline.o
 
 # This is executed very early using a temporary stack when no memory allocator
 # nor global data is available. Everything has to be allocated on the stack.
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/patch.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/patch.c
--- linux-5.15.26/arch/arm/kernel/patch.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/patch.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:20 @ struct patch {
 };
 
 #ifdef CONFIG_MMU
-static DEFINE_RAW_SPINLOCK(patch_lock);
+static DEFINE_HARD_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
 {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/process.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/process.c
--- linux-5.15.26/arch/arm/kernel/process.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/process.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:72 @ void arch_cpu_idle(void)
 		arm_pm_idle();
 	else
 		cpu_do_idle();
+	hard_cond_local_irq_enable();
 	raw_local_irq_enable();
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:445 @ int arch_setup_additional_pages(struct l
 	return ret;
 }
 #endif
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+/*
+ * When pipelining interrupts, we have to reconcile the hardware and
+ * the virtual states. Hard irqs are off on entry while the current
+ * stage has to be unstalled: fix this up by stalling the in-band
+ * stage on entry, unstalling on exit.
+ */
+asmlinkage void __sched arm_preempt_schedule_irq(void)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+	stall_inband_nocheck();
+	preempt_schedule_irq();
+	unstall_inband_nocheck();
+}
+
+#else
+
+asmlinkage void __sched arm_preempt_schedule_irq(void)
+{
+	preempt_schedule_irq();
+}
+
+#endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/ptrace.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/ptrace.c
--- linux-5.15.26/arch/arm/kernel/ptrace.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/ptrace.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:209 @ void ptrace_break(struct pt_regs *regs)
 
 static int break_trap(struct pt_regs *regs, unsigned int instr)
 {
+	oob_trap_notify(ARM_TRAP_BREAK, regs);
 	ptrace_break(regs);
+	oob_trap_unwind(ARM_TRAP_BREAK, regs);
 	return 0;
 }
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/raw_printk.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/raw_printk.c
--- linux-5.15.26/arch/arm/kernel/raw_printk.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/raw_printk.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+
+/*
+ * If both CONFIG_DEBUG_LL and CONFIG_RAW_PRINTK are set, create a
+ * console device sending the raw output to printascii().
+ */
+void printascii(const char *s);
+
+static void raw_console_write(struct console *co,
+			      const char *s, unsigned count)
+{
+	printascii(s);
+}
+
+static struct console raw_console = {
+	.name		= "rawcon",
+	.write_raw	= raw_console_write,
+	.flags		= CON_PRINTBUFFER | CON_ENABLED,
+	.index		= -1,
+};
+
+static int __init raw_console_init(void)
+{
+	register_console(&raw_console);
+
+	return 0;
+}
+console_initcall(raw_console_init);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/signal.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/signal.c
--- linux-5.15.26/arch/arm/kernel/signal.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/signal.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:11 @
 #include <linux/random.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
+#include <linux/irq_pipeline.h>
 #include <linux/uaccess.h>
 #include <linux/tracehook.h>
 #include <linux/uprobes.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:601 @ static int do_signal(struct pt_regs *reg
 	return 0;
 }
 
+static inline void do_retuser(void)
+{
+	unsigned int thread_flags;
+
+	if (dovetailing()) {
+		thread_flags = current_thread_info()->flags;
+		if (thread_flags & _TIF_RETUSER)
+			inband_retuser_notify();
+	}
+}
+
 asmlinkage int
 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
+	WARN_ON_ONCE(irq_pipeline_debug() &&
+		(irqs_disabled() || running_oob()));
+
 	/*
 	 * The assembly code enters us with IRQs off, but it hasn't
 	 * informed the tracing code of that for efficiency reasons.
 	 * Update the trace code with the current status.
 	 */
-	trace_hardirqs_off();
+	if (!irqs_pipelined())
+		trace_hardirqs_off();
 	do {
+		if (irqs_pipelined()) {
+			local_irq_disable();
+			hard_cond_local_irq_enable();
+		}
+
 		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
 			schedule();
 		} else {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:640 @ do_work_pending(struct pt_regs *regs, un
 			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
 				int restart = do_signal(regs, syscall);
 				if (unlikely(restart)) {
+					do_retuser();
 					/*
 					 * Restart without handlers.
 					 * Deal with it without leaving
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:654 @ do_work_pending(struct pt_regs *regs, un
 			} else {
 				tracehook_notify_resume(regs);
 			}
+			do_retuser();
 		}
-		local_irq_disable();
+		hard_local_irq_disable();
+
+		/* RETUSER might have switched oob */
+		if (!running_inband())
+			break;
+
 		thread_flags = current_thread_info()->flags;
-	} while (thread_flags & _TIF_WORK_MASK);
+	} while (inband_irq_pending() || (thread_flags & _TIF_WORK_MASK));
 	return 0;
 }
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/smp.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/smp.c
--- linux-5.15.26/arch/arm/kernel/smp.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/smp.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:83 @ enum ipi_msg_type {
 	MAX_IPI
 };
 
-static int ipi_irq_base __read_mostly;
+int ipi_irq_base __read_mostly;
 static int nr_ipi __read_mostly = NR_IPI;
 static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:328 @ void arch_cpu_idle_dead(void)
 
 	idle_task_exit();
 
-	local_irq_disable();
+	local_irq_disable_full();
 
 	/*
 	 * Flush the data out of the L1 cache for this CPU.  This must be
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:420 @ asmlinkage void secondary_start_kernel(v
 	local_flush_tlb_all();
 
 	/*
+	 * irq_pipeline: debug_smp_processor_id() accesses percpu
+	 * data.
+	 */
+	if (irqs_pipelined())
+		set_my_cpu_offset(per_cpu_offset(raw_smp_processor_id()));
+
+	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:469 @ asmlinkage void secondary_start_kernel(v
 
 	complete(&cpu_running);
 
-	local_irq_enable();
+	local_irq_enable_full();
 	local_fiq_enable();
 	local_abt_enable();
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:544 @ static const char *ipi_types[NR_IPI] __t
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
+static unsigned int get_ipi_count(struct irq_desc *desc, unsigned int cpu);
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:557 @ void show_ipi_list(struct seq_file *p, i
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
+			seq_printf(p, "%10u ", get_ipi_count(ipi_desc[i], cpu));
 
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:610 @ static void ipi_cpu_stop(unsigned int cp
 	set_cpu_online(cpu, false);
 
 	local_fiq_disable();
-	local_irq_disable();
+	local_irq_disable_full();
 
 	while (1) {
 		cpu_relax();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:699 @ void handle_IPI(int ipinr, struct pt_reg
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
+	/*
+	 * We don't support legacy IPI delivery when pipelining
+	 * interrupts.
+	 */
+	WARN_ON_ONCE(irqs_pipelined());
+
 	irq_enter();
 	do_handle_IPI(ipinr);
 	irq_exit();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:712 @ void handle_IPI(int ipinr, struct pt_reg
 	set_irq_regs(old_regs);
 }
 
+static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static DEFINE_PER_CPU(unsigned long, ipi_messages);
+
+static DEFINE_PER_CPU(unsigned int [MAX_IPI], ipi_counts);
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	unsigned long *pmsg;
+	unsigned int ipinr;
+
+	/*
+	 * Decode in-band IPIs (0..MAX_IPI - 1) multiplexed over
+	 * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own
+	 * individual handler.
+	 */
+	pmsg = raw_cpu_ptr(&ipi_messages);
+	while (*pmsg) {
+		ipinr = ffs(*pmsg) - 1;
+		clear_bit(ipinr, pmsg);
+		__this_cpu_inc(ipi_counts[ipinr]);
+		do_handle_IPI(ipinr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	unsigned int cpu;
+
+	/* regular in-band IPI (multiplexed over SGI0). */
+	for_each_cpu(cpu, target)
+		set_bit(ipinr, &per_cpu(ipi_messages, cpu));
+
+	wmb();
+	__smp_cross_call(target, 0);
+}
+
+static unsigned int get_ipi_count(struct irq_desc *desc, unsigned int cpu)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	return per_cpu(ipi_counts[irq - ipi_irq_base], cpu);
+}
+
+void irq_send_oob_ipi(unsigned int irq,
+		const struct cpumask *cpumask)
+{
+	unsigned int sgi = irq - ipi_irq_base;
+
+	if (WARN_ON(irq_pipeline_debug() &&
+		    (sgi < OOB_IPI_OFFSET ||
+		     sgi >= OOB_IPI_OFFSET + OOB_NR_IPI)))
+		return;
+
+	/* Out-of-band IPI (SGI1-2). */
+	__smp_cross_call(cpumask, sgi);
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+#else
+
 static irqreturn_t ipi_handler(int irq, void *data)
 {
 	do_handle_IPI(irq - ipi_irq_base);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:788 @ static irqreturn_t ipi_handler(int irq,
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	__smp_cross_call(target, ipinr);
+}
+
+static unsigned int get_ipi_count(struct irq_desc *desc, unsigned int cpu)
+{
+	return irq_desc_kstat_cpu(desc, cpu);
 }
 
+#endif /* CONFIG_IRQ_PIPELINE */
+
 static void ipi_setup(int cpu)
 {
 	int i;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:811 @ static void ipi_setup(int cpu)
 
 void __init set_smp_ipi_range(int ipi_base, int n)
 {
-	int i;
+	int i, inband_nr_ipi;
 
 	WARN_ON(n < MAX_IPI);
 	nr_ipi = min(n, MAX_IPI);
+	/*
+	 * irq_pipeline: the in-band stage traps SGI0 only,
+	 * over which IPI messages are multiplexed. Other SGIs
+	 * are available for exchanging out-of-band IPIs.
+	 */
+	inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi;
 
 	for (i = 0; i < nr_ipi; i++) {
-		int err;
-
-		err = request_percpu_irq(ipi_base + i, ipi_handler,
-					 "IPI", &irq_stat);
-		WARN_ON(err);
+		if (i < inband_nr_ipi) {
+			int err;
 
+			err = request_percpu_irq(ipi_base + i, ipi_handler,
+						"IPI", &irq_stat);
+			WARN_ON(err);
+		}
 		ipi_desc[i] = irq_to_desc(ipi_base + i);
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
 	}
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/smp_twd.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/smp_twd.c
--- linux-5.15.26/arch/arm/kernel/smp_twd.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/smp_twd.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:34 @ static DEFINE_PER_CPU(bool, percpu_setup
 
 static struct clock_event_device __percpu *twd_evt;
 static unsigned int twd_features =
-		CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+		CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE;
 static int twd_ppi;
 
 static int twd_shutdown(struct clock_event_device *clk)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:185 @ static irqreturn_t twd_handler(int irq,
 	struct clock_event_device *evt = dev_id;
 
 	if (twd_timer_ack()) {
-		evt->event_handler(evt);
+		clockevents_handle_event(evt);
 		return IRQ_HANDLED;
 	}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:282 @ static int __init twd_local_timer_common
 		goto out_free;
 	}
 
-	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
+	err = __request_percpu_irq(twd_ppi, twd_handler,
+				   IRQF_TIMER, "twd", twd_evt);
 	if (err) {
 		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
 		goto out_free;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/traps.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/traps.c
--- linux-5.15.26/arch/arm/kernel/traps.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/traps.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:394 @ int is_valid_bugaddr(unsigned long pc)
 #endif
 
 static LIST_HEAD(undef_hook);
-static DEFINE_RAW_SPINLOCK(undef_lock);
+static DEFINE_HARD_SPINLOCK(undef_lock);
 
 void register_undef_hook(struct undef_hook *hook)
 {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/kernel/vdso.c linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/vdso.c
--- linux-5.15.26/arch/arm/kernel/vdso.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/kernel/vdso.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:35 @ static struct page **vdso_text_pagelist;
 
 extern char vdso_start[], vdso_end[];
 
-/* Total number of pages needed for the data and text portions of the VDSO. */
+/*
+ * Total number of pages needed for the data, private and text
+ * portions of the VDSO.
+ */
 unsigned int vdso_total_pages __ro_after_init;
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:177 @ static void __init patch_vdso(void *ehdr
 	/* If the virtual counter is absent or non-functional we don't
 	 * want programs to incur the slight additional overhead of
 	 * dispatching through the VDSO only to fall back to syscalls.
+	 * However, if clocksources supporting generic MMIO access can
+	 * be reached via the vDSO, keep this fast path enabled.
 	 */
-	if (!cntvct_ok) {
+	if (!cntvct_ok && !IS_ENABLED(CONFIG_GENERIC_CLOCKSOURCE_VDSO)) {
 		vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
 		vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
 		vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:218 @ static int __init vdso_init(void)
 
 	vdso_text_mapping.pages = vdso_text_pagelist;
 
-	vdso_total_pages = 1; /* for the data/vvar page */
+	vdso_total_pages = 2; /* for the data/vvar and vpriv pages */
 	vdso_total_pages += text_pages;
 
 	cntvct_ok = cntvct_functional();
 
 	patch_vdso(vdso_start);
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+	vdso_data->cs_type_seq = CLOCKSOURCE_VDSO_NONE << 16 | 1;
+#endif
 
 	return 0;
 }
 arch_initcall(vdso_init);
 
+static int install_vpriv(struct mm_struct *mm, unsigned long addr)
+{
+	return mmap_region(NULL, addr, PAGE_SIZE,
+			  VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
+			   0, NULL) != addr ? -EINVAL : 0;
+}
+
 static int install_vvar(struct mm_struct *mm, unsigned long addr)
 {
 	struct vm_area_struct *vma;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:246 @ static int install_vvar(struct mm_struct
 	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 				       VM_READ | VM_MAYREAD,
 				       &vdso_data_mapping);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	if (cache_is_vivt())
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	return PTR_ERR_OR_ZERO(vma);
+	return vma->vm_start != addr ? -EINVAL : 0;
 }
 
 /* assumes mmap_lock is write-locked */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:266 @ void arm_install_vdso(struct mm_struct *
 	if (vdso_text_pagelist == NULL)
 		return;
 
-	if (install_vvar(mm, addr))
+	if (install_vpriv(mm, addr)) {
+		pr_err("cannot map VPRIV at expected address!\n");
 		return;
+	}
+
+	/* Account for the private storage. */
+	addr += PAGE_SIZE;
+	if (install_vvar(mm, addr)) {
+		WARN(1, "cannot map VVAR at expected address!\n");
+		return;
+	}
 
-	/* Account for vvar page. */
+	/* Account for vvar and vpriv pages. */
 	addr += PAGE_SIZE;
-	len = (vdso_total_pages - 1) << PAGE_SHIFT;
+	len = (vdso_total_pages - 2) << PAGE_SHIFT;
 
 	vma = _install_special_mapping(mm, addr, len,
 		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
 		&vdso_text_mapping);
 
-	if (!IS_ERR(vma))
+	if (IS_ERR(vma) || vma->vm_start != addr)
+		WARN(1, "cannot map VDSO at expected address!\n");
+	else
 		mm->context.vdso = addr;
 }
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/mach-imx/gpc.c linux-dovetail-v5.15.y-dovetail/arch/arm/mach-imx/gpc.c
--- linux-5.15.26/arch/arm/mach-imx/gpc.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/mach-imx/gpc.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:65 @ void imx_gpc_set_l2_mem_power_in_lpm(boo
 void imx_gpc_pre_suspend(bool arm_power_off)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+	unsigned long flags;
 	int i;
 
 	/* Tell GPC to power off ARM core when suspend */
 	if (arm_power_off)
 		imx_gpc_set_arm_power_in_lpm(arm_power_off);
 
+	flags = hard_cond_local_irq_save();
+
 	for (i = 0; i < IMR_NUM; i++) {
 		gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
 		writel_relaxed(~gpc_wake_irqs[i], reg_imr1 + i * 4);
 	}
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void imx_gpc_post_resume(void)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+	unsigned long flags;
 	int i;
 
 	/* Keep ARM core powered on for other low-power modes */
 	imx_gpc_set_arm_power_in_lpm(false);
 
+	flags = hard_cond_local_irq_save();
+
 	for (i = 0; i < IMR_NUM; i++)
 		writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:118 @ static int imx_gpc_irq_set_wake(struct i
 void imx_gpc_mask_all(void)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+	unsigned long flags;
 	int i;
 
+	/*
+	 * Save the live IMRs then mask every GPC interrupt; done with
+	 * hard irqs off so the saved copy matches what was actually
+	 * programmed when the mask-all took effect.
+	 */
+	flags = hard_cond_local_irq_save();
+
 	for (i = 0; i < IMR_NUM; i++) {
 		gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
 		writel_relaxed(~0, reg_imr1 + i * 4);
 	}
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void imx_gpc_restore_all(void)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+	unsigned long flags;
 	int i;
 
+	/* Undo imx_gpc_mask_all(): restore the saved IMRs, hard irqs off. */
+	flags = hard_cond_local_irq_save();
+
 	for (i = 0; i < IMR_NUM; i++)
 		writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void imx_gpc_hwirq_unmask(unsigned int hwirq)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:190 @ static struct irq_chip imx_gpc_chip = {
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 #endif
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int imx_gpc_domain_translate(struct irq_domain *d,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/mm/alignment.c linux-dovetail-v5.15.y-dovetail/arch/arm/mm/alignment.c
--- linux-5.15.26/arch/arm/mm/alignment.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/mm/alignment.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:22 @
 #include <linux/init.h>
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
+#include <linux/dovetail.h>
 
 #include <asm/cp15.h>
 #include <asm/system_info.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:811 @ do_alignment(unsigned long addr, unsigne
 	u16 tinstr = 0;
 	int isize = 4;
 	int thumb2_32b = 0;
-	int fault;
+	int fault, ret = 0;
 
 	if (interrupts_enabled(regs))
-		local_irq_enable();
+		hard_local_irq_enable();
+
+	oob_trap_notify(ARM_TRAP_ALIGNMENT, regs);
 
 	instrptr = instruction_pointer(regs);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:941 @ do_alignment(unsigned long addr, unsigne
 	if (type == TYPE_LDST)
 		do_alignment_finish_ldst(addr, instr, regs, offset);
 
-	return 0;
+	goto out;
 
  bad_or_fault:
 	if (type == TYPE_ERROR)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:950 @ do_alignment(unsigned long addr, unsigne
 	 * We got a fault - fix it up, or die.
 	 */
 	do_bad_area(addr, fsr, regs);
-	return 0;
+	goto out;
 
  swp:
 	pr_err("Alignment trap: not handling swp instruction\n");
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:964 @ do_alignment(unsigned long addr, unsigne
 		isize << 1,
 		isize == 2 ? tinstr : instr, instrptr);
 	ai_skipped += 1;
-	return 1;
+	ret = 1;
+	goto out;
 
  user:
 	ai_user += 1;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:996 @ do_alignment(unsigned long addr, unsigne
 		 * entry-common.S) and disable the alignment trap only if
 		 * there is no work pending for this thread.
 		 */
-		raw_local_irq_disable();
+		hard_local_irq_disable();
 		if (!(current_thread_info()->flags & _TIF_WORK_MASK))
 			set_cr(cr_no_alignment);
 	}
 
-	return 0;
+out:
+	oob_trap_unwind(ARM_TRAP_ALIGNMENT, regs);
+
+	return ret;
 }
 
 static int __init noalign_setup(char *__unused)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/mm/cache-l2x0.c linux-dovetail-v5.15.y-dovetail/arch/arm/mm/cache-l2x0.c
--- linux-5.15.26/arch/arm/mm/cache-l2x0.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/mm/cache-l2x0.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:41 @ struct l2c_init_data {
 
 static void __iomem *l2x0_base;
 static const struct l2c_init_data *l2x0_data;
-static DEFINE_RAW_SPINLOCK(l2x0_lock);
+static DEFINE_HARD_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;	/* Bitmask of active ways */
 static u32 l2x0_size;
 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:51 @ struct l2x0_regs l2x0_saved_regs;
 static bool l2x0_bresp_disable;
 static bool l2x0_flz_disable;
 
+#ifdef CONFIG_IRQ_PIPELINE
+#define CACHE_RANGE_ATOMIC_MAX	512UL
+static int l2x0_wa = -1;
+/*
+ * "l2x0_write_allocate=" early parameter: 0 forces write-allocate
+ * off, any other numeric value keeps it enabled (consumed by
+ * __l2c_init()); default -1 means "not specified on the command
+ * line". Note: simple_strtol() reports no parse errors here, so
+ * malformed input silently reads as 0.
+ */
+static int __init l2x0_setup_wa(char *str)
+{
+	l2x0_wa = !!simple_strtol(str, NULL, 0);
+	return 0;
+}
+early_param("l2x0_write_allocate", l2x0_setup_wa);
+#else
+#define CACHE_RANGE_ATOMIC_MAX	4096UL
+#endif
+
 /*
  * Common code for all cache controllers.
  */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:136 @ static void l2c_enable(void __iomem *bas
 
 	l2x0_data->unlock(base, num_lock);
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__l2c_op_way(base + L2X0_INV_WAY);
 	writel_relaxed(0, base + sync_reg_offset);
 	l2c_wait_mask(base + sync_reg_offset, 1);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:241 @ static void l2c210_flush_all(void)
 {
 	void __iomem *base = l2x0_base;
 
-	BUG_ON(!irqs_disabled());
+	BUG_ON(!hard_irqs_disabled());
 
 	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
 	__l2c210_cache_sync(base);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:300 @ static void l2c220_op_way(void __iomem *
 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
 	unsigned long end, unsigned long flags)
 {
-	raw_spinlock_t *lock = &l2x0_lock;
+	typeof(l2x0_lock) *lock = &l2x0_lock;
 
 	while (start < end) {
-		unsigned long blk_end = start + min(end - start, 4096UL);
+		unsigned long blk_end = start + min(end - start, CACHE_RANGE_ATOMIC_MAX);
 
 		while (start < blk_end) {
 			l2c_wait_mask(reg, 1);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:514 @ static void l2c310_inv_range_erratum(uns
 
 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
 {
-	raw_spinlock_t *lock = &l2x0_lock;
+	typeof(l2x0_lock) *lock = &l2x0_lock;
 	unsigned long flags;
 	void __iomem *base = l2x0_base;
 
 	raw_spin_lock_irqsave(lock, flags);
 	while (start < end) {
-		unsigned long blk_end = start + min(end - start, 4096UL);
+		unsigned long blk_end = start + min(end - start, CACHE_RANGE_ATOMIC_MAX);
 
 		l2c_set_debug(base, 0x03);
 		while (start < blk_end) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:816 @ static int __init __l2c_init(const struc
 	if (aux_val & aux_mask)
 		pr_alert("L2C: platform provided aux values permit register corruption.\n");
 
+#ifdef CONFIG_IRQ_PIPELINE
+	if (!l2x0_wa) {
+		/*
+		 * Disable WA by setting bit 23 in the auxiliary
+		 * control register.
+		 */
+		aux_mask &= ~L220_AUX_CTRL_FWA_MASK;
+		aux_val &= ~L220_AUX_CTRL_FWA_MASK;
+		aux_val |= 1 << L220_AUX_CTRL_FWA_SHIFT;
+		pr_warn("%s: irq_pipeline: write-allocate disabled via command line\n",
+			data->type);
+	} else if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L220 ||
+		   ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310 &&
+		    (cache_id & L2X0_CACHE_ID_RTL_MASK) < L310_CACHE_ID_RTL_R3P2))
+		pr_alert("%s: irq_pipeline: write-allocate enabled, may induce high latency\n",
+			 data->type);
+#endif
+
 	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 	aux &= aux_mask;
 	aux |= aux_val;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/mm/context.c linux-dovetail-v5.15.y-dovetail/arch/arm/mm/context.c
--- linux-5.15.26/arch/arm/mm/context.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/mm/context.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:42 @
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		ASID_FIRST_VERSION
 
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static DEFINE_HARD_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:240 @ static u64 new_context(struct mm_struct
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu = raw_smp_processor_id();
+	bool need_flush;
 	u64 asid;
 
+	WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled());
+
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:269 @ void check_and_switch_context(struct mm_
 		atomic64_set(&mm->context.id, asid);
 	}
 
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
-		local_flush_bp_all();
-		local_flush_tlb_all();
-	}
-
+	need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending);
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
+	if (need_flush) {
+		local_flush_bp_all();
+		local_flush_tlb_all();
+	}
+
 switch_mm_fastpath:
 	cpu_switch_mm(mm->pgd, mm);
 }
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/mm/fault.c linux-dovetail-v5.15.y-dovetail/arch/arm/mm/fault.c
--- linux-5.15.26/arch/arm/mm/fault.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/mm/fault.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:12 @
 #include <linux/signal.h>
 #include <linux/mm.h>
 #include <linux/hardirq.h>
+#include <linux/irq_pipeline.h>
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:25 @
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
 #include <asm/tlbflush.h>
+#include <asm/dovetail.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace/exceptions.h>
 
 #include "fault.h"
 
 #ifdef CONFIG_MMU
 
+#ifdef CONFIG_IRQ_PIPELINE
+/*
+ * We need to synchronize the virtual interrupt state with the hard
+ * interrupt state we received on entry, then turn hardirqs back on to
+ * allow code which does not require strict serialization to be
+ * preempted by an out-of-band activity.
+ */
+static inline
+unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	trace_ARM_trap_entry(exception, regs);
+
+	/*
+	 * Snapshot the hard interrupt state on entry: the caller must
+	 * hand the returned value back to fault_exit(), which restores
+	 * it.
+	 */
+	flags = hard_local_save_flags();
+
+	oob_trap_notify(exception, regs);
+
+	/*
+	 * CAUTION: The co-kernel might have to demote the current
+	 * context to the in-band stage as a result of handling this
+	 * trap, returning with hard irqs on. We expect stall_inband()
+	 * to complain loudly if we are still running oob afterwards.
+	 */
+	if (raw_irqs_disabled_flags(flags)) {
+		stall_inband();
+		trace_hardirqs_off();
+	}
+
+	hard_local_irq_enable();
+
+	return flags;
+}
+
+static inline
+void fault_exit(int exception, struct pt_regs *regs,
+		unsigned long flags)
+{
+	/*
+	 * Counterpart of fault_entry(): @flags must be the value that
+	 * call returned for this trap. Hard irqs are expected to be on
+	 * at this point (fault_entry enabled them).
+	 */
+	WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
+
+	/*
+	 * We expect kentry_exit_pipelined() to clear the stall bit if
+	 * kentry_enter_pipelined() observed it that way.
+	 */
+	oob_trap_unwind(exception, regs);
+	trace_ARM_trap_exit(exception, regs);
+	hard_local_irq_restore(flags);
+}
+
+#else	/* !CONFIG_IRQ_PIPELINE */
+
+#define fault_entry(__exception, __regs)  ({ 0; })
+#define fault_exit(__exception, __regs, __flags)  \
+	do { (void)(__flags); } while (0)
+
+#endif	/* !CONFIG_IRQ_PIPELINE */
+
 /*
  * This is useful to dump out the page tables associated with
  * 'addr' in mm 'mm'.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:160 @ void show_pte(const char *lvl, struct mm
 	pr_cont("\n");
 }
 #else					/* CONFIG_MMU */
+/*
+ * !CONFIG_MMU stub: no pipeline/interrupt state to synchronize,
+ * return a dummy flags value for fault_exit(). Made static inline
+ * to match the CONFIG_MMU variant and the fault_exit stub below,
+ * and to avoid exporting a prototype-less symbol
+ * (-Wmissing-prototypes).
+ */
+static inline unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+	return 0;
+}
+
+/* !CONFIG_MMU stub: nothing to unwind on trap exit. */
+static inline void fault_exit(int exception, struct pt_regs *regs,
+			unsigned long combo)
+{ }
+
 void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 { }
 #endif					/* CONFIG_MMU */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:189 @ __do_kernel_fault(struct mm_struct *mm,
 	/*
 	 * No handler, we'll have to terminate things with extreme prejudice.
 	 */
+	irq_pipeline_oops();
 	bust_spinlocks(1);
 	pr_alert("8<--- cut here ---\n");
 	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:242 @ void do_bad_area(unsigned long addr, uns
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->active_mm;
+	unsigned long irqflags;
 
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
 	 */
-	if (user_mode(regs))
+	  if (user_mode(regs)) {
+		irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
 		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
-	else
+		fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+	  } else
+		/*
+		 * irq_pipeline: kernel faults are either quickly
+		 * recoverable via fixup, or lethal. In both cases, we
+		 * can skip the interrupt state synchronization.
+		 */
 		__do_kernel_fault(mm, addr, fsr, regs);
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:326 @ do_page_fault(unsigned long addr, unsign
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
+	unsigned long irqflags;
+
+	irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
 
 	if (kprobe_page_fault(regs, fsr))
-		return 0;
+		goto out;
 
 	tsk = current;
 	mm  = tsk->mm;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:387 @ retry:
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
-		return 0;
+		goto out;
 	}
 
 	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:403 @ retry:
 	 * Handle the "normal" case first - VM_FAULT_MAJOR
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
-		return 0;
+		goto out;
 
 	/*
 	 * If we are in kernel mode at this point, we
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:419 @ retry:
 		 * got oom-killed)
 		 */
 		pagefault_out_of_memory();
-		return 0;
+		goto out;
 	}
 
 	if (fault & VM_FAULT_SIGBUS) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:440 @ retry:
 	}
 
 	__do_user_fault(addr, fsr, sig, code, regs);
-	return 0;
+	goto out;
 
 no_context:
 	__do_kernel_fault(mm, addr, fsr, regs);
+out:
+	fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+
 	return 0;
 }
 #else					/* CONFIG_MMU */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:485 @ do_translation_fault(unsigned long addr,
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
+
 	if (addr < TASK_SIZE)
 		return do_page_fault(addr, fsr, regs);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:560 @ do_translation_fault(unsigned long addr,
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+	unsigned long irqflags;
+
+	/*
+	 * Bracket the handling with fault_entry/fault_exit so the
+	 * pipeline core is notified of ARM_TRAP_SECTION and the hard
+	 * interrupt state observed on entry is restored on the way out.
+	 */
+	irqflags = fault_entry(ARM_TRAP_SECTION, regs);
 	do_bad_area(addr, fsr, regs);
+	fault_exit(ARM_TRAP_SECTION, regs, irqflags);
 	return 0;
 }
 #endif /* CONFIG_ARM_LPAE */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:612 @ asmlinkage void
 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+	unsigned long irqflags;
 
 	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
+	irqflags = fault_entry(ARM_TRAP_DABT, regs);
 	pr_alert("8<--- cut here ---\n");
 	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:625 @ do_DataAbort(unsigned long addr, unsigne
 
 	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
 		       fsr, 0);
+	fault_exit(ARM_TRAP_DABT, regs, irqflags);
 }
 
 void __init
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:645 @ asmlinkage void
 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+	unsigned long irqflags;
 
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
+	irqflags = fault_entry(ARM_TRAP_PABT, regs);
 	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
 		inf->name, ifsr, addr);
 
 	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
 		       ifsr, 0);
+	fault_exit(ARM_TRAP_PABT, regs, irqflags);
 }
 
 /*
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/vdso/datapage.S linux-dovetail-v5.15.y-dovetail/arch/arm/vdso/datapage.S
--- linux-5.15.26/arch/arm/vdso/datapage.S	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/vdso/datapage.S	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:8 @
 	.align 2
 .L_vdso_data_ptr:
 	.long	_start - . - VDSO_DATA_SIZE
+.L_vdso_priv_ptr:
+	.long	_start - . - VDSO_DATA_SIZE - VDSO_PRIV_SIZE
 
 ENTRY(__get_datapage)
 	.fnstart
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:19 @ ENTRY(__get_datapage)
 	bx	lr
 	.fnend
 ENDPROC(__get_datapage)
+
+@ __get_privpage - return the address of the vDSO private page in r0.
+@ Loads the PC-relative offset stored at .L_vdso_priv_ptr and adds it
+@ back to that label's address. Clobbers r1.
+ENTRY(__get_privpage)
+	.fnstart
+	adr	r0, .L_vdso_priv_ptr
+	ldr	r1, [r0]
+	add	r0, r0, r1
+	bx	lr
+	.fnend
+ENDPROC(__get_privpage)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/vfp/entry.S linux-dovetail-v5.15.y-dovetail/arch/arm/vfp/entry.S
--- linux-5.15.26/arch/arm/vfp/entry.S	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/vfp/entry.S	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:26 @
 @
 ENTRY(do_vfp)
 	inc_preempt_count r10, r4
+	disable_irq_if_pipelined
  	ldr	r4, .LCvfp
 	ldr	r11, [r10, #TI_CPU]	@ CPU number
 	add	r10, r10, #TI_VFPSTATE	@ r10 = workspace
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:34 @ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
+	enable_irq_if_pipelined
 	dec_preempt_count_ti r10, r4
 	ret	lr
 ENDPROC(vfp_null_entry)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/vfp/vfphw.S linux-dovetail-v5.15.y-dovetail/arch/arm/vfp/vfphw.S
--- linux-5.15.26/arch/arm/vfp/vfphw.S	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/vfp/vfphw.S	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:173 @ vfp_hw_state_valid:
 					@ out before setting an FPEXC that
 					@ stops us reading stuff
 	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
+	enable_irq_if_pipelined
 	sub	r2, r2, #4		@ Retry current instruction - if Thumb
 	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
 					@ else it's one 32-bit instruction, so
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:203 @ skip:
 	@ Fall into hand on to next handler - appropriate coproc instr
 	@ not recognised by VFP
 
+	enable_irq_if_pipelined
 	DBGSTR	"not VFP"
 	dec_preempt_count_ti r10, r4
 	ret	lr
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm/vfp/vfpmodule.c linux-dovetail-v5.15.y-dovetail/arch/arm/vfp/vfpmodule.c
--- linux-5.15.26/arch/arm/vfp/vfpmodule.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm/vfp/vfpmodule.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:21 @
 #include <linux/uaccess.h>
 #include <linux/user.h>
 #include <linux/export.h>
+#include <linux/smp.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:94 @ static void vfp_force_reload(unsigned in
 static void vfp_thread_flush(struct thread_info *thread)
 {
 	union vfp_state *vfp = &thread->vfpstate;
+	unsigned long flags;
 	unsigned int cpu;
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:105 @ static void vfp_thread_flush(struct thre
 	 * Do this first to ensure that preemption won't overwrite our
 	 * state saving should access to the VFP be enabled at this point.
 	 */
-	cpu = get_cpu();
+	cpu = hard_get_cpu(flags);
 	if (vfp_current_hw_state[cpu] == vfp)
 		vfp_current_hw_state[cpu] = NULL;
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-	put_cpu();
+	hard_put_cpu(flags);
 
 	memset(vfp, 0, sizeof(union vfp_state));
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:124 @ static void vfp_thread_exit(struct threa
 {
 	/* release case: Per-thread VFP cleanup. */
 	union vfp_state *vfp = &thread->vfpstate;
-	unsigned int cpu = get_cpu();
+	unsigned long flags;
+	unsigned int cpu = hard_get_cpu(flags);
 
 	if (vfp_current_hw_state[cpu] == vfp)
 		vfp_current_hw_state[cpu] = NULL;
-	put_cpu();
+	hard_put_cpu(flags);
 }
 
 static void vfp_thread_copy(struct thread_info *thread)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:165 @ static void vfp_thread_copy(struct threa
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
 	struct thread_info *thread = v;
+	unsigned long flags;
 	u32 fpexc;
 #ifdef CONFIG_SMP
 	unsigned int cpu;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:173 @ static int vfp_notifier(struct notifier_
 
 	switch (cmd) {
 	case THREAD_NOTIFY_SWITCH:
+		flags = hard_cond_local_irq_save();
 		fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:193 @ static int vfp_notifier(struct notifier_
 		 * old state.
 		 */
 		fmxr(FPEXC, fpexc & ~FPEXC_EN);
+		hard_cond_local_irq_restore(flags);
 		break;
 
 	case THREAD_NOTIFY_FLUSH:
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:331 @ static u32 vfp_emulate_instruction(u32 i
  */
 void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 {
-	u32 fpscr, orig_fpscr, fpsid, exceptions;
+	u32 fpscr, orig_fpscr, fpsid, exceptions, next_trigger = 0;
 
 	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:361 @ void VFP_bounce(u32 trigger, u32 fpexc,
 		/*
 		 * Synchronous exception, emulate the trigger instruction
 		 */
+		hard_cond_local_irq_enable();
 		goto emulate;
 	}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:374 @ void VFP_bounce(u32 trigger, u32 fpexc,
 		trigger = fmrx(FPINST);
 		regs->ARM_pc -= 4;
 #endif
-	} else if (!(fpexc & FPEXC_DEX)) {
+		if (fpexc & FPEXC_FP2V) {
+			/*
+			 * The barrier() here prevents fpinst2 being read
+			 * before the condition above.
+			 */
+			barrier();
+			next_trigger = fmrx(FPINST2);
+		}
+	}
+	hard_cond_local_irq_enable();
+
+	if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
 		/*
 		 * Illegal combination of bits. It can be caused by an
 		 * unallocated VFP instruction but with FPSCR.IXE set and not
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:425 @ void VFP_bounce(u32 trigger, u32 fpexc,
 	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
 		goto exit;
 
-	/*
-	 * The barrier() here prevents fpinst2 being read
-	 * before the condition above.
-	 */
-	barrier();
-	trigger = fmrx(FPINST2);
+	trigger = next_trigger;
 
  emulate:
 	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
 	if (exceptions)
 		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
  exit:
+	hard_cond_local_irq_enable();
 	preempt_enable();
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:532 @ static inline void vfp_pm_init(void) { }
  */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
-	unsigned int cpu = get_cpu();
+	unsigned long flags;
+	unsigned int cpu = hard_get_cpu(flags);
 
 	if (vfp_state_in_hw(cpu, thread)) {
 		u32 fpexc = fmrx(FPEXC);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:546 @ void vfp_sync_hwstate(struct thread_info
 		fmxr(FPEXC, fpexc);
 	}
 
-	put_cpu();
+	hard_put_cpu(flags);
 }
 
 /* Ensure that the thread reloads the hardware VFP state on the next use. */
 void vfp_flush_hwstate(struct thread_info *thread)
 {
-	unsigned int cpu = get_cpu();
+	unsigned long flags;
+	unsigned int cpu = hard_get_cpu(flags);
 
 	vfp_force_reload(cpu, thread);
 
-	put_cpu();
+	hard_put_cpu(flags);
 }
 
 /*
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts linux-dovetail-v5.15.y-dovetail/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts
--- linux-5.15.26/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/dts-v1/;
+#include "bcm2837-rpi-3-b.dts"
+
+/*
+ * Variant of the RPi 3B board DT: enable uart0 on the uart0_gpio32
+ * pin group and disable uart1. (The "-nobt" name suggests Bluetooth
+ * is dropped so uart0 can serve the header pins — verify against the
+ * base dts.)
+ */
+&uart0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_gpio32>;
+};
+
+&uart1 {
+	status = "disabled";
+};
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/boot/dts/broadcom/Makefile linux-dovetail-v5.15.y-dovetail/arch/arm64/boot/dts/broadcom/Makefile
--- linux-5.15.26/arch/arm64/boot/dts/broadcom/Makefile	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/boot/dts/broadcom/Makefile	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:7 @ dtb-$(CONFIG_ARCH_BCM2835) += bcm2711-rp
 			      bcm2837-rpi-3-a-plus.dtb \
 			      bcm2837-rpi-3-b.dtb \
 			      bcm2837-rpi-3-b-plus.dtb \
+			      bcm2837-rpi-3-b-nobt.dtb \
 			      bcm2837-rpi-cm3-io3.dtb
 
 subdir-y	+= bcm4908
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/daifflags.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/daifflags.h
--- linux-5.15.26/arch/arm64/include/asm/daifflags.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/daifflags.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:15 @
 #include <asm/cpufeature.h>
 #include <asm/ptrace.h>
 
+/*
+ * irq_pipeline: DAIF masking is only used in contexts where hard
+ * interrupt masking applies, so no need to virtualize for the inband
+ * stage here (the pipeline core does assume this).
+ */
+
 #define DAIF_PROCCTX		0
 #define DAIF_PROCCTX_NOIRQ	(PSR_I_BIT | PSR_F_BIT)
 #define DAIF_ERRCTX		(PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:44 @ static inline void local_daif_mask(void)
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
-	trace_hardirqs_off();
+	trace_hardirqs_off_pipelined();
 }
 
 static inline unsigned long local_daif_save_flags(void)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:81 @ static inline void local_daif_restore(un
 		(read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT));
 
 	if (!irq_disabled) {
-		trace_hardirqs_on();
+		trace_hardirqs_on_pipelined();
 
 		if (system_uses_irq_prio_masking()) {
 			gic_write_pmr(GIC_PRIO_IRQON);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:126 @ static inline void local_daif_restore(un
 	write_sysreg(flags, daif);
 
 	if (irq_disabled)
-		trace_hardirqs_off();
+		trace_hardirqs_off_pipelined();
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:138 @ static inline void local_daif_inherit(st
 	unsigned long flags = regs->pstate & DAIF_MASK;
 
 	if (interrupts_enabled(regs))
-		trace_hardirqs_on();
+		trace_hardirqs_on_pipelined();
 
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(regs->pmr_save);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/dovetail.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/dovetail.h
--- linux-5.15.26/arch/arm64/include/asm/dovetail.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/dovetail.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _ASM_ARM64_DOVETAIL_H
+#define _ASM_ARM64_DOVETAIL_H
+
+#include <asm/fpsimd.h>
+
+/* ARM64 traps */
+#define ARM64_TRAP_ACCESS	0	/* Data or instruction access exception */
+#define ARM64_TRAP_ALIGN	1	/* SP/PC alignment abort */
+#define ARM64_TRAP_SEA		2	/* Synchronous external abort */
+#define ARM64_TRAP_DEBUG	3	/* Debug trap */
+#define ARM64_TRAP_UNDI		4	/* Undefined instruction */
+#define ARM64_TRAP_UNDSE	5	/* Undefined synchronous exception */
+#define ARM64_TRAP_FPE		6	/* FPSIMD exception */
+#define ARM64_TRAP_SVE		7	/* SVE access trap */
+#define ARM64_TRAP_BTI		8	/* Branch target identification */
+
+#ifdef CONFIG_DOVETAIL
+
+/* Dovetail arch hook: no arm64-specific work at this point. */
+static inline void arch_dovetail_exec_prepare(void)
+{ }
+
+/* Dovetail arch hook: nothing to do ahead of a stage switch. */
+static inline void arch_dovetail_switch_prepare(bool leave_inband)
+{ }
+
+/*
+ * Dovetail arch hook: once the switch is done, reload the incoming
+ * context's FPSIMD state for out-of-band use (see
+ * fpsimd_restore_current_oob()).
+ */
+static inline void arch_dovetail_switch_finish(bool enter_inband)
+{
+	fpsimd_restore_current_oob();
+}
+
+#endif
+
+#endif /* _ASM_ARM64_DOVETAIL_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/efi.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/efi.h
--- linux-5.15.26/arch/arm64/include/asm/efi.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/efi.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:105 @ static inline void free_screen_info(stru
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
+	unsigned long flags;
+
+	protect_inband_mm(flags);
+
 	__switch_mm(mm);
 
 	if (system_uses_ttbr0_pan()) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:133 @ static inline void efi_set_pgd(struct mm
 			update_saved_ttbr0(current, current->active_mm);
 		}
 	}
+
+	unprotect_inband_mm(flags);
 }
 
 void efi_virtmap_load(void);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/fpsimd.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/fpsimd.h
--- linux-5.15.26/arch/arm64/include/asm/fpsimd.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/fpsimd.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:46 @ extern void fpsimd_flush_thread(void);
 extern void fpsimd_signal_preserve_current_state(void);
 extern void fpsimd_preserve_current_state(void);
 extern void fpsimd_restore_current_state(void);
+extern void fpsimd_restore_current_oob(void);
 extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
 
 extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/irqflags.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/irqflags.h
--- linux-5.15.26/arch/arm64/include/asm/irqflags.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/irqflags.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:13 @
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
+#define IRQMASK_I_BIT	PSR_I_BIT
+#define IRQMASK_I_POS	7
+#define IRQMASK_i_POS	31
+
 /*
  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
  * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:31 @
 /*
  * CPU interrupt mask handling.
  */
-static inline void arch_local_irq_enable(void)
+static inline void native_irq_enable(void)
 {
 	if (system_has_prio_mask_debugging()) {
 		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:40 @ static inline void arch_local_irq_enable
 	}
 
 	asm volatile(ALTERNATIVE(
-		"msr	daifclr, #3		// arch_local_irq_enable",
+		"msr	daifclr, #3		// native_irq_enable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:50 @ static inline void arch_local_irq_enable
 	pmr_sync();
 }
 
-static inline void arch_local_irq_disable(void)
+static inline void native_irq_disable(void)
 {
 	if (system_has_prio_mask_debugging()) {
 		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:59 @ static inline void arch_local_irq_disabl
 	}
 
 	asm volatile(ALTERNATIVE(
-		"msr	daifset, #3		// arch_local_irq_disable",
+		"msr	daifset, #3		// native_irq_disable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:67 @ static inline void arch_local_irq_disabl
 		: "memory");
 }
 
+static inline void native_irq_sync(void)
+{
+	native_irq_enable();
+	isb();
+	native_irq_disable();
+}
+
 /*
  * Save the current interrupt enable state.
  */
-static inline unsigned long arch_local_save_flags(void)
+static inline unsigned long native_save_flags(void)
 {
 	unsigned long flags;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:92 @ static inline unsigned long arch_local_s
 	return flags;
 }
 
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline int native_irqs_disabled_flags(unsigned long flags)
 {
 	int res;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:107 @ static inline int arch_irqs_disabled_fla
 	return res;
 }
 
-static inline int arch_irqs_disabled(void)
-{
-	return arch_irqs_disabled_flags(arch_local_save_flags());
-}
-
-static inline unsigned long arch_local_irq_save(void)
+static inline unsigned long native_irq_save(void)
 {
 	unsigned long flags;
 
-	flags = arch_local_save_flags();
+	flags = native_save_flags();
 
 	/*
 	 * There are too many states with IRQs disabled, just keep the current
 	 * state if interrupts are already disabled/masked.
 	 */
-	if (!arch_irqs_disabled_flags(flags))
-		arch_local_irq_disable();
+	if (!native_irqs_disabled_flags(flags))
+		native_irq_disable();
 
 	return flags;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:126 @ static inline unsigned long arch_local_i
 /*
  * restore saved IRQ state
  */
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline void native_irq_restore(unsigned long flags)
 {
 	asm volatile(ALTERNATIVE(
 		"msr	daif, %0",
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:139 @ static inline void arch_local_irq_restor
 	pmr_sync();
 }
 
+static inline bool native_irqs_disabled(void)
+{
+	unsigned long flags = native_save_flags();
+	return native_irqs_disabled_flags(flags);
+}
+
+#include <asm/irq_pipeline.h>
+
 #endif /* __ASM_IRQFLAGS_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/irq_pipeline.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/irq_pipeline.h
--- linux-5.15.26/arch/arm64/include/asm/irq_pipeline.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/irq_pipeline.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _ASM_ARM64_IRQ_PIPELINE_H
+#define _ASM_ARM64_IRQ_PIPELINE_H
+
+#include <asm-generic/irq_pipeline.h>
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+/*
+ * In order to cope with the limited number of SGIs available to us,
+ * in-band IPI messages are multiplexed over SGI0, whereas out-of-band
+ * IPIs are directly mapped to SGI1-2.
+ */
+#define OOB_NR_IPI		2
+#define OOB_IPI_OFFSET		1 /* SGI1 */
+#define TIMER_OOB_IPI		(ipi_irq_base + OOB_IPI_OFFSET)
+#define RESCHEDULE_OOB_IPI	(TIMER_OOB_IPI + 1)
+
+extern int ipi_irq_base;
+
+static inline notrace
+unsigned long arch_irqs_virtual_to_native_flags(int stalled)
+{
+	return (!!stalled) << IRQMASK_I_POS;
+}
+
+static inline notrace
+unsigned long arch_irqs_native_to_virtual_flags(unsigned long flags)
+{
+	return (!!hard_irqs_disabled_flags(flags)) << IRQMASK_i_POS;
+}
+
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+	int stalled = inband_irq_save();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+	barrier();
+	inband_irq_enable();
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+	inband_irq_disable();
+	barrier();
+}
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+	int stalled = inband_irqs_disabled();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return native_irqs_disabled_flags(flags);
+}
+
+static inline notrace void arch_local_irq_restore(unsigned long flags)
+{
+	inband_irq_restore(arch_irqs_disabled_flags(flags));
+	barrier();
+}
+
+static inline
+void arch_save_timer_regs(struct pt_regs *dst, struct pt_regs *src)
+{
+	dst->pstate = src->pstate;
+	dst->pc = src->pc;
+}
+
+static inline bool arch_steal_pipelined_tick(struct pt_regs *regs)
+{
+	return !!(regs->pstate & IRQMASK_I_BIT);
+}
+
+static inline int arch_enable_oob_stage(void)
+{
+	return 0;
+}
+
+extern void (*handle_arch_irq)(struct pt_regs *);
+
+static inline void arch_handle_irq_pipelined(struct pt_regs *regs)
+{
+	handle_arch_irq(regs);
+}
+
+/*
+ * We use neither the generic entry code nor
+ * kentry_enter/exit_pipelined yet. We still build a no-op version of
+ * the latter for now, until we eventually switch to using whichever
+ * of them is available first.
+ */
+#define arch_kentry_get_irqstate(__regs)	0
+
+#define arch_kentry_set_irqstate(__regs, __irqstate)	\
+	do { (void)__irqstate; } while (0)
+
+#else  /* !CONFIG_IRQ_PIPELINE */
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	return native_irq_save();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	native_irq_enable();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	native_irq_disable();
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	return native_save_flags();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	native_irq_restore(flags);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return native_irqs_disabled_flags(flags);
+}
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* _ASM_ARM64_IRQ_PIPELINE_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/mmu_context.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/mmu_context.h
--- linux-5.15.26/arch/arm64/include/asm/mmu_context.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/mmu_context.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:18 @
 #include <linux/sched/hotplug.h>
 #include <linux/mm_types.h>
 #include <linux/pgtable.h>
+#include <linux/irq_pipeline.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:101 @ static inline void __cpu_set_tcr_t0sz(un
 static inline void cpu_uninstall_idmap(void)
 {
 	struct mm_struct *mm = current->active_mm;
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
 
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:111 @ static inline void cpu_uninstall_idmap(v
 
 	if (mm != &init_mm && !system_uses_ttbr0_pan())
 		cpu_switch_mm(mm->pgd, mm);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 static inline void cpu_install_idmap(void)
 {
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
+
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	cpu_set_idmap_tcr_t0sz();
 
 	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:231 @ static inline void __switch_mm(struct mm
 }
 
 static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
+do_switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	  struct task_struct *tsk)
 {
 	if (prev != next)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:246 @ switch_mm(struct mm_struct *prev, struct
 	update_saved_ttbr0(tsk, next);
 }
 
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	protect_inband_mm(flags);
+	do_switch_mm(prev, next, tsk);
+	unprotect_inband_mm(flags);
+}
+
+static inline void
+switch_oob_mm(struct mm_struct *prev, struct mm_struct *next,
+	      struct task_struct *tsk) /* hard irqs off */
+{
+	do_switch_mm(prev, next, tsk);
+}
+
 static inline const struct cpumask *
 task_cpu_possible_mask(struct task_struct *p)
 {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/ptrace.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/ptrace.h
--- linux-5.15.26/arch/arm64/include/asm/ptrace.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/ptrace.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:203 @ struct pt_regs {
 
 	/* Only valid for some EL1 exceptions. */
 	u64 lockdep_hardirqs;
+#ifdef CONFIG_IRQ_PIPELINE
+	u64 exit_rcu : 1,
+		oob_on_entry : 1,
+		stalled_on_entry : 1;
+#else
 	u64 exit_rcu;
+#endif
 };
 
 static inline bool in_syscall(struct pt_regs const *regs)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/syscall.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/syscall.h
--- linux-5.15.26/arch/arm64/include/asm/syscall.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/syscall.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:76 @ static inline void syscall_get_arguments
 	memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
 }
 
+static inline unsigned long syscall_get_arg0(struct pt_regs *regs)
+{
+	return regs->orig_x0;
+}
+
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
 					 const unsigned long *args)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/thread_info.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/thread_info.h
--- linux-5.15.26/arch/arm64/include/asm/thread_info.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/thread_info.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:17 @
 
 struct task_struct;
 
+#include <dovetail/thread_info.h>
 #include <asm/memory.h>
 #include <asm/stack_pointer.h>
 #include <asm/types.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:27 @ struct task_struct;
  */
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
+	unsigned long		local_flags;	/* local (synchronous) flags */
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	u64			ttbr0;		/* saved TTBR0_EL1 */
 #endif
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:47 @ struct thread_info {
 	void			*scs_base;
 	void			*scs_sp;
 #endif
+	struct oob_thread_state	oob_state;
 };
 
 #define thread_saved_pc(tsk)	\
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:64 @ void arch_release_task_struct(struct tas
 int arch_dup_task_struct(struct task_struct *dst,
 				struct task_struct *src);
 
+#define ti_local_flags(__ti)	((__ti)->local_flags)
+
 #endif
 
 #define TIF_SIGPENDING		0	/* signal pending */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:75 @ int arch_dup_task_struct(struct task_str
 #define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
 #define TIF_MTE_ASYNC_FAULT	5	/* MTE Asynchronous Tag Check Fault */
 #define TIF_NOTIFY_SIGNAL	6	/* signal notifications exist */
+#define TIF_RETUSER		7	/* INBAND_TASK_RETUSER is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing */
 #define TIF_SYSCALL_TRACEPOINT	10	/* syscall tracepoint for ftrace */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:90 @ int arch_dup_task_struct(struct task_str
 #define TIF_SVE_VL_INHERIT	24	/* Inherit sve_vl_onexec across exec */
 #define TIF_SSBD		25	/* Wants SSB mitigation */
 #define TIF_TAGGED_ADDR		26	/* Allow tagged user addresses */
+#define TIF_MAYDAY		27	/* Emergency trap pending */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:107 @ int arch_dup_task_struct(struct task_str
 #define _TIF_SVE		(1 << TIF_SVE)
 #define _TIF_MTE_ASYNC_FAULT	(1 << TIF_MTE_ASYNC_FAULT)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_RETUSER		(1 << TIF_RETUSER)
+#define _TIF_MAYDAY		(1 << TIF_MAYDAY)
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
 				 _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
-				 _TIF_NOTIFY_SIGNAL)
+				 _TIF_NOTIFY_SIGNAL | _TIF_RETUSER)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:134 @ int arch_dup_task_struct(struct task_str
 	INIT_SCS							\
 }
 
+/*
+ * Local (synchronous) thread flags.
+ */
+#define _TLF_OOB		0x0001
+#define _TLF_DOVETAIL		0x0002
+#define _TLF_OFFSTAGE		0x0004
+#define _TLF_OOBTRAP		0x0008
+
 #endif /* __ASM_THREAD_INFO_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/include/asm/uaccess.h linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/uaccess.h
--- linux-5.15.26/arch/arm64/include/asm/uaccess.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/include/asm/uaccess.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:87 @ static inline void __uaccess_ttbr0_disab
 {
 	unsigned long flags, ttbr;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	ttbr = read_sysreg(ttbr1_el1);
 	ttbr &= ~TTBR_ASID_MASK;
 	/* reserved_pg_dir placed before swapper_pg_dir */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:96 @ static inline void __uaccess_ttbr0_disab
 	/* Set reserved ASID */
 	write_sysreg(ttbr, ttbr1_el1);
 	isb();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline void __uaccess_ttbr0_enable(void)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:108 @ static inline void __uaccess_ttbr0_enabl
 	 * variable and the MSR. A context switch could trigger an ASID
 	 * roll-over and an update of 'ttbr0'.
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
 
 	/* Restore active ASID */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:121 @ static inline void __uaccess_ttbr0_enabl
 	/* Restore user page table */
 	write_sysreg(ttbr0, ttbr0_el1);
 	isb();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline bool uaccess_ttbr0_disable(void)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/Kconfig linux-dovetail-v5.15.y-dovetail/arch/arm64/Kconfig
--- linux-5.15.26/arch/arm64/Kconfig	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/Kconfig	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:188 @ config ARM64
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_GCC_PLUGINS
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
+	select HAVE_IRQ_PIPELINE
+	select HAVE_DOVETAIL
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_NMI
 	select HAVE_PATA_PLATFORM
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1065 @ config ARCH_HAS_FILTER_PGPROT
 config CC_HAVE_SHADOW_CALL_STACK
 	def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
 
+source "kernel/Kconfig.dovetail"
+
 config PARAVIRT
 	bool "Enable paravirtualization code"
 	help
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/asm-offsets.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/asm-offsets.c
--- linux-5.15.26/arch/arm64/kernel/asm-offsets.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/asm-offsets.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:33 @ int main(void)
   DEFINE(TSK_CPU,		offsetof(struct task_struct, cpu));
   BLANK();
   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
+  DEFINE(TSK_TI_LOCAL_FLAGS,	offsetof(struct task_struct, thread_info.local_flags));
   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
   DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/debug-monitors.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/debug-monitors.c
--- linux-5.15.26/arch/arm64/kernel/debug-monitors.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/debug-monitors.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:235 @ static void send_user_sigtrap(int si_cod
 		return;
 
 	if (interrupts_enabled(regs))
-		local_irq_enable();
+		local_irq_enable_full();
 
 	arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs),
 			      "User debug trap");
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/entry-common.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/entry-common.c
--- linux-5.15.26/arch/arm64/kernel/entry-common.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/entry-common.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:15 @
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/thread_info.h>
+#include <linux/irq_pipeline.h>
 
 #include <asm/cpufeature.h>
 #include <asm/daifflags.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:55 @ static __always_inline void __enter_from
 	trace_hardirqs_off_finish();
 }
 
+static void noinstr _enter_from_kernel_mode(struct pt_regs *regs)
+{
+	__enter_from_kernel_mode(regs);
+	mte_check_tfsr_entry();
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
 static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
 {
+	/*
+	 * CAUTION: we may switch in-band as a result of handling a
+	 * trap, so if we are running out-of-band, we must make sure
+	 * not to perform the RCU exit since we did not enter it in
+	 * the first place.
+	 */
+	regs->oob_on_entry = running_oob();
+	if (regs->oob_on_entry) {
+		regs->exit_rcu = false;
+		goto out;
+	}
+
+	/*
+	 * We trapped from kernel space running in-band, we need to
+	 * record the virtual interrupt state into the current
+	 * register frame (regs->stalled_on_entry) in order to
+	 * reinstate it from exit_to_kernel_mode(). Next we stall the
+	 * in-band stage in order to mirror the current hardware state
+	 * (i.e. hardirqs are off).
+	 */
+	regs->stalled_on_entry = test_and_stall_inband_nocheck();
+
 	__enter_from_kernel_mode(regs);
+
+	/*
+	 * Our caller is going to inherit the hardware interrupt state
+	 * from the trapped context once we have returned: if running
+	 * in-band, align the stall bit on the upcoming state.
+	 */
+	if (running_inband() && interrupts_enabled(regs))
+		unstall_inband_nocheck();
+out:
 	mte_check_tfsr_entry();
 }
 
+#else
+
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+	_enter_from_kernel_mode(regs);
+}
+
+#endif	/* !CONFIG_IRQ_PIPELINE */
+
 /*
  * Handle IRQ/context state management when exiting to kernel mode.
  * After this function returns it is not safe to call regular kernel code,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:140 @ static __always_inline void __exit_to_ke
 static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 {
 	mte_check_tfsr_exit();
+
+	if (running_oob())
+		return;
+
 	__exit_to_kernel_mode(regs);
+
+#ifdef CONFIG_IRQ_PIPELINE
+	/*
+	 * Reinstate the virtual interrupt state which was in effect
+	 * on entry to the trap.
+	 */
+	if (!regs->oob_on_entry) {
+		if (regs->stalled_on_entry)
+			stall_inband_nocheck();
+		else
+			unstall_inband_nocheck();
+	}
+#endif
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:167 @ static void noinstr exit_to_kernel_mode(
  */
 static __always_inline void __enter_from_user_mode(void)
 {
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	CT_WARN_ON(ct_state() != CONTEXT_USER);
-	user_exit_irqoff();
-	trace_hardirqs_off_finish();
+	if (running_inband()) {
+		lockdep_hardirqs_off(CALLER_ADDR0);
+		WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+		CT_WARN_ON(ct_state() != CONTEXT_USER);
+		stall_inband_nocheck();
+		user_exit_irqoff();
+		unstall_inband_nocheck();
+		trace_hardirqs_off_finish();
+	}
 }
 
 static __always_inline void enter_from_user_mode(struct pt_regs *regs)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:187 @ static __always_inline void enter_from_u
  * Handle IRQ/context state management when exiting to user mode.
  * After this function returns it is not safe to call regular kernel code,
  * intrumentable code, or any code which may trigger an exception.
+ *
+ * irq_pipeline: prepare_exit_to_user_mode() tells the caller whether
+ * it is safe to return via the common in-band exit path, i.e. the
+ * in-band stage was unstalled on entry, and we are (still) running on
+ * it.
  */
 static __always_inline void __exit_to_user_mode(void)
 {
+	stall_inband_nocheck();
 	trace_hardirqs_on_prepare();
 	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 	user_enter_irqoff();
 	lockdep_hardirqs_on(CALLER_ADDR0);
+	unstall_inband_nocheck();
 }
 
-static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
+static __always_inline
+bool prepare_exit_to_user_mode(struct pt_regs *regs)
 {
 	unsigned long flags;
 
 	local_daif_mask();
 
-	flags = READ_ONCE(current_thread_info()->flags);
-	if (unlikely(flags & _TIF_WORK_MASK))
-		do_notify_resume(regs, flags);
+	if (running_inband() && !test_inband_stall()) {
+		flags = READ_ONCE(current_thread_info()->flags);
+		if (unlikely(flags & _TIF_WORK_MASK))
+			do_notify_resume(regs, flags);
+		/*
+		 * Caution: do_notify_resume() might have switched us
+		 * to the out-of-band stage.
+		 */
+		return running_inband();
+	}
+
+	return false;
 }
 
 static __always_inline void exit_to_user_mode(struct pt_regs *regs)
 {
-	prepare_exit_to_user_mode(regs);
+	bool ret;
+
+	ret = prepare_exit_to_user_mode(regs);
 	mte_check_tfsr_exit();
-	__exit_to_user_mode();
+	if (ret)
+		__exit_to_user_mode();
 }
 
 asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:246 @ asmlinkage void noinstr asm_exit_to_user
  */
 static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
+	/* irq_pipeline: running this code oob is ok. */
 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
 
 	__nmi_enter();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:316 @ static void noinstr arm64_exit_el1_dbg(s
 
 static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
 {
-	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+	/*
+	 * IRQ pipeline: the interrupt entry is special in that we may
+	 * run the regular kernel entry prologue/epilogue only if the
+	 * IRQ is going to be dispatched to its handler on behalf of
+	 * the current context, i.e. only if running in-band and
+	 * unstalled. If so, we also have to reconcile the hardware
+	 * and virtual interrupt states temporarily in order to run
+	 * such prologue.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
 		arm64_enter_nmi(regs);
-	else
+	} else {
+#ifdef CONFIG_IRQ_PIPELINE
+		if (running_inband()) {
+			regs->stalled_on_entry = test_inband_stall();
+			if (!regs->stalled_on_entry) {
+				stall_inband_nocheck();
+				_enter_from_kernel_mode(regs);
+				unstall_inband_nocheck();
+				return;
+			}
+		}
+#else
 		enter_from_kernel_mode(regs);
+#endif
+	}
 }
 
 static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 {
-	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
 		arm64_exit_nmi(regs);
-	else
+	} else {
+#ifdef CONFIG_IRQ_PIPELINE
+		/*
+		 * See enter_el1_irq_or_nmi() for details. UGLY: we
+		 * also have to tell the tracer that irqs are off,
+		 * since sync_current_irq_stage() did the opposite on
+		 * exit. Hopefully, at some point arm64 will convert
+		 * to the generic entry code which exhibits a less
+		 * convoluted logic.
+		 */
+		if (running_inband() && !regs->stalled_on_entry) {
+			stall_inband_nocheck();
+			trace_hardirqs_off();
+			exit_to_kernel_mode(regs);
+			unstall_inband_nocheck();
+		}
+#else
 		exit_to_kernel_mode(regs);
+#endif
+	}
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+/*
+ * When pipelining interrupts, we have to reconcile the hardware and
+ * the virtual states. Hard irqs are off on entry while the current
+ * stage has to be unstalled: fix this up by stalling the in-band
+ * stage on entry, unstalling on exit.
+ */
+static inline void arm64_preempt_irq_enter(void)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+	stall_inband_nocheck();
+	trace_hardirqs_off();
 }
 
+static inline void arm64_preempt_irq_exit(void)
+{
+	trace_hardirqs_on();
+	unstall_inband_nocheck();
+}
+
+#else
+
+static inline void arm64_preempt_irq_enter(void)
+{ }
+
+static inline void arm64_preempt_irq_exit(void)
+{ }
+
+#endif
+
 static void __sched arm64_preempt_schedule_irq(void)
 {
+	arm64_preempt_irq_enter();
+
 	lockdep_assert_irqs_disabled();
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:414 @ static void __sched arm64_preempt_schedu
 	 * DAIF we must have handled an NMI, so skip preemption.
 	 */
 	if (system_uses_irq_prio_masking() && read_sysreg(daif))
-		return;
+		goto out;
 
 	/*
 	 * Preempting a task from an IRQ means we leave copies of PSTATE
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:426 @ static void __sched arm64_preempt_schedu
 	 */
 	if (system_capabilities_finalized())
 		preempt_schedule_irq();
+out:
+	arm64_preempt_irq_exit();
 }
 
-static void do_interrupt_handler(struct pt_regs *regs,
-				 void (*handler)(struct pt_regs *))
+#ifdef CONFIG_DOVETAIL
+/*
+ * When Dovetail is enabled, the companion core may switch contexts
+ * over the irq stack, therefore subsequent interrupts might be taken
+ * over sibling stack contexts. So we need a not so subtle way of
+ * figuring out whether the irq stack was actually exited, which
+ * cannot depend on the current task pointer. Instead, we track the
+ * interrupt nesting depth for a CPU in irq_nesting.
+ */
+DEFINE_PER_CPU(int, irq_nesting);
+
+static void __do_interrupt_handler(struct pt_regs *regs,
+				void (*handler)(struct pt_regs *))
+{
+	if (this_cpu_inc_return(irq_nesting) == 1)
+		call_on_irq_stack(regs, handler);
+	else
+		handler(regs);
+
+	this_cpu_dec(irq_nesting);
+}
+
+#else
+static void __do_interrupt_handler(struct pt_regs *regs,
+				void (*handler)(struct pt_regs *))
 {
 	if (on_thread_stack())
 		call_on_irq_stack(regs, handler);
 	else
 		handler(regs);
 }
+#endif
+
+#ifdef CONFIG_IRQ_PIPELINE
+static bool do_interrupt_handler(struct pt_regs *regs,
+				void (*handler)(struct pt_regs *))
+{
+	if (handler == handle_arch_irq)
+		handler = (void (*)(struct pt_regs *))handle_irq_pipelined;
+
+	__do_interrupt_handler(regs, handler);
+
+	return running_inband() && !irqs_disabled();
+}
+#else
+static bool do_interrupt_handler(struct pt_regs *regs,
+				void (*handler)(struct pt_regs *))
+{
+	__do_interrupt_handler(regs, handler);
+
+	return true;
+}
+#endif
 
 extern void (*handle_arch_irq)(struct pt_regs *);
 extern void (*handle_arch_fiq)(struct pt_regs *);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:490 @ extern void (*handle_arch_fiq)(struct pt
 static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
 				      unsigned int esr)
 {
+	/*
+	 * Dovetail: Same as __do_kernel_fault(), don't bother
+	 * restoring the in-band stage, this trap is fatal and we are
+	 * already walking on thin ice.
+	 */
 	arm64_enter_nmi(regs);
 
 	console_verbose();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:655 @ asmlinkage void noinstr el1h_64_sync_han
 static void noinstr el1_interrupt(struct pt_regs *regs,
 				  void (*handler)(struct pt_regs *))
 {
+	bool ret;
+
 	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
 
 	enter_el1_irq_or_nmi(regs);
-	do_interrupt_handler(regs, handler);
+	ret = do_interrupt_handler(regs, handler);
 
 	/*
 	 * Note: thread_info::preempt_count includes both thread_info::count
 	 * and thread_info::need_resched, and is not equivalent to
 	 * preempt_count().
 	 */
-	if (IS_ENABLED(CONFIG_PREEMPTION) &&
+	if (IS_ENABLED(CONFIG_PREEMPTION) && ret &&
 	    READ_ONCE(current_thread_info()->preempt_count) == 0)
 		arm64_preempt_schedule_irq();
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:882 @ asmlinkage void noinstr el0t_64_sync_han
 static void noinstr el0_interrupt(struct pt_regs *regs,
 				  void (*handler)(struct pt_regs *))
 {
-	enter_from_user_mode(regs);
+	if (handler == handle_arch_fiq ||
+		(running_inband() && !test_inband_stall()))
+		enter_from_user_mode(regs);
 
 	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/fpsimd.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/fpsimd.c
--- linux-5.15.26/arch/arm64/kernel/fpsimd.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/fpsimd.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:174 @ static void __get_cpu_fpsimd_context(voi
 	WARN_ON(busy);
 }
 
+static void __put_cpu_fpsimd_context(void)
+{
+	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
+
+	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
+}
+
+#ifdef CONFIG_DOVETAIL
+
+#define get_cpu_fpsimd_context(__flags)			\
+	do {						\
+		(__flags) = hard_preempt_disable();	\
+		__get_cpu_fpsimd_context();		\
+	} while (0)
+
+#define put_cpu_fpsimd_context(__flags)			\
+	do {						\
+		__put_cpu_fpsimd_context();		\
+		hard_preempt_enable(__flags);		\
+	} while (0)
+
+void fpsimd_restore_current_oob(void)
+{
+	/*
+	 * Restore the fpsimd context for the current task as it
+	 * resumes from dovetail_context_switch(), which always happens
+	 * on the out-of-band stage. Skip this for kernel threads
+	 * which have no such context but always bear
+	 * TIF_FOREIGN_FPSTATE.
+	 */
+	if (current->mm)
+		fpsimd_restore_current_state();
+}
+
+#else
+
 /*
  * Claim ownership of the CPU FPSIMD context for use by the calling context.
  *
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:219 @ static void __get_cpu_fpsimd_context(voi
  * The double-underscore version must only be called if you know the task
  * can't be preempted.
  */
-static void get_cpu_fpsimd_context(void)
-{
-	local_bh_disable();
-	__get_cpu_fpsimd_context();
-}
-
-static void __put_cpu_fpsimd_context(void)
-{
-	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
-
-	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
-}
+#define get_cpu_fpsimd_context(__flags)			\
+	do {						\
+		local_bh_disable();			\
+		__get_cpu_fpsimd_context();		\
+		(void)(__flags);			\
+	} while (0)
 
 /*
  * Release the CPU FPSIMD context.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:233 @ static void __put_cpu_fpsimd_context(voi
  * previously called, with no call to put_cpu_fpsimd_context() in the
  * meantime.
  */
-static void put_cpu_fpsimd_context(void)
-{
-	__put_cpu_fpsimd_context();
-	local_bh_enable();
-}
+#define put_cpu_fpsimd_context(__flags)			\
+	do {						\
+		__put_cpu_fpsimd_context();		\
+		local_bh_enable();			\
+		(void)(__flags);			\
+	} while (0)
+
+#endif	/* !CONFIG_DOVETAIL */
 
 static bool have_cpu_fpsimd_context(void)
 {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:321 @ static void sve_free(struct task_struct
 static void task_fpsimd_load(void)
 {
 	WARN_ON(!system_supports_fpsimd());
-	WARN_ON(!have_cpu_fpsimd_context());
+	WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context());
 
 	if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE))
 		sve_load_state(sve_pffr(&current->thread),
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:335 @ static void task_fpsimd_load(void)
  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
  * date with respect to the CPU registers.
  */
-static void fpsimd_save(void)
+static void __fpsimd_save(void)
 {
 	struct fpsimd_last_state_struct const *last =
 		this_cpu_ptr(&fpsimd_last_state);
 	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
 
 	WARN_ON(!system_supports_fpsimd());
-	WARN_ON(!have_cpu_fpsimd_context());
+	WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context());
 
 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		if (IS_ENABLED(CONFIG_ARM64_SVE) &&
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:365 @ static void fpsimd_save(void)
 	}
 }
 
+void fpsimd_save(void)
+{
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
+	__fpsimd_save();
+	hard_cond_local_irq_restore(flags);
+}
+
 /*
  * All vector length selection from userspace comes through here.
  * We're on a slow path, so some sanity-checks are included.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:492 @ static void __fpsimd_to_sve(void *sst, s
  * task->thread.uw.fpsimd_state must be up to date before calling this
  * function.
  */
-static void fpsimd_to_sve(struct task_struct *task)
+static void _fpsimd_to_sve(struct task_struct *task)
 {
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:505 @ static void fpsimd_to_sve(struct task_st
 	__fpsimd_to_sve(sst, fst, vq);
 }
 
+static void fpsimd_to_sve(struct task_struct *task)
+{
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
+	_fpsimd_to_sve(task);
+	hard_cond_local_irq_restore(flags);
+}
+
 /*
  * Transfer the SVE state in task->thread.sve_state to
  * task->thread.uw.fpsimd_state.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:532 @ static void sve_to_fpsimd(struct task_st
 	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
 	unsigned int i;
 	__uint128_t const *p;
+	unsigned long flags;
 
 	if (!system_supports_sve())
 		return;
 
+	flags = hard_cond_local_irq_save();
+
 	vq = sve_vq_from_vl(task->thread.sve_vl);
 	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
 		p = (__uint128_t const *)ZREG(sst, vq, i);
 		fst->vregs[i] = arm64_le128_to_cpu(*p);
 	}
+
+	hard_cond_local_irq_restore(flags);
 }
 
 #ifdef CONFIG_ARM64_SVE
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:640 @ void sve_sync_from_fpsimd_zeropad(struct
 int sve_set_vector_length(struct task_struct *task,
 			  unsigned long vl, unsigned long flags)
 {
+	unsigned long irqflags = 0;
+
 	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
 				     PR_SVE_SET_VL_ONEXEC))
 		return -EINVAL;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:679 @ int sve_set_vector_length(struct task_st
 	 * non-SVE thread.
 	 */
 	if (task == current) {
-		get_cpu_fpsimd_context();
+		get_cpu_fpsimd_context(irqflags);
 
-		fpsimd_save();
+		__fpsimd_save();
 	}
 
 	fpsimd_flush_task_state(task);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:689 @ int sve_set_vector_length(struct task_st
 		sve_to_fpsimd(task);
 
 	if (task == current)
-		put_cpu_fpsimd_context();
+		put_cpu_fpsimd_context(irqflags);
 
 	/*
 	 * Force reallocation of task SVE state to the correct size
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:993 @ void fpsimd_release_task(struct task_str
  */
 void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 {
+	unsigned long flags;
+
+	oob_trap_notify(ARM64_TRAP_SVE, regs);
+
 	/* Even if we chose not to use SVE, the hardware could still trap: */
 	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
-		return;
+		goto out;
 	}
 
 	sve_alloc(current);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1009 @ void do_sve_acc(unsigned int esr, struct
 		return;
 	}
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
 	if (test_and_set_thread_flag(TIF_SVE))
 		WARN_ON(1); /* SVE access shouldn't have trapped */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1028 @ void do_sve_acc(unsigned int esr, struct
 		sve_flush_live(vq_minus_one);
 		fpsimd_bind_task_to_cpu();
 	} else {
-		fpsimd_to_sve(current);
+		_fpsimd_to_sve(current);
 	}
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
+out:
+	oob_trap_unwind(ARM64_TRAP_SVE, regs);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1065 @ void do_fpsimd_exc(unsigned int esr, str
 			si_code = FPE_FLTRES;
 	}
 
+	oob_trap_notify(ARM64_TRAP_FPE, regs);
+
 	send_sig_fault(SIGFPE, si_code,
 		       (void __user *)instruction_pointer(regs),
 		       current);
+
+	oob_trap_unwind(ARM64_TRAP_FPE, regs);
 }
 
 void fpsimd_thread_switch(struct task_struct *next)
 {
 	bool wrong_task, wrong_cpu;
+	unsigned long flags;
 
 	if (!system_supports_fpsimd())
 		return;
 
+	flags = hard_cond_local_irq_save();
+
 	__get_cpu_fpsimd_context();
 
 	/* Save unsaved fpsimd state, if any: */
-	fpsimd_save();
+	__fpsimd_save();
 
 	/*
 	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1102 @ void fpsimd_thread_switch(struct task_st
 			       wrong_task || wrong_cpu);
 
 	__put_cpu_fpsimd_context();
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void fpsimd_flush_thread(void)
 {
 	int vl, supported_vl;
+	unsigned long flags;
 
 	if (!system_supports_fpsimd())
 		return;
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
 	fpsimd_flush_task_state(current);
 	memset(&current->thread.uw.fpsimd_state, 0,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1155 @ void fpsimd_flush_thread(void)
 			current->thread.sve_vl_onexec = 0;
 	}
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1164 @ void fpsimd_flush_thread(void)
  */
 void fpsimd_preserve_current_state(void)
 {
+	unsigned long flags;
+
 	if (!system_supports_fpsimd())
 		return;
 
-	get_cpu_fpsimd_context();
-	fpsimd_save();
-	put_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
+	__fpsimd_save();
+	put_cpu_fpsimd_context(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1213 @ static void fpsimd_bind_task_to_cpu(void
 	}
 }
 
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+static void __fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
 			      unsigned int sve_vl)
 {
 	struct fpsimd_last_state_struct *last =
 		this_cpu_ptr(&fpsimd_last_state);
 
 	WARN_ON(!system_supports_fpsimd());
-	WARN_ON(!in_softirq() && !irqs_disabled());
 
 	last->st = st;
 	last->sve_state = sve_state;
 	last->sve_vl = sve_vl;
 }
 
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+			      unsigned int sve_vl)
+{
+	unsigned long flags;
+
+	WARN_ON(!in_softirq() && !irqs_disabled());
+
+	flags = hard_cond_local_irq_save();
+	__fpsimd_bind_state_to_cpu(st, sve_state, sve_vl);
+	hard_cond_local_irq_restore(flags);
+}
+
 /*
  * Load the userland FPSIMD state of 'current' from memory, but only if the
  * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1245 @ void fpsimd_bind_state_to_cpu(struct use
  */
 void fpsimd_restore_current_state(void)
 {
+	unsigned long flags;
+
 	/*
 	 * For the tasks that were created before we detected the absence of
 	 * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1261 @ void fpsimd_restore_current_state(void)
 		return;
 	}
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		task_fpsimd_load();
 		fpsimd_bind_task_to_cpu();
 	}
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1278 @ void fpsimd_restore_current_state(void)
  */
 void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 {
+	unsigned long flags;
+
 	if (WARN_ON(!system_supports_fpsimd()))
 		return;
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
 	current->thread.uw.fpsimd_state = *state;
 	if (test_thread_flag(TIF_SVE))
-		fpsimd_to_sve(current);
+		_fpsimd_to_sve(current);
 
 	task_fpsimd_load();
 	fpsimd_bind_task_to_cpu();
 
 	clear_thread_flag(TIF_FOREIGN_FPSTATE);
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1344 @ void fpsimd_save_and_flush_cpu_state(voi
 {
 	if (!system_supports_fpsimd())
 		return;
-	WARN_ON(preemptible());
+	WARN_ON(!hard_irqs_disabled() && preemptible());
 	__get_cpu_fpsimd_context();
-	fpsimd_save();
+	__fpsimd_save();
 	fpsimd_flush_cpu_state();
 	__put_cpu_fpsimd_context();
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1372 @ void fpsimd_save_and_flush_cpu_state(voi
  */
 void kernel_neon_begin(void)
 {
+	unsigned long flags;
+
 	if (WARN_ON(!system_supports_fpsimd()))
 		return;
 
 	BUG_ON(!may_use_simd());
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
 	/* Save unsaved fpsimd state, if any: */
-	fpsimd_save();
+	__fpsimd_save();
 
 	/* Invalidate any task state remaining in the fpsimd regs: */
 	fpsimd_flush_cpu_state();
+
+	if (dovetailing())
+		hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kernel_neon_begin);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1403 @ EXPORT_SYMBOL(kernel_neon_begin);
  */
 void kernel_neon_end(void)
 {
+	unsigned long flags = hard_local_save_flags();
+
 	if (!system_supports_fpsimd())
 		return;
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
 }
 EXPORT_SYMBOL(kernel_neon_end);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1498 @ void __efi_fpsimd_end(void)
 static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
 				  unsigned long cmd, void *v)
 {
+	unsigned long flags;
+
 	switch (cmd) {
 	case CPU_PM_ENTER:
+		flags = hard_cond_local_irq_save();
 		fpsimd_save_and_flush_cpu_state();
+		hard_cond_local_irq_restore(flags);
 		break;
 	case CPU_PM_EXIT:
 		break;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/idle.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/idle.c
--- linux-5.15.26/arch/arm64/kernel/idle.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/idle.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:45 @ void noinstr arch_cpu_idle(void)
 	 * tricks
 	 */
 	cpu_do_idle();
+	hard_cond_local_irq_enable();
 	raw_local_irq_enable();
 }
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/irq.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/irq.c
--- linux-5.15.26/arch/arm64/kernel/irq.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/irq.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:17 @
 #include <linux/memory.h>
 #include <linux/smp.h>
 #include <linux/hardirq.h>
+#include <linux/irq_pipeline.h>
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/kprobes.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:32 @ DEFINE_PER_CPU(struct nmi_ctx, nmi_conte
 
 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-
 DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
 
 #ifdef CONFIG_SHADOW_CALL_STACK
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/irq_pipeline.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/irq_pipeline.c
--- linux-5.15.26/arch/arm64/kernel/irq_pipeline.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/irq_pipeline.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/irq.h>
+#include <linux/irq_pipeline.h>
+
+void arch_do_IRQ_pipelined(struct irq_desc *desc)
+{
+	struct pt_regs *regs = raw_cpu_ptr(&irq_pipeline.tick_regs);
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	handle_irq_desc(desc);
+	irq_exit();
+
+	set_irq_regs(old_regs);
+}
+
+void __init arch_irq_pipeline_init(void)
+{
+	/* no per-arch init. */
+}
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/Makefile linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/Makefile
--- linux-5.15.26/arch/arm64/kernel/Makefile	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/Makefile	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:62 @ obj-$(CONFIG_ACPI)			+= acpi.o
 obj-$(CONFIG_ACPI_NUMA)			+= acpi_numa.o
 obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
 obj-$(CONFIG_PARAVIRT)			+= paravirt.o
+obj-$(CONFIG_IRQ_PIPELINE)		+= irq_pipeline.o
 obj-$(CONFIG_RANDOMIZE_BASE)		+= kaslr.o
 obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
 obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/signal.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/signal.c
--- linux-5.15.26/arch/arm64/kernel/signal.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/signal.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:14 @
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
+#include <linux/irq_pipeline.h>
 #include <linux/personality.h>
 #include <linux/freezer.h>
 #include <linux/stddef.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:921 @ static void do_signal(struct pt_regs *re
 	restore_saved_sigmask();
 }
 
+static inline void do_retuser(void)
+{
+	unsigned long thread_flags;
+
+	if (dovetailing()) {
+		thread_flags = current_thread_info()->flags;
+		if (thread_flags & _TIF_RETUSER)
+			inband_retuser_notify();
+	}
+}
+
 void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 {
+	WARN_ON_ONCE(irq_pipeline_debug() && running_oob());
+	WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+
 	do {
+		stall_inband_nocheck();
+
 		if (thread_flags & _TIF_NEED_RESCHED) {
 			/* Unmask Debug and SError for the next task */
-			local_daif_restore(DAIF_PROCCTX_NOIRQ);
+			local_daif_restore(irqs_pipelined() ? DAIF_PROCCTX :
+					DAIF_PROCCTX_NOIRQ);
 
 			schedule();
 		} else {
+			unstall_inband_nocheck();
 			local_daif_restore(DAIF_PROCCTX);
 
 			if (thread_flags & _TIF_UPROBE)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:967 @ void do_notify_resume(struct pt_regs *re
 
 			if (thread_flags & _TIF_FOREIGN_FPSTATE)
 				fpsimd_restore_current_state();
+
+			do_retuser();
+			/* RETUSER might have switched oob */
+			if (running_oob()) {
+				local_daif_mask();
+				return;
+			}
 		}
 
+		/*
+		 * Dovetail: we may have restored the fpsimd state for
+		 * current with no other opportunity to check for
+		 * _TIF_FOREIGN_FPSTATE until we are back running on
+		 * el0, so we must not take any interrupt until then,
+		 * otherwise we may end up resuming with some OOB
+		 * thread's fpsimd state.
+		 */
 		local_daif_mask();
 		thread_flags = READ_ONCE(current_thread_info()->flags);
 	} while (thread_flags & _TIF_WORK_MASK);
+
+	/*
+	 * irq_pipeline: trace_hardirqs_off was in effect on entry; we
+	 * leave it this way by virtue of calling local_daif_mask()
+	 * before exiting the loop. However, we did enter unstalled
+	 * and we must restore such state on exit.
+	 */
+	unstall_inband_nocheck();
 }
 
 unsigned long __ro_after_init signal_minsigstksz;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/smp.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/smp.c
--- linux-5.15.26/arch/arm64/kernel/smp.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/smp.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:80 @ enum ipi_msg_type {
 	NR_IPI
 };
 
-static int ipi_irq_base __read_mostly;
+int ipi_irq_base __read_mostly;
 static int nr_ipi __read_mostly = NR_IPI;
 static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:261 @ asmlinkage notrace void secondary_start_
 	complete(&cpu_running);
 
 	local_daif_restore(DAIF_PROCCTX);
+	local_irq_enable_full();
 
 	/*
 	 * OK, it's off to the idle thread for us
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:804 @ static const char *ipi_types[NR_IPI] __t
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
+static unsigned int get_ipi_count(struct irq_desc *desc, unsigned int cpu);
+
 unsigned long irq_err_count;
 
 int arch_show_interrupts(struct seq_file *p, int prec)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:816 @ int arch_show_interrupts(struct seq_file
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
+			seq_printf(p, "%10u ", get_ipi_count(ipi_desc[i], cpu));
 		seq_printf(p, "      %s\n", ipi_types[i]);
 	}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:878 @ static void ipi_cpu_crash_stop(unsigned
 
 	atomic_dec(&waiting_for_crash_ipi);
 
-	local_irq_disable();
+	local_irq_disable_full();
 	sdei_mask_local_cpu();
 
 	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:890 @ static void ipi_cpu_crash_stop(unsigned
 }
 
 /*
- * Main handler for inter-processor interrupts
+ * Main handler for inter-processor interrupts on the in-band stage.
  */
 static void do_handle_IPI(int ipinr)
 {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:949 @ static void do_handle_IPI(int ipinr)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
 }
 
+static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static DEFINE_PER_CPU(unsigned long, ipi_messages);
+
+static DEFINE_PER_CPU(unsigned int [NR_IPI], ipi_counts);
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	unsigned long *pmsg;
+	unsigned int ipinr;
+
+	/*
+	 * Decode in-band IPIs (0..NR_IPI - 1) multiplexed over
+	 * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own
+	 * individual handler.
+	 */
+	pmsg = raw_cpu_ptr(&ipi_messages);
+	while (*pmsg) {
+		ipinr = ffs(*pmsg) - 1;
+		clear_bit(ipinr, pmsg);
+		__this_cpu_inc(ipi_counts[ipinr]);
+		do_handle_IPI(ipinr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	unsigned int cpu;
+
+	/* regular in-band IPI (multiplexed over SGI0). */
+	for_each_cpu(cpu, target)
+		set_bit(ipinr, &per_cpu(ipi_messages, cpu));
+
+	wmb();
+	__smp_cross_call(target, 0);
+}
+
+static unsigned int get_ipi_count(struct irq_desc *desc, unsigned int cpu)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	return per_cpu(ipi_counts[irq - ipi_irq_base], cpu);
+}
+
+void irq_send_oob_ipi(unsigned int irq,
+		const struct cpumask *cpumask)
+{
+	unsigned int sgi = irq - ipi_irq_base;
+
+	if (WARN_ON(irq_pipeline_debug() &&
+		    (sgi < OOB_IPI_OFFSET ||
+		     sgi >= OOB_IPI_OFFSET + OOB_NR_IPI)))
+		return;
+
+	/* Out-of-band IPI (SGI1-2). */
+	__smp_cross_call(cpumask, sgi);
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+#else
+
 static irqreturn_t ipi_handler(int irq, void *data)
 {
 	do_handle_IPI(irq - ipi_irq_base);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1025 @ static irqreturn_t ipi_handler(int irq,
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	__smp_cross_call(target, ipinr);
+}
+
+static unsigned int get_ipi_count(struct irq_desc *desc, unsigned int cpu)
+{
+	return irq_desc_kstat_cpu(desc, cpu);
 }
 
+#endif /* CONFIG_IRQ_PIPELINE */
+
 static void ipi_setup(int cpu)
 {
 	int i;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1061 @ static void ipi_teardown(int cpu)
 
 void __init set_smp_ipi_range(int ipi_base, int n)
 {
-	int i;
+	int i, inband_nr_ipi;
 
 	WARN_ON(n < NR_IPI);
 	nr_ipi = min(n, NR_IPI);
+	/*
+	 * irq_pipeline: the in-band stage traps SGI0 only,
+	 * over which IPI messages are multiplexed. Other SGIs
+	 * are available for exchanging out-of-band IPIs.
+	 */
+	inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi;
 
 	for (i = 0; i < nr_ipi; i++) {
-		int err;
-
-		err = request_percpu_irq(ipi_base + i, ipi_handler,
-					 "IPI", &cpu_number);
-		WARN_ON(err);
+		if (i < inband_nr_ipi) {
+			int err;
 
+			err = request_percpu_irq(ipi_base + i, ipi_handler,
+						"IPI", &cpu_number);
+			WARN_ON(err);
+		}
 		ipi_desc[i] = irq_to_desc(ipi_base + i);
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
 	}
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/syscall.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/syscall.c
--- linux-5.15.26/arch/arm64/kernel/syscall.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/syscall.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:5 @
 
 #include <linux/compiler.h>
 #include <linux/context_tracking.h>
+#include <linux/irqstage.h>
 #include <linux/errno.h>
 #include <linux/nospec.h>
 #include <linux/ptrace.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:86 @ static void el0_svc_common(struct pt_reg
 			   const syscall_fn_t syscall_table[])
 {
 	unsigned long flags = current_thread_info()->flags;
+	int ret;
 
 	regs->orig_x0 = regs->regs[0];
 	regs->syscallno = scno;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:109 @ static void el0_svc_common(struct pt_reg
 	 * (Similarly for HVC and SMC elsewhere.)
 	 */
 
+	WARN_ON_ONCE(dovetail_debug() &&
+		     running_inband() && test_inband_stall());
 	local_daif_restore(DAIF_PROCCTX);
 
+	ret = pipeline_syscall(scno, regs);
+	if (ret > 0)
+		return;
+
+	if (ret < 0)
+		goto tail_work;
+
 	if (flags & _TIF_MTE_ASYNC_FAULT) {
 		/*
 		 * Process the asynchronous tag check fault before the actual
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:160 @ static void el0_svc_common(struct pt_reg
 	 * check again. However, if we were tracing entry, then we always trace
 	 * exit regardless, as the old entry assembly did.
 	 */
+tail_work:
 	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
 		local_daif_mask();
 		flags = current_thread_info()->flags;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/kernel/traps.c linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/traps.c
--- linux-5.15.26/arch/arm64/kernel/traps.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/kernel/traps.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:18 @
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/irqstage.h>
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:206 @ static int __die(const char *str, int er
 	return ret;
 }
 
-static DEFINE_RAW_SPINLOCK(die_lock);
+static DEFINE_HARD_SPINLOCK(die_lock);
 
 /*
  * This function is protected against re-entrancy.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:378 @ void arm64_skip_faulting_instruction(str
 }
 
 static LIST_HEAD(undef_hook);
-static DEFINE_RAW_SPINLOCK(undef_lock);
+static DEFINE_HARD_SPINLOCK(undef_lock);
 
 void register_undef_hook(struct undef_hook *hook)
 {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:500 @ void do_undefinstr(struct pt_regs *regs)
 		return;
 
 	BUG_ON(!user_mode(regs));
+	oob_trap_notify(ARM64_TRAP_UNDI, regs);
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+	oob_trap_unwind(ARM64_TRAP_UNDI, regs);
 }
 NOKPROBE_SYMBOL(do_undefinstr);
 
 void do_bti(struct pt_regs *regs)
 {
 	BUG_ON(!user_mode(regs));
+	oob_trap_notify(ARM64_TRAP_BTI, regs);
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+	oob_trap_unwind(ARM64_TRAP_BTI, regs);
 }
 NOKPROBE_SYMBOL(do_bti);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:580 @ static void user_cache_maint_handler(uns
 		return;
 	}
 
-	if (ret)
+	if (ret) {
+		oob_trap_notify(ARM64_TRAP_ACCESS, regs);
 		arm64_notify_segfault(tagged_address);
-	else
+		oob_trap_unwind(ARM64_TRAP_ACCESS, regs);
+	} else {
 		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+	}
 }
 
 static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:631 @ static void mrs_handler(unsigned int esr
 	rt = ESR_ELx_SYS64_ISS_RT(esr);
 	sysreg = esr_sys64_to_sysreg(esr);
 
-	if (do_emulate_mrs(regs, sysreg, rt) != 0)
+	if (do_emulate_mrs(regs, sysreg, rt) != 0) {
+		oob_trap_notify(ARM64_TRAP_ACCESS, regs);
 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+		oob_trap_unwind(ARM64_TRAP_ACCESS, regs);
+	}
 }
 
 static void wfi_handler(unsigned int esr, struct pt_regs *regs)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:864 @ void bad_el0_sync(struct pt_regs *regs,
 {
 	unsigned long pc = instruction_pointer(regs);
 
+	oob_trap_notify(ARM64_TRAP_ACCESS, regs);
 	current->thread.fault_address = 0;
 	current->thread.fault_code = esr;
 
 	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
 			      "Bad EL0 synchronous exception");
+	oob_trap_unwind(ARM64_TRAP_ACCESS, regs);
 }
 
 #ifdef CONFIG_VMAP_STACK
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/mm/context.c linux-dovetail-v5.15.y-dovetail/arch/arm64/mm/context.c
--- linux-5.15.26/arch/arm64/mm/context.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/mm/context.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:21 @
 #include <asm/tlbflush.h>
 
 static u32 asid_bits;
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static DEFINE_HARD_SPINLOCK(cpu_asid_lock);
 
 static atomic64_t asid_generation;
 static unsigned long *asid_map;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:220 @ void check_and_switch_context(struct mm_
 	unsigned long flags;
 	unsigned int cpu;
 	u64 asid, old_active_asid;
+	bool need_flush;
+
+	WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled());
 
 	if (system_supports_cnp())
 		cpu_set_reserved_ttbr0();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:258 @ void check_and_switch_context(struct mm_
 	}
 
 	cpu = smp_processor_id();
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
-		local_flush_tlb_all();
+	need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending);
 
 	atomic64_set(this_cpu_ptr(&active_asids), asid);
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
+	if (need_flush)
+		local_flush_tlb_all();
+
 switch_mm_fastpath:
 
 	arm64_apply_bp_hardening();
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/arm64/mm/fault.c linux-dovetail-v5.15.y-dovetail/arch/arm64/mm/fault.c
--- linux-5.15.26/arch/arm64/mm/fault.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/arm64/mm/fault.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:271 @ static bool __kprobes is_spurious_el1_tr
 	    (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
 		return false;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	asm volatile("at s1e1r, %0" :: "r" (addr));
 	isb();
 	par = read_sysreg_par();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	/*
 	 * If we now have a valid translation, treat the translation fault as
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:391 @ static void __do_kernel_fault(unsigned l
 		msg = "paging request";
 	}
 
+	/*
+	 * Dovetail: Don't bother restoring the in-band stage in the
+	 * non-recoverable fault case, we got busted and a full stage
+	 * switch is likely to make things even worse. Try at least to
+	 * get some debug output before panicing.
+	 */
 	die_kernel_fault(msg, addr, esr, regs);
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:469 @ static void do_bad_area(unsigned long fa
 	if (user_mode(regs)) {
 		const struct fault_info *inf = esr_to_fault_info(esr);
 
+		oob_trap_notify(ARM64_TRAP_ACCESS, regs);
 		set_thread_esr(addr, esr);
 		arm64_force_sig_fault(inf->sig, inf->code, far, inf->name);
+		oob_trap_unwind(ARM64_TRAP_ACCESS, regs);
 	} else {
 		__do_kernel_fault(addr, esr, regs);
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:537 @ static int __kprobes do_page_fault(unsig
 	if (kprobe_page_fault(regs, esr))
 		return 0;
 
+	oob_trap_notify(ARM64_TRAP_ACCESS, regs);
+
 	/*
 	 * If we're in an interrupt or have no user context, we must not take
 	 * the fault.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:615 @ retry:
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
-		return 0;
+		goto out;
 	}
 
 	if (fault & VM_FAULT_RETRY) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:631 @ retry:
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
 			      VM_FAULT_BADACCESS))))
-		return 0;
+		goto out;
 
 	/*
 	 * If we are in kernel mode at this point, we have no context to
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:647 @ retry:
 		 * oom-killed).
 		 */
 		pagefault_out_of_memory();
-		return 0;
+		goto out;
 	}
 
 	inf = esr_to_fault_info(esr);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:676 @ retry:
 				      far, inf->name);
 	}
 
-	return 0;
+	goto out;
 
 no_context:
 	__do_kernel_fault(addr, esr, regs);
+out:
+	oob_trap_unwind(ARM64_TRAP_ACCESS, regs);
 	return 0;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:715 @ static int do_sea(unsigned long far, uns
 	const struct fault_info *inf;
 	unsigned long siaddr;
 
+	oob_trap_notify(ARM64_TRAP_SEA, regs);
+
 	inf = esr_to_fault_info(esr);
 
 	if (user_mode(regs) && apei_claim_sea(regs) == 0) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:724 @ static int do_sea(unsigned long far, uns
 		 * APEI claimed this as a firmware-first notification.
 		 * Some processing deferred to task_work before ret_to_user().
 		 */
-		return 0;
+		goto out;
 	}
 
 	if (esr & ESR_ELx_FnV) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:738 @ static int do_sea(unsigned long far, uns
 		siaddr  = untagged_addr(far);
 	}
 	arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
+out:
+	oob_trap_unwind(ARM64_TRAP_SEA, regs);
 
 	return 0;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:832 @ void do_mem_abort(unsigned long far, uns
 	if (!inf->fn(far, esr, regs))
 		return;
 
+	oob_trap_notify(ARM64_TRAP_ACCESS, regs);
+
 	if (!user_mode(regs)) {
 		pr_alert("Unhandled fault at 0x%016lx\n", addr);
 		mem_abort_decode(esr);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:846 @ void do_mem_abort(unsigned long far, uns
 	 * address to the signal handler.
 	 */
 	arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr);
+	oob_trap_unwind(ARM64_TRAP_ACCESS, regs);
 }
 NOKPROBE_SYMBOL(do_mem_abort);
 
 void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
+	oob_trap_notify(ARM64_TRAP_ALIGN, regs);
+
 	arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
 			 addr, esr);
+
+	oob_trap_unwind(ARM64_TRAP_ALIGN, regs);
 }
 NOKPROBE_SYMBOL(do_sp_pc_abort);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:920 @ void do_debug_exception(unsigned long ad
 	const struct fault_info *inf = esr_to_debug_fault_info(esr);
 	unsigned long pc = instruction_pointer(regs);
 
+	oob_trap_notify(ARM64_TRAP_DEBUG, regs);
+
 	debug_exception_enter(regs);
 
 	if (user_mode(regs) && !is_ttbr0_addr(pc))
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:932 @ void do_debug_exception(unsigned long ad
 	}
 
 	debug_exception_exit(regs);
+
+	oob_trap_unwind(ARM64_TRAP_DEBUG, regs);
 }
 NOKPROBE_SYMBOL(do_debug_exception);
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/Kconfig linux-dovetail-v5.15.y-dovetail/arch/Kconfig
--- linux-5.15.26/arch/Kconfig	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/Kconfig	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:197 @ config HAVE_KPROBES_ON_FTRACE
 config HAVE_FUNCTION_ERROR_INJECTION
 	bool
 
+config HAVE_PERCPU_PREEMPT_COUNT
+	bool
+
 config HAVE_NMI
 	bool
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/entry/common.c linux-dovetail-v5.15.y-dovetail/arch/x86/entry/common.c
--- linux-5.15.26/arch/x86/entry/common.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/entry/common.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:78 @ __visible noinstr void do_syscall_64(str
 	add_random_kstack_offset();
 	nr = syscall_enter_from_user_mode(regs, nr);
 
+	if (dovetailing()) {
+		if (nr == EXIT_SYSCALL_OOB) {
+			hard_local_irq_disable();
+			return;
+		}
+		if (nr == EXIT_SYSCALL_TAIL)
+			goto done;
+	}
+
 	instrumentation_begin();
 
 	if (!do_syscall_x64(regs, nr) && !do_syscall_x32(regs, nr) && nr != -1) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:95 @ __visible noinstr void do_syscall_64(str
 	}
 
 	instrumentation_end();
+done:
 	syscall_exit_to_user_mode(regs);
 }
 #endif
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:140 @ __visible noinstr void do_int80_syscall_
 	 * the semantics of syscall_get_nr().
 	 */
 	nr = syscall_enter_from_user_mode(regs, nr);
+
+	if (dovetailing()) {
+		if (nr == EXIT_SYSCALL_OOB) {
+			hard_local_irq_disable();
+			return;
+		}
+		if (nr == EXIT_SYSCALL_TAIL)
+			goto done;
+	}
+
 	instrumentation_begin();
 
 	do_syscall_32_irqs_on(regs, nr);
 
 	instrumentation_end();
+done:
 	syscall_exit_to_user_mode(regs);
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:198 @ static noinstr bool __do_fast_syscall_32
 
 	nr = syscall_enter_from_user_mode_work(regs, nr);
 
+	if (dovetailing()) {
+		if (nr == EXIT_SYSCALL_OOB) {
+			instrumentation_end();
+			hard_local_irq_disable();
+			return true;
+		}
+		if (nr == EXIT_SYSCALL_TAIL)
+			goto done;
+	}
+
 	/* Now this is just like a normal syscall. */
 	do_syscall_32_irqs_on(regs, nr);
 
+done:
 	instrumentation_end();
 	syscall_exit_to_user_mode(regs);
 	return true;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/entry/entry_64.S linux-dovetail-v5.15.y-dovetail/arch/x86/entry/entry_64.S
--- linux-5.15.26/arch/x86/entry/entry_64.S	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/entry/entry_64.S	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:416 @ SYM_CODE_END(\asmsym)
  * If hits in kernel mode then it needs to go through the paranoid
  * entry as the exception can hit any random state. No preemption
  * check on exit to keep the paranoid path simple.
+ *
+ * irq_pipeline: since those events are non-maskable in essence,
+ * we may assume NMI-type restrictions for their handlers, which
+ * means the latter may - and actually have to - run immediately
+ * regardless of the current stage.
  */
 .macro idtentry_mce_db vector asmsym cfunc
 SYM_CODE_START(\asmsym)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/hyperv/hv_init.c linux-dovetail-v5.15.y-dovetail/arch/x86/hyperv/hv_init.c
--- linux-5.15.26/arch/x86/hyperv/hv_init.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/hyperv/hv_init.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:130 @ static inline bool hv_reenlightenment_av
 		ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT;
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_REENLIGHTENMENT_VECTOR,
+				 sysvec_hyperv_reenlightenment)
 {
 	ack_APIC_irq();
 	inc_irq_stat(irq_hv_reenlightenment_count);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/apic.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/apic.h
--- linux-5.15.26/arch/x86/include/asm/apic.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/apic.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:439 @ static inline void apic_set_eoi_write(vo
 
 extern void apic_ack_irq(struct irq_data *data);
 
-static inline void ack_APIC_irq(void)
+static inline void __ack_APIC_irq(void)
 {
 	/*
 	 * ack_APIC_irq() actually gets compiled as a single instruction
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:448 @ static inline void ack_APIC_irq(void)
 	apic_eoi();
 }
 
+/*
+ * EOI the local APIC, unless IRQs are pipelined, in which case this
+ * becomes a nop: the pipeline entry code is then responsible for
+ * issuing the EOI. NOTE(review): confirm the actual EOI site against
+ * arch_pipeline_entry().
+ */
+static inline void ack_APIC_irq(void)
+{
+	if (!irqs_pipelined())
+		__ack_APIC_irq();
+}
 
 static inline bool lapic_vector_set_in_irr(unsigned int vector)
 {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/dovetail.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/dovetail.h
--- linux-5.15.26/arch/x86/include/asm/dovetail.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/dovetail.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Philippe Gerum.
+ */
+#ifndef _ASM_X86_DOVETAIL_H
+#define _ASM_X86_DOVETAIL_H
+
+#if !defined(__ASSEMBLY__) && defined(CONFIG_DOVETAIL)
+
+#include <asm/fpu/api.h>
+
+/*
+ * Dovetail hook invoked when preparing a task for exec(): make sure
+ * no FPU register reload is left pending for the new context.
+ */
+static inline void arch_dovetail_exec_prepare(void)
+{
+	clear_thread_flag(TIF_NEED_FPU_LOAD);
+}
+
+/*
+ * Dovetail context-switch hook, called before the stage switch:
+ * when leaving the in-band stage, suspend the in-band FPU context
+ * (fpu__suspend_inband()).
+ */
+static inline
+void arch_dovetail_switch_prepare(bool leave_inband)
+{
+	if (leave_inband)
+		fpu__suspend_inband();
+}
+
+/*
+ * Dovetail context-switch hook, called after the stage switch: when
+ * re-entering the in-band stage, resume its FPU context; otherwise,
+ * reload the incoming user task's fpregs if a reload is still
+ * pending (TIF_NEED_FPU_LOAD). Kernel threads are excluded.
+ */
+static inline
+void arch_dovetail_switch_finish(bool enter_inband)
+{
+	if (enter_inband)
+		fpu__resume_inband();
+	else if (!(current->flags & PF_KTHREAD) &&
+		test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_return();
+}
+
+#endif
+
+#endif /* _ASM_X86_DOVETAIL_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/fpu/api.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/fpu/api.h
--- linux-5.15.26/arch/x86/include/asm/fpu/api.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/fpu/api.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:67 @ static inline void kernel_fpu_begin(void
  *
  * Disabling preemption also serializes against kernel_fpu_begin().
  */
-static inline void fpregs_lock(void)
+static inline unsigned long fpregs_lock(void)
 {
+	if (IS_ENABLED(CONFIG_IRQ_PIPELINE))
+		return hard_preempt_disable();
+
 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_bh_disable();
 	else
 		preempt_disable();
+
+	return 0;
 }
 
-static inline void fpregs_unlock(void)
+static inline void fpregs_unlock(unsigned long flags)
 {
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_bh_enable();
-	else
-		preempt_enable();
+	if (IS_ENABLED(CONFIG_IRQ_PIPELINE)) {
+		hard_preempt_enable(flags);
+	} else {
+		if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+			local_bh_enable();
+		else
+			preempt_enable();
+	}
 }
 
 #ifdef CONFIG_X86_DEBUG_FPU
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:103 @ static inline void fpregs_assert_state_c
  */
 extern void switch_fpu_return(void);
 
+/* For Dovetail context switching. */
+void fpu__suspend_inband(void);
+void fpu__resume_inband(void);
+
 /*
  * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
  *
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/fpu/internal.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/fpu/internal.h
--- linux-5.15.26/arch/x86/include/asm/fpu/internal.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/fpu/internal.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:18 @
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/dovetail.h>
 
 #include <asm/user.h>
 #include <asm/fpu/api.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:487 @ static inline void fpregs_restore_userre
 	clear_thread_flag(TIF_NEED_FPU_LOAD);
 }
 
+#ifdef CONFIG_DOVETAIL
+
+/* Flag @fpu as having been preempted by an out-of-band task. */
+static inline void oob_fpu_set_preempt(struct fpu *fpu)
+{
+	fpu->preempted = 1;
+}
+
+/* Clear the out-of-band preemption mark on @fpu. */
+static inline void oob_fpu_clear_preempt(struct fpu *fpu)
+{
+	fpu->preempted = 0;
+}
+
+/*
+ * Tell whether @old_fpu was preempted by an out-of-band task, in
+ * which case switch_fpu_prepare() must not save its fpregs again.
+ */
+static inline bool oob_fpu_preempted(struct fpu *old_fpu)
+{
+	return old_fpu->preempted;
+}
+
+#else
+
+/* Dovetail disabled: out-of-band preemption cannot happen. */
+static inline bool oob_fpu_preempted(struct fpu *old_fpu)
+{
+	return false;
+}
+
+#endif	/* !CONFIG_DOVETAIL */
+
 /*
  * FPU state switching for scheduling.
  *
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:537 @ static inline void fpregs_restore_userre
  */
 static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
+	if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD) &&
+	    !oob_fpu_preempted(old_fpu)) {
 		save_fpregs_to_fpstate(old_fpu);
 		/*
 		 * The save operation preserved register state, so the
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/fpu/types.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/fpu/types.h
--- linux-5.15.26/arch/x86/include/asm/fpu/types.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/fpu/types.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:332 @ struct fpu {
 	 */
 	unsigned int			last_cpu;
 
+#ifdef CONFIG_DOVETAIL
+	/*
+	 * @preempted:
+	 *
+	 * When Dovetail is enabled, this flag is set for an in-band
+	 * task whose FPU context was saved while it was running
+	 * inside a kernel_fpu_begin/end() section, i.e. before that
+	 * section got preempted by an out-of-band task.
+	 */
+	unsigned char			preempted : 1;
+#endif
+
 	/*
 	 * @avx512_timestamp:
 	 *
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/i8259.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/i8259.h
--- linux-5.15.26/arch/x86/include/asm/i8259.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/i8259.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:31 @ extern unsigned int cached_irq_mask;
 #define SLAVE_ICW4_DEFAULT	0x01
 #define PIC_ICW4_AEOI		2
 
-extern raw_spinlock_t i8259A_lock;
+extern hard_spinlock_t i8259A_lock;
 
 /* the PIC may need a careful delay on some platforms, hence specific calls */
 static inline unsigned char inb_pic(unsigned int port)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/idtentry.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/idtentry.h
--- linux-5.15.26/arch/x86/include/asm/idtentry.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/idtentry.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:177 @ __visible noinstr void func(struct pt_re
 #define DECLARE_IDTENTRY_IRQ(vector, func)				\
 	DECLARE_IDTENTRY_ERRORCODE(vector, func)
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+struct irq_stage_data;
+
+void arch_pipeline_entry(struct pt_regs *regs, u8 vector);
+
+#define DECLARE_IDTENTRY_SYSVEC_PIPELINED(vector, func)			\
+	DECLARE_IDTENTRY_SYSVEC(vector, func);				\
+	__visible void __##func(struct pt_regs *regs)
+
+#define DEFINE_IDTENTRY_IRQ_PIPELINED(func)				\
+__visible noinstr void func(struct pt_regs *regs,			\
+			    unsigned long error_code)			\
+{									\
+	arch_pipeline_entry(regs, (u8)error_code);			\
+}									\
+static __always_inline void __##func(struct pt_regs *regs, u8 vector)
+
+/*
+ * In a pipelined model, the actual sysvec __handler() is directly
+ * instrumentable, just like it is in fact in the non-pipelined
+ * model. The indirect call via run_on_irqstack_cond() in
+ * DEFINE_IDTENTRY_SYSVEC() happens to hide the noinstr dependency
+ * from objtool in the latter case.
+ */
+#define DEFINE_IDTENTRY_SYSVEC_PIPELINED(vector, func)			\
+__visible noinstr void func(struct pt_regs *regs)			\
+{									\
+	arch_pipeline_entry(regs, vector);				\
+}									\
+									\
+__visible void __##func(struct pt_regs *regs)
+
+#define DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(vector, func)		\
+	DEFINE_IDTENTRY_SYSVEC_PIPELINED(vector, func)
+
+#else  /* !CONFIG_IRQ_PIPELINE */
+
+#define DECLARE_IDTENTRY_SYSVEC_PIPELINED(vector, func)		DECLARE_IDTENTRY_SYSVEC(vector, func)
+
+#define DEFINE_IDTENTRY_IRQ_PIPELINED(func)			DEFINE_IDTENTRY_IRQ(func)
+#define DEFINE_IDTENTRY_SYSVEC_PIPELINED(vector, func)		DEFINE_IDTENTRY_SYSVEC(func)
+#define DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(vector, func)	DEFINE_IDTENTRY_SYSVEC_SIMPLE(func)
+
 /**
  * DEFINE_IDTENTRY_IRQ - Emit code for device interrupt IDT entry points
  * @func:	Function name of the entry point
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:251 @ __visible noinstr void func(struct pt_re
 									\
 static noinline void __##func(struct pt_regs *regs, u32 vector)
 
+#endif	/* !CONFIG_IRQ_PIPELINE */
+
 /**
  * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
  * @vector:	Vector number (ignored for C)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:496 @ __visible noinstr void func(struct pt_re
 #define DECLARE_IDTENTRY_SYSVEC(vector, func)				\
 	idtentry_sysvec vector func
 
+#define DECLARE_IDTENTRY_SYSVEC_PIPELINED(vector, func)			\
+	DECLARE_IDTENTRY_SYSVEC(vector, func)
+
 #ifdef CONFIG_X86_64
 # define DECLARE_IDTENTRY_MCE(vector, func)				\
 	idtentry_mce_db vector asm_##func func
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:687 @ DECLARE_IDTENTRY_IRQ(X86_TRAP_OTHER,	spu
 #ifdef CONFIG_X86_LOCAL_APIC
 DECLARE_IDTENTRY_SYSVEC(ERROR_APIC_VECTOR,		sysvec_error_interrupt);
 DECLARE_IDTENTRY_SYSVEC(SPURIOUS_APIC_VECTOR,		sysvec_spurious_apic_interrupt);
-DECLARE_IDTENTRY_SYSVEC(LOCAL_TIMER_VECTOR,		sysvec_apic_timer_interrupt);
-DECLARE_IDTENTRY_SYSVEC(X86_PLATFORM_IPI_VECTOR,	sysvec_x86_platform_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(LOCAL_TIMER_VECTOR,		sysvec_apic_timer_interrupt);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(X86_PLATFORM_IPI_VECTOR,	sysvec_x86_platform_ipi);
 #endif
 
 #ifdef CONFIG_SMP
-DECLARE_IDTENTRY(RESCHEDULE_VECTOR,			sysvec_reschedule_ipi);
-DECLARE_IDTENTRY_SYSVEC(IRQ_MOVE_CLEANUP_VECTOR,	sysvec_irq_move_cleanup);
-DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR,			sysvec_reboot);
-DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR,	sysvec_call_function_single);
-DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR,		sysvec_call_function);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(RESCHEDULE_VECTOR,		sysvec_reschedule_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(IRQ_MOVE_CLEANUP_VECTOR,	sysvec_irq_move_cleanup);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(REBOOT_VECTOR,		sysvec_reboot);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_SINGLE_VECTOR,	sysvec_call_function_single);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_VECTOR,		sysvec_call_function);
+#ifdef CONFIG_IRQ_PIPELINE
+DECLARE_IDTENTRY_SYSVEC(RESCHEDULE_OOB_VECTOR,		sysvec_reschedule_oob_ipi);
+DECLARE_IDTENTRY_SYSVEC(TIMER_OOB_VECTOR,		sysvec_timer_oob_ipi);
+#endif
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
 # ifdef CONFIG_X86_MCE_THRESHOLD
-DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR,		sysvec_threshold);
+DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR,	sysvec_threshold);
 # endif
 
 # ifdef CONFIG_X86_MCE_AMD
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:717 @ DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VEC
 # endif
 
 # ifdef CONFIG_IRQ_WORK
-DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR,		sysvec_irq_work);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(IRQ_WORK_VECTOR,	sysvec_irq_work);
 # endif
 #endif
 
 #ifdef CONFIG_HAVE_KVM
-DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR,		sysvec_kvm_posted_intr_ipi);
-DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR,	sysvec_kvm_posted_intr_wakeup_ipi);
-DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR,	sysvec_kvm_posted_intr_nested_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_VECTOR,		sysvec_kvm_posted_intr_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_WAKEUP_VECTOR,	sysvec_kvm_posted_intr_wakeup_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_NESTED_VECTOR,	sysvec_kvm_posted_intr_nested_ipi);
 #endif
 
 #if IS_ENABLED(CONFIG_HYPERV)
-DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_hyperv_callback);
-DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR,	sysvec_hyperv_reenlightenment);
-DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR,	sysvec_hyperv_stimer0);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
 #endif
 
 #if IS_ENABLED(CONFIG_ACRN_GUEST)
-DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_acrn_hv_callback);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,	sysvec_acrn_hv_callback);
 #endif
 
 #ifdef CONFIG_XEN_PVHVM
-DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_xen_hvm_callback);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,	sysvec_xen_hvm_callback);
 #endif
 
 #ifdef CONFIG_KVM_GUEST
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/irqflags.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/irqflags.h
--- linux-5.15.26/arch/x86/include/asm/irqflags.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/irqflags.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:29 @ extern __always_inline unsigned long nat
 	 * it evaluates its effective address -- this is part of the
 	 * documented behavior of the "pop" instruction.
 	 */
-	asm volatile("# __raw_save_flags\n\t"
+	asm volatile("# __native_save_flags\n\t"
 		     "pushf ; pop %0"
 		     : "=rm" (flags)
 		     : /* no input */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:38 @ extern __always_inline unsigned long nat
 	return flags;
 }
 
+/*
+ * Write @flags back into EFLAGS via push/popf. Note this restores
+ * the whole register, not only IF. The extern inline declaration
+ * pair mirrors the native_save_fl() style used above.
+ */
+extern inline void native_restore_fl(unsigned long flags);
+extern __always_inline void native_restore_fl(unsigned long flags)
+{
+	asm volatile("push %0 ; popf"
+		     : /* no output */
+		     :"g" (flags)
+		     :"memory", "cc");
+}
+
 static __always_inline void native_irq_disable(void)
 {
 	asm volatile("cli": : :"memory");
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:57 @ static __always_inline void native_irq_e
 	asm volatile("sti": : :"memory");
 }
 
+/* Alias of native_save_fl() for flags-word naming symmetry. */
+static inline unsigned long native_save_flags(void)
+{
+	return native_save_fl();
+}
+
+/*
+ * Briefly open the hard interrupt window: STI's one-instruction
+ * interrupt shadow means pending IRQs are accepted during the NOP,
+ * then CLI closes the window again.
+ */
+static __always_inline void native_irq_sync(void)
+{
+	asm volatile("sti ; nop ; cli": : :"memory");
+}
+
+/* Save EFLAGS, then disable hard IRQs; return the saved flags. */
+static __always_inline unsigned long native_irq_save(void)
+{
+	unsigned long flags;
+
+	flags = native_save_flags();
+
+	native_irq_disable();
+
+	return flags;
+}
+
+/* True if @flags has the IF bit cleared. */
+static __always_inline int native_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & X86_EFLAGS_IF);
+}
+
+static __always_inline void native_irq_restore(unsigned long flags)
+{
+	/*
+	 * CAUTION: the hard_irq_* API may be used to bracket code
+	 * which re-enables interrupts inside save/restore pairs, so
+	 * do not try to be (too) smart: do restore the original flags
+	 * unconditionally.
+	 */
+	native_restore_fl(flags);
+}
+
+/* True if hard IRQs are currently disabled on this CPU. */
+static __always_inline bool native_irqs_disabled(void)
+{
+	unsigned long flags = native_save_flags();
+	return native_irqs_disabled_flags(flags);
+}
+
 static inline __cpuidle void native_safe_halt(void)
 {
 	mds_idle_clear_cpu_buffers();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:119 @ static inline __cpuidle void native_halt
 #else
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
-
-static __always_inline unsigned long arch_local_save_flags(void)
-{
-	return native_save_fl();
-}
-
-static __always_inline void arch_local_irq_disable(void)
-{
-	native_irq_disable();
-}
-
-static __always_inline void arch_local_irq_enable(void)
-{
-	native_irq_enable();
-}
+#include <asm/irq_pipeline.h>
 
 /*
  * Used in the idle loop; sti takes one instruction cycle
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:139 @ static inline __cpuidle void halt(void)
 	native_halt();
 }
 
-/*
- * For spinlocks, etc:
- */
-static __always_inline unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags = arch_local_save_flags();
-	arch_local_irq_disable();
-	return flags;
-}
 #else
 
 #ifdef CONFIG_X86_64
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:156 @ static __always_inline unsigned long arc
 #ifndef __ASSEMBLY__
 static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & X86_EFLAGS_IF);
+	return native_irqs_disabled_flags(flags);
 }
 
 static __always_inline int arch_irqs_disabled(void)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:166 @ static __always_inline int arch_irqs_dis
 	return arch_irqs_disabled_flags(flags);
 }
 
-static __always_inline void arch_local_irq_restore(unsigned long flags)
+#ifndef CONFIG_IRQ_PIPELINE
+static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
 	if (!arch_irqs_disabled_flags(flags))
 		arch_local_irq_enable();
 }
+#endif
+
 #else
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_XEN_PV
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/irq_pipeline.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/irq_pipeline.h
--- linux-5.15.26/arch/x86/include/asm/irq_pipeline.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/irq_pipeline.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _ASM_X86_IRQ_PIPELINE_H
+#define _ASM_X86_IRQ_PIPELINE_H
+
+#include <asm-generic/irq_pipeline.h>
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+#include <asm/ptrace.h>
+
+#define FIRST_SYSTEM_IRQ	NR_IRQS
+#define TIMER_OOB_IPI		apicm_vector_irq(TIMER_OOB_VECTOR)
+#define RESCHEDULE_OOB_IPI	apicm_vector_irq(RESCHEDULE_OOB_VECTOR)
+#define apicm_irq_vector(__irq) ((__irq) - FIRST_SYSTEM_IRQ + FIRST_SYSTEM_VECTOR)
+#define apicm_vector_irq(__vec) ((__vec) - FIRST_SYSTEM_VECTOR + FIRST_SYSTEM_IRQ)
+
+#define X86_EFLAGS_SS_BIT	31
+
+/*
+ * Translate the in-band stage's virtual interrupt state into a
+ * native flags word: IF is set when the stage is unstalled.
+ */
+static inline notrace
+unsigned long arch_irqs_virtual_to_native_flags(int stalled)
+{
+	return (!stalled) << X86_EFLAGS_IF_BIT;
+}
+
+/*
+ * Translate a native flags word into a virtual stall state, encoded
+ * in the software-defined bit 31 (X86_EFLAGS_SS_BIT) of the result.
+ */
+static inline notrace
+unsigned long arch_irqs_native_to_virtual_flags(unsigned long flags)
+{
+	return hard_irqs_disabled_flags(flags) << X86_EFLAGS_SS_BIT;
+}
+
+#ifndef CONFIG_PARAVIRT_XXL
+
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+	int stalled = inband_irq_save();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+	barrier();
+	inband_irq_enable();
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+	inband_irq_disable();
+	barrier();
+}
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+	int stalled = inband_irqs_disabled();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+#endif /* !CONFIG_PARAVIRT_XXL */
+
+/* Restore the in-band (virtual) stall state matching @flags. */
+static inline notrace void arch_local_irq_restore(unsigned long flags)
+{
+	inband_irq_restore(native_irqs_disabled_flags(flags));
+	barrier();
+}
+
+/*
+ * Copy the register-frame fields needed to replay a timer tick.
+ * NOTE(review): only unwind-relevant fields are captured — confirm
+ * against the generic pipelined tick code that this set suffices.
+ */
+static inline
+void arch_save_timer_regs(struct pt_regs *dst, struct pt_regs *src)
+{
+	dst->flags = src->flags;
+	dst->cs = src->cs;
+	dst->ip = src->ip;
+	dst->bp = src->bp;
+	dst->ss = src->ss;
+	dst->sp = src->sp;
+}
+
+/*
+ * Tell whether the tick preempted a context running with hard IRQs
+ * off (IF cleared in the saved frame) — presumably the condition for
+ * stealing the tick; verify against the caller.
+ */
+static inline bool arch_steal_pipelined_tick(struct pt_regs *regs)
+{
+	return !(regs->flags & X86_EFLAGS_IF);
+}
+
+/* No arch-specific setup is needed to enable the oob stage on x86. */
+static inline int arch_enable_oob_stage(void)
+{
+	return 0;
+}
+
+/* No arch-specific post-handling for pipelined IRQs on x86. */
+static inline void arch_handle_irq_pipelined(struct pt_regs *regs)
+{ }
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+struct pt_regs;
+
+#ifndef CONFIG_PARAVIRT_XXL
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+	return native_save_fl();
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+	native_irq_disable();
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+	native_irq_enable();
+}
+
+/*
+ * For spinlocks, etc:
+ */
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags = arch_local_save_flags();
+	arch_local_irq_disable();
+	return flags;
+}
+
+#endif /* !CONFIG_PARAVIRT_XXL */
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+#endif /* _ASM_X86_IRQ_PIPELINE_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/irq_stack.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/irq_stack.h
--- linux-5.15.26/arch/x86/include/asm/irq_stack.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/irq_stack.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:135 @
 	/*								\
 	 * User mode entry and interrupt on the irq stack do not	\
 	 * switch stacks. If from user mode the task stack is empty.	\
+	 *								\
+	 * irq_pipeline: we always start from a kernel context when	\
+	 * replaying interrupts, so the user check is not relevant	\
+	 * in this case.						\
 	 */								\
-	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
+	if ((!irqs_pipelined() && user_mode(regs)) ||			\
+		__this_cpu_read(hardirq_stack_inuse)) {			\
 		irq_enter_rcu();					\
 		func(c_args);						\
 		irq_exit_rcu();						\
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:151 @
 		 * switching stacks. Interrupts are disabled in both	\
 		 * places. Invoke the stack switch macro with the call	\
 		 * sequence which matches the above direct invocation.	\
+		 *							\
+		 * IRQ pipeline: only in-band (soft-)irq handlers have	\
+		 * to run on the irqstack. Out-of-band irq handlers     \
+		 * run directly over the preempted context, therefore   \
+		 * they never land there.				\
 		 */							\
 		__this_cpu_write(hardirq_stack_inuse, true);		\
 		call_on_irqstack(func, asm_call, constr);		\
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/irq_vectors.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/irq_vectors.h
--- linux-5.15.26/arch/x86/include/asm/irq_vectors.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/irq_vectors.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:109 @
 
 #define LOCAL_TIMER_VECTOR		0xec
 
+#ifdef CONFIG_IRQ_PIPELINE
+#define TIMER_OOB_VECTOR		0xeb
+#define RESCHEDULE_OOB_VECTOR		0xea
+#define FIRST_SYSTEM_APIC_VECTOR	RESCHEDULE_OOB_VECTOR
+#define NR_APIC_VECTORS	        	(NR_VECTORS - FIRST_SYSTEM_VECTOR)
+#else
+#define FIRST_SYSTEM_APIC_VECTOR	LOCAL_TIMER_VECTOR
+#endif
+
 #define NR_VECTORS			 256
 
 #ifdef CONFIG_X86_LOCAL_APIC
-#define FIRST_SYSTEM_VECTOR		LOCAL_TIMER_VECTOR
+#define FIRST_SYSTEM_VECTOR		FIRST_SYSTEM_APIC_VECTOR
 #else
 #define FIRST_SYSTEM_VECTOR		NR_VECTORS
 #endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/mmu_context.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/mmu_context.h
--- linux-5.15.26/arch/x86/include/asm/mmu_context.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/mmu_context.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:135 @ extern void switch_mm_irqs_off(struct mm
 			       struct task_struct *tsk);
 #define switch_mm_irqs_off switch_mm_irqs_off
 
+/*
+ * Switch the active mm from the out-of-band stage; simply delegates
+ * to switch_mm_irqs_off(), which by name is callable with IRQs off.
+ */
+static inline void
+switch_oob_mm(struct mm_struct *prev, struct mm_struct *next,
+	      struct task_struct *tsk)
+{
+	switch_mm_irqs_off(prev, next, tsk);
+}
+
 #define activate_mm(prev, next)			\
 do {						\
 	paravirt_activate_mm((prev), (next));	\
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/special_insns.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/special_insns.h
--- linux-5.15.26/arch/x86/include/asm/special_insns.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/special_insns.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:129 @ static inline void native_load_gs_index(
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	asm_load_gs_index(selector);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline unsigned long __read_cr4(void)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/syscall.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/syscall.h
--- linux-5.15.26/arch/x86/include/asm/syscall.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/syscall.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:130 @ static inline void syscall_get_arguments
 	}
 }
 
+static inline unsigned long syscall_get_arg0(struct pt_regs *regs)
+{
+	return regs->di;
+}
+
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
 					 const unsigned long *args)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/thread_info.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/thread_info.h
--- linux-5.15.26/arch/x86/include/asm/thread_info.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/thread_info.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:55 @
 struct task_struct;
 #include <asm/cpufeature.h>
 #include <linux/atomic.h>
+#include <dovetail/thread_info.h>
 
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	unsigned long		syscall_work;	/* SYSCALL_WORK_ flags */
 	u32			status;		/* thread synchronous flags */
+	struct oob_thread_state	oob_state;	/* co-kernel thread state */
 };
 
 #define INIT_THREAD_INFO(tsk)			\
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:69 @ struct thread_info {
 	.flags		= 0,			\
 }
 
+#define ti_local_flags(__ti)	((__ti)->status)
+
 #else /* !__ASSEMBLY__ */
 
 #include <asm/asm-offsets.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:97 @ struct thread_info {
 #define TIF_NOTSC		16	/* TSC is not accessible in userland */
 #define TIF_NOTIFY_SIGNAL	17	/* signal notifications exist */
 #define TIF_SLD			18	/* Restore split lock detection on context switch */
+#define TIF_RETUSER		19	/* INBAND_TASK_RETUSER is pending */
 #define TIF_MEMDIE		20	/* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG	21	/* idle is polling for TIF_NEED_RESCHED */
 #define TIF_IO_BITMAP		22	/* uses I/O bitmap */
 #define TIF_SPEC_FORCE_UPDATE	23	/* Force speculation MSR update in context switch */
 #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
+#define TIF_MAYDAY		26	/* emergency trap pending */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
 #define TIF_ADDR32		29	/* 32-bit address space on 64 bits */
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:123 @ struct thread_info {
 #define _TIF_NOTSC		(1 << TIF_NOTSC)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_SLD		(1 << TIF_SLD)
+#define _TIF_RETUSER		(1 << TIF_RETUSER)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
 #define _TIF_SPEC_FORCE_UPDATE	(1 << TIF_SPEC_FORCE_UPDATE)
 #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
+#define _TIF_MAYDAY		(1 << TIF_MAYDAY)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
 #define _TIF_ADDR32		(1 << TIF_ADDR32)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:220 @ static inline int arch_within_stack_fram
  * have to worry about atomic accesses.
  */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
+/* bits 2 and 3 reserved for compat */
+#define TS_OOB			0x0010	/* Thread is running out-of-band */
+#define TS_DOVETAIL		0x0020  /* Dovetail notifier enabled */
+#define TS_OFFSTAGE		0x0040	/* Thread is in-flight to OOB context */
+#define TS_OOBTRAP		0x0080	/* Handling a trap from OOB context */
+
+#define _TLF_OOB		TS_OOB
+#define _TLF_DOVETAIL		TS_DOVETAIL
+#define _TLF_OFFSTAGE		TS_OFFSTAGE
+#define _TLF_OOBTRAP		TS_OOBTRAP
 
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_COMPAT
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/tlbflush.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/tlbflush.h
--- linux-5.15.26/arch/x86/include/asm/tlbflush.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/tlbflush.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:40 @ static inline void cr4_set_bits(unsigned
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cr4_set_bits_irqsoff(mask);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* Clear in this cpu's CR4. */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:50 @ static inline void cr4_clear_bits(unsign
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cr4_clear_bits_irqsoff(mask);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 #ifndef MODULE
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/include/asm/uaccess.h linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/uaccess.h
--- linux-5.15.26/arch/x86/include/asm/uaccess.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/include/asm/uaccess.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:47 @ static inline bool __chk_range_not_ok(un
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline bool pagefault_disabled(void);
 # define WARN_ON_IN_IRQ()	\
-	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
+	WARN_ON_ONCE(running_inband() && !in_task() && !pagefault_disabled())
 #else
 # define WARN_ON_IN_IRQ()
 #endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/Kconfig linux-dovetail-v5.15.y-dovetail/arch/x86/Kconfig
--- linux-5.15.26/arch/x86/Kconfig	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/Kconfig	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:32 @ config X86_64
 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_SOFT_DIRTY
+	select HAVE_IRQ_PIPELINE
+	select HAVE_DOVETAIL
 	select MODULES_USE_ELF_RELA
 	select NEED_DMA_MAP_STATE
 	select SWIOTLB
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:227 @ config X86
 	select HAVE_MOVE_PMD
 	select HAVE_MOVE_PUD
 	select HAVE_NMI
+	select HAVE_PERCPU_PREEMPT_COUNT
 	select HAVE_OPTPROBES
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:878 @ config ACRN_GUEST
 
 endif #HYPERVISOR_GUEST
 
+source "kernel/Kconfig.dovetail"
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/alternative.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/alternative.c
--- linux-5.15.26/arch/x86/kernel/alternative.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/alternative.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:11 @
 #include <linux/list.h>
 #include <linux/stringify.h>
 #include <linux/highmem.h>
+#include <linux/irq_pipeline.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/memory.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:212 @ static __always_inline int optimize_nops
 	if (nnops <= 1)
 		return nnops;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	add_nops(instr + off, nnops);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:698 @ void __init_or_module text_poke_early(vo
 		 */
 		memcpy(addr, opcode, len);
 	} else {
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		memcpy(addr, opcode, len);
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		sync_core();
 
 		/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:732 @ static inline temp_mm_state_t use_tempor
 	temp_mm_state_t temp_state;
 
 	lockdep_assert_irqs_disabled();
+	WARN_ON_ONCE(irq_pipeline_debug() && !hard_irqs_disabled());
 
 	/*
 	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:826 @ static void *__text_poke(void *addr, con
 	 */
 	VM_BUG_ON(!ptep);
 
-	local_irq_save(flags);
+	local_irq_save_full(flags);
 
 	pte = mk_pte(pages[0], pgprot);
 	set_pte_at(poking_mm, poking_addr, ptep, pte);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:877 @ static void *__text_poke(void *addr, con
 	 */
 	BUG_ON(memcmp(addr, opcode, len));
 
-	local_irq_restore(flags);
+	local_irq_restore_full(flags);
 	pte_unmap_unlock(ptep, ptl);
 	return addr;
 }
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/apic/apic.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/apic.c
--- linux-5.15.26/arch/x86/kernel/apic/apic.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/apic.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:34 @
 #include <linux/i8253.h>
 #include <linux/dmar.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 #include <linux/cpu.h>
 #include <linux/dmi.h>
 #include <linux/smp.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:278 @ void native_apic_icr_write(u32 low, u32
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
 	apic_write(APIC_ICR, low);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 u64 native_apic_icr_read(void)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:337 @ int lapic_get_maxlvt(void)
 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
 {
 	unsigned int lvtt_value, tmp_value;
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
 
 	lvtt_value = LOCAL_TIMER_VECTOR;
 	if (!oneshot)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:362 @ static void __setup_APIC_LVTT(unsigned i
 		 * According to Intel, MFENCE can do the serialization here.
 		 */
 		asm volatile("mfence" : : : "memory");
+		hard_cond_local_irq_restore(flags);
+		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
 		return;
 	}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:377 @ static void __setup_APIC_LVTT(unsigned i
 
 	if (!oneshot)
 		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:484 @ static int lapic_next_event(unsigned lon
 static int lapic_next_deadline(unsigned long delta,
 			       struct clock_event_device *evt)
 {
+	unsigned long flags;
 	u64 tsc;
 
 	/* This MSR is special and need a special fence: */
 	weak_wrmsr_fence();
 
+	flags = hard_local_irq_save();
 	tsc = rdtsc();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+	hard_local_irq_restore(flags);
 	return 0;
 }
 
 static int lapic_timer_shutdown(struct clock_event_device *evt)
 {
+	unsigned long flags;
 	unsigned int v;
 
 	/* Lapic used as dummy for broadcast ? */
 	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
 		return 0;
 
+	flags = hard_local_irq_save();
 	v = apic_read(APIC_LVTT);
 	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
 	apic_write(APIC_LVTT, v);
 	apic_write(APIC_TMICT, 0);
+	hard_local_irq_restore(flags);
 	return 0;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:546 @ static void lapic_timer_broadcast(const
 #endif
 }
 
+static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+#define LAPIC_TIMER_IRQ  apicm_vector_irq(LOCAL_TIMER_VECTOR)
+
+static irqreturn_t lapic_oob_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
+
+	trace_local_timer_entry(LOCAL_TIMER_VECTOR);
+	clockevents_handle_event(evt);
+	trace_local_timer_exit(LOCAL_TIMER_VECTOR);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction lapic_oob_action = {
+	.handler = lapic_oob_handler,
+	.name = "Out-of-band LAPIC timer interrupt",
+	.flags = IRQF_TIMER | IRQF_PERCPU,
+};
+
+#else
+#define LAPIC_TIMER_IRQ  -1
+#endif
 
 /*
  * The local apic timer can be used for any function which is CPU local.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:579 @ static void lapic_timer_broadcast(const
 static struct clock_event_device lapic_clockevent = {
 	.name				= "lapic",
 	.features			= CLOCK_EVT_FEAT_PERIODIC |
-					  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
-					  | CLOCK_EVT_FEAT_DUMMY,
+					  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP  |
+					  CLOCK_EVT_FEAT_PIPELINE | CLOCK_EVT_FEAT_DUMMY,
 	.shift				= 32,
 	.set_state_shutdown		= lapic_timer_shutdown,
 	.set_state_periodic		= lapic_timer_set_periodic,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:589 @ static struct clock_event_device lapic_c
 	.set_next_event			= lapic_next_event,
 	.broadcast			= lapic_timer_broadcast,
 	.rating				= 100,
-	.irq				= -1,
+	.irq				= LAPIC_TIMER_IRQ,
 };
-static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
 static const struct x86_cpu_id deadline_match[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1086 @ void __init setup_boot_APIC_clock(void)
 	/* Setup the lapic or request the broadcast */
 	setup_APIC_timer();
 	amd_e400_c1e_apic_setup();
+#ifdef CONFIG_IRQ_PIPELINE
+	setup_percpu_irq(LAPIC_TIMER_IRQ, &lapic_oob_action);
+#endif
 }
 
 void setup_secondary_APIC_clock(void)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1139 @ static void local_apic_timer_interrupt(v
  * [ if a single-CPU system runs an SMP kernel then we call the local
  *   interrupt as well. Thus we cannot inline the local irq ... ]
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(LOCAL_TIMER_VECTOR,
+				 sysvec_apic_timer_interrupt)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1561 @ static bool apic_check_and_ack(union api
 		 * per set bit.
 		 */
 		for_each_set_bit(bit, isr->map, APIC_IR_BITS)
-			ack_APIC_irq();
+			__ack_APIC_irq();
 		return true;
 	}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2211 @ static noinline void handle_spurious_int
 	if (v & (1 << (vector & 0x1f))) {
 		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
 			vector, smp_processor_id());
-		ack_APIC_irq();
+		__ack_APIC_irq();
 	} else {
 		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
 			vector, smp_processor_id());
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2229 @ out:
  * trigger on an entry which is routed to the common_spurious idtentry
  * point.
  */
-DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+DEFINE_IDTENTRY_IRQ_PIPELINED(spurious_interrupt)
 {
 	handle_spurious_interrupt(vector);
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(SPURIOUS_APIC_VECTOR,
+				 sysvec_spurious_apic_interrupt)
 {
 	handle_spurious_interrupt(SPURIOUS_APIC_VECTOR);
 }
 
 /*
  * This interrupt should never happen with our APIC/SMP architecture
+ *
+ * irq_pipeline: same as spurious_interrupt, would run directly out of
+ * the IDT, no deferral via the interrupt log which means that only
+ * the hardware IRQ state is considered for masking.
  */
 DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
 {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/apic/apic_flat_64.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/apic_flat_64.c
--- linux-5.15.26/arch/x86/kernel/apic/apic_flat_64.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/apic_flat_64.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:55 @ static void _flat_send_IPI_mask(unsigned
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/apic/apic_numachip.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/apic_numachip.c
--- linux-5.15.26/arch/x86/kernel/apic/apic_numachip.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/apic_numachip.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:106 @ static void numachip_send_IPI_one(int cp
 	if (!((apicid ^ local_apicid) >> NUMACHIP_LAPIC_BITS)) {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		__default_send_IPI_dest_field(apicid, vector,
 			APIC_DEST_PHYSICAL);
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		preempt_enable();
 		return;
 	}
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/apic/io_apic.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/io_apic.c
--- linux-5.15.26/arch/x86/kernel/apic/io_apic.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/io_apic.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:81 @
 #define for_each_irq_pin(entry, head) \
 	list_for_each_entry(entry, &head, list)
 
-static DEFINE_RAW_SPINLOCK(ioapic_lock);
+static DEFINE_HARD_SPINLOCK(ioapic_lock);
 static DEFINE_MUTEX(ioapic_mutex);
 static unsigned int ioapic_dynirq_base;
 static int ioapic_initialized;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1637 @ static int __init timer_irq_works(void)
 	if (no_timer_check)
 		return 1;
 
-	local_irq_enable();
+	local_irq_enable_full();
 	if (boot_cpu_has(X86_FEATURE_TSC))
 		delay_with_tsc();
 	else
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1651 @ static int __init timer_irq_works(void)
 	 * least one tick may be lost due to delays.
 	 */
 
-	local_irq_disable();
+	local_irq_disable_full();
 
 	/* Did jiffies advance? */
 	return time_after(jiffies, t1 + 4);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1722 @ static bool io_apic_level_ack_pending(st
 	return false;
 }
 
+static inline void do_prepare_move(struct irq_data *data)
+{
+	if (!irqd_irq_masked(data))
+		mask_ioapic_irq(data);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static inline void ioapic_finish_move(struct irq_data *data, bool moveit);
+
+static void ioapic_deferred_irq_move(struct irq_work *work)
+{
+	struct irq_data *data;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	data = container_of(work, struct irq_data, move_work);
+	desc = irq_data_to_desc(data);
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	do_prepare_move(data);
+	ioapic_finish_move(data, true);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+static inline bool __ioapic_prepare_move(struct irq_data *data)
+{
+	init_irq_work(&data->move_work, ioapic_deferred_irq_move);
+	irq_work_queue(&data->move_work);
+
+	return false;	/* Postpone ioapic_finish_move(). */
+}
+
+#else  /* !CONFIG_IRQ_PIPELINE */
+
+static inline bool __ioapic_prepare_move(struct irq_data *data)
+{
+	do_prepare_move(data);
+
+	return true;
+}
+
+#endif
+
 static inline bool ioapic_prepare_move(struct irq_data *data)
 {
 	/* If we are moving the IRQ we need to mask it */
-	if (unlikely(irqd_is_setaffinity_pending(data))) {
-		if (!irqd_irq_masked(data))
-			mask_ioapic_irq(data);
-		return true;
-	}
+	if (irqd_is_setaffinity_pending(data) &&
+		!irqd_is_setaffinity_blocked(data))
+		return __ioapic_prepare_move(data);
+
 	return false;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1870 @ static void ioapic_ack_level(struct irq_
 	 * We must acknowledge the irq before we move it or the acknowledge will
 	 * not propagate properly.
 	 */
-	ack_APIC_irq();
+	__ack_APIC_irq();
 
 	/*
 	 * Tail end of clearing remote IRR bit (either by delivering the EOI
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2032 @ static struct irq_chip ioapic_chip __rea
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_get_irqchip_state	= ioapic_irq_get_chip_state,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct irq_chip ioapic_ir_chip __read_mostly = {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2047 @ static struct irq_chip ioapic_ir_chip __
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_get_irqchip_state	= ioapic_irq_get_chip_state,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static inline void init_IO_APIC_traps(void)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2095 @ static void unmask_lapic_irq(struct irq_
 
 static void ack_lapic_irq(struct irq_data *data)
 {
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 static struct irq_chip lapic_chip __read_mostly = {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2103 @ static struct irq_chip lapic_chip __read
 	.irq_mask	= mask_lapic_irq,
 	.irq_unmask	= unmask_lapic_irq,
 	.irq_ack	= ack_lapic_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void lapic_register_intr(int irq)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2223 @ static inline void __init check_timer(vo
 	if (!global_clock_event)
 		return;
 
-	local_irq_disable();
+	local_irq_disable_full();
 
 	/*
 	 * get/set the timer IRQ vector:
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2356 @ static inline void __init check_timer(vo
 	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
 		"report.  Then try booting with the 'noapic' option.\n");
 out:
-	local_irq_enable();
+	local_irq_enable_full();
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:3085 @ int mp_irqdomain_alloc(struct irq_domain
 	mp_preconfigure_entry(data);
 	mp_register_handler(virq, data->is_level);
 
-	local_irq_save(flags);
+	local_irq_save_full(flags);
 	if (virq < nr_legacy_irqs())
 		legacy_pic->mask(virq);
-	local_irq_restore(flags);
+	local_irq_restore_full(flags);
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG
 		    "IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n",
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/apic/ipi.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/ipi.c
--- linux-5.15.26/arch/x86/kernel/apic/ipi.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/ipi.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:120 @ void __default_send_IPI_shortcut(unsigne
 	 * cli/sti.  Otherwise we use an even cheaper single atomic write
 	 * to the APIC.
 	 */
+	unsigned long flags;
 	unsigned int cfg;
 
+	flags = hard_cond_local_irq_save();
 	/*
 	 * Wait for idle.
 	 */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:142 @ void __default_send_IPI_shortcut(unsigne
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:152 @ void __default_send_IPI_shortcut(unsigne
  */
 void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
 {
+	unsigned long flags;
 	unsigned long cfg;
 
+	flags = hard_cond_local_irq_save();
 	/*
 	 * Wait for idle.
 	 */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:179 @ void __default_send_IPI_dest_field(unsig
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void default_send_IPI_single_phys(int cpu, int vector)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
 				      vector, APIC_DEST_PHYSICAL);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:203 @ void default_send_IPI_mask_sequence_phys
 	 * to an arbitrary mask, so I do a unicast to each CPU instead.
 	 * - mbligh
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:220 @ void default_send_IPI_mask_allbutself_ph
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				 query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:267 @ void default_send_IPI_mask_sequence_logi
 	 * should be modified to do 1 message per cluster ID - mbligh
 	 */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask)
 		__default_send_IPI_dest_field(
 			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, APIC_DEST_LOGICAL);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:284 @ void default_send_IPI_mask_allbutself_lo
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:292 @ void default_send_IPI_mask_allbutself_lo
 			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, APIC_DEST_LOGICAL);
 		}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:306 @ void default_send_IPI_mask_logical(const
 	if (!mask)
 		return;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* must come after the send_IPI functions above for inlining */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/apic/msi.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/msi.c
--- linux-5.15.26/arch/x86/kernel/apic/msi.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/msi.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:156 @ static struct irq_chip pci_msi_controlle
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_set_affinity	= msi_set_affinity,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:227 @ static struct irq_chip pci_msi_ir_contro
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_info pci_msi_ir_domain_info = {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:283 @ static struct irq_chip dmar_msi_controll
 	.irq_compose_msi_msg	= dmar_msi_compose_msg,
 	.irq_write_msi_msg	= dmar_msi_write_msg,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static int dmar_msi_init(struct irq_domain *domain,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/apic/vector.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/vector.c
--- linux-5.15.26/arch/x86/kernel/apic/vector.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/vector.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:42 @ struct apic_chip_data {
 
 struct irq_domain *x86_vector_domain;
 EXPORT_SYMBOL_GPL(x86_vector_domain);
-static DEFINE_RAW_SPINLOCK(vector_lock);
+static DEFINE_HARD_SPINLOCK(vector_lock);
 static cpumask_var_t vector_searchmask;
 static struct irq_chip lapic_controller;
 static struct irq_matrix *vector_matrix;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:816 @ static struct irq_desc *__setup_vector_i
 {
 	int isairq = vector - ISA_IRQ_VECTOR(0);
 
+	/* Copy the cleanup vector if irqs are pipelined. */
+	if (IS_ENABLED(CONFIG_IRQ_PIPELINE) &&
+		vector == IRQ_MOVE_CLEANUP_VECTOR)
+		return irq_to_desc(IRQ_MOVE_CLEANUP_VECTOR); /* 1:1 mapping */
 	/* Check whether the irq is in the legacy space */
 	if (isairq < 0 || isairq >= nr_legacy_irqs())
 		return VECTOR_UNUSED;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:854 @ void lapic_online(void)
 
 void lapic_offline(void)
 {
-	lock_vector_lock();
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	irq_matrix_offline(vector_matrix);
-	unlock_vector_lock();
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static int apic_set_affinity(struct irq_data *irqd,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:866 @ static int apic_set_affinity(struct irq_
 {
 	int err;
 
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
+
 	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
 		return -EIO;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:897 @ static int apic_retrigger_irq(struct irq
 	return 1;
 }
 
-void apic_ack_irq(struct irq_data *irqd)
+#if defined(CONFIG_IRQ_PIPELINE) &&	\
+	defined(CONFIG_GENERIC_PENDING_IRQ)
+
+static void apic_deferred_irq_move(struct irq_work *work)
+{
+	struct irq_data *irqd;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	irqd = container_of(work, struct irq_data, move_work);
+	desc = irq_data_to_desc(irqd);
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	__irq_move_irq(irqd);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+static inline void apic_move_irq(struct irq_data *irqd)
+{
+	if (irqd_is_setaffinity_pending(irqd) &&
+		!irqd_is_setaffinity_blocked(irqd)) {
+		init_irq_work(&irqd->move_work, apic_deferred_irq_move);
+		irq_work_queue(&irqd->move_work);
+	}
+}
+
+#else
+
+static inline void apic_move_irq(struct irq_data *irqd)
 {
 	irq_move_irq(irqd);
-	ack_APIC_irq();
+}
+
+#endif
+
+void apic_ack_irq(struct irq_data *irqd)
+{
+	apic_move_irq(irqd);
+	__ack_APIC_irq();
 }
 
 void apic_ack_edge(struct irq_data *irqd)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:983 @ static void free_moved_vector(struct api
 	apicd->move_in_progress = 0;
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(IRQ_MOVE_CLEANUP_VECTOR,
+				 sysvec_irq_move_cleanup)
 {
 	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
 	struct apic_chip_data *apicd;
 	struct hlist_node *tmp;
+	unsigned long flags;
 
 	ack_APIC_irq();
 	/* Prevent vectors vanishing under us */
-	raw_spin_lock(&vector_lock);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 
 	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
 		unsigned int irr, vector = apicd->prev_vector;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1015 @ DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_c
 		free_moved_vector(apicd);
 	}
 
-	raw_spin_unlock(&vector_lock);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static void __send_cleanup_vector(struct apic_chip_data *apicd)
 {
+	unsigned long flags;
 	unsigned int cpu;
 
-	raw_spin_lock(&vector_lock);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	apicd->move_in_progress = 0;
 	cpu = apicd->prev_cpu;
 	if (cpu_online(cpu)) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1032 @ static void __send_cleanup_vector(struct
 	} else {
 		apicd->prev_vector = 0;
 	}
-	raw_spin_unlock(&vector_lock);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1071 @ void irq_force_complete_move(struct irq_
 	struct irq_data *irqd;
 	unsigned int vector;
 
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
+
 	/*
 	 * The function is called for all descriptors regardless of which
 	 * irqdomain they belong to. For example if an IRQ is provided by
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1163 @ unlock:
 int lapic_can_unplug_cpu(void)
 {
 	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
+	unsigned long flags;
 	int ret = 0;
 
-	raw_spin_lock(&vector_lock);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	tomove = irq_matrix_allocated(vector_matrix);
 	avl = irq_matrix_available(vector_matrix, true);
 	if (avl < tomove) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1181 @ int lapic_can_unplug_cpu(void)
 			rsvd, avl);
 	}
 out:
-	raw_spin_unlock(&vector_lock);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return ret;
 }
 #endif /* HOTPLUG_CPU */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/apic/x2apic_cluster.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/x2apic_cluster.c
--- linux-5.15.26/arch/x86/kernel/apic/x2apic_cluster.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/x2apic_cluster.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:47 @ __x2apic_send_IPI_mask(const struct cpum
 
 	/* x2apic MSRs are special and need a special fence: */
 	weak_wrmsr_fence();
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
 	cpumask_copy(tmpmsk, mask);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:71 @ __x2apic_send_IPI_mask(const struct cpum
 		cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
 	}
 
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/apic/x2apic_phys.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/x2apic_phys.c
--- linux-5.15.26/arch/x86/kernel/apic/x2apic_phys.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/apic/x2apic_phys.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:61 @ __x2apic_send_IPI_mask(const struct cpum
 	/* x2apic MSRs are special and need a special fence: */
 	weak_wrmsr_fence();
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	this_cpu = smp_processor_id();
 	for_each_cpu(query_cpu, mask) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:70 @ __x2apic_send_IPI_mask(const struct cpum
 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
 				       vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/asm-offsets.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/asm-offsets.c
--- linux-5.15.26/arch/x86/kernel/asm-offsets.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/asm-offsets.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:41 @ static void __used common(void)
 #endif
 
 	BLANK();
+#ifdef CONFIG_IRQ_PIPELINE
+	DEFINE(OOB_stage_mask, STAGE_MASK);
+#endif
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
 
 	BLANK();
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/cpu/acrn.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/acrn.c
--- linux-5.15.26/arch/x86/kernel/cpu/acrn.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/acrn.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:40 @ static bool acrn_x2apic_available(void)
 
 static void (*acrn_intr_handler)(void);
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_acrn_hv_callback)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,
+				 sysvec_acrn_hv_callback)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/cpu/mce/amd.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mce/amd.c
--- linux-5.15.26/arch/x86/kernel/cpu/mce/amd.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mce/amd.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:937 @ static void __log_error(unsigned int ban
 	mce_log(&m);
 }
 
+/*
+ * irq_pipeline: Deferred error events have NMI semantics wrt
+ * pipelining, they can and should be handled immediately out of the
+ * IDT.
+ */
 DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error)
 {
 	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
 	inc_irq_stat(irq_deferred_error_count);
 	deferred_error_int_vector();
 	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 /*
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/cpu/mce/core.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mce/core.c
--- linux-5.15.26/arch/x86/kernel/cpu/mce/core.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mce/core.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1487 @ noinstr void do_machine_check(struct pt_
 		/* If this triggers there is no way to recover. Die hard. */
 		BUG_ON(!on_thread_stack() || !user_mode(regs));
 
+		hard_local_irq_enable();
 		queue_task_work(&m, msg, kill_current_task);
+		hard_local_irq_disable();
 
 	} else {
 		/*
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/cpu/mce/threshold.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mce/threshold.c
--- linux-5.15.26/arch/x86/kernel/cpu/mce/threshold.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mce/threshold.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:30 @ DEFINE_IDTENTRY_SYSVEC(sysvec_threshold)
 	inc_irq_stat(irq_threshold_count);
 	mce_threshold_vector();
 	trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR);
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/cpu/mshyperv.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mshyperv.c
--- linux-5.15.26/arch/x86/kernel/cpu/mshyperv.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mshyperv.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:46 @ static void (*hv_stimer0_handler)(void);
 static void (*hv_kexec_handler)(void);
 static void (*hv_crash_handler)(struct pt_regs *regs);
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,
+				 sysvec_hyperv_callback)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:76 @ void hv_remove_vmbus_handler(void)
  * Routines to do per-architecture handling of stimer0
  * interrupts when in Direct Mode
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_STIMER0_VECTOR,
+				 sysvec_hyperv_stimer0)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/cpu/mtrr/generic.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mtrr/generic.c
--- linux-5.15.26/arch/x86/kernel/cpu/mtrr/generic.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/cpu/mtrr/generic.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:452 @ void __init mtrr_bp_pat_init(void)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	prepare_set();
 
 	pat_init();
 
 	post_set();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* Grab all of the MTRR state for this CPU into *state */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:799 @ static void generic_set_all(void)
 	unsigned long mask, count;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	prepare_set();
 
 	/* Actually set the state */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:809 @ static void generic_set_all(void)
 	pat_init();
 
 	post_set();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	/* Use the atomic bitops to update the global mask */
 	for (count = 0; count < sizeof(mask) * 8; ++count) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:838 @ static void generic_set_mtrr(unsigned in
 
 	vr = &mtrr_state.var_ranges[reg];
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	prepare_set();
 
 	if (size == 0) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:859 @ static void generic_set_mtrr(unsigned in
 	}
 
 	post_set();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 int generic_validate_add_page(unsigned long base, unsigned long size,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/dumpstack.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/dumpstack.c
--- linux-5.15.26/arch/x86/kernel/dumpstack.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/dumpstack.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:10 @
 #include <linux/uaccess.h>
 #include <linux/utsname.h>
 #include <linux/hardirq.h>
+#include <linux/irq_pipeline.h>
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:339 @ unsigned long oops_begin(void)
 	oops_enter();
 
 	/* racy, but better than risking deadlock. */
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cpu = smp_processor_id();
 	if (!arch_spin_trylock(&die_lock)) {
 		if (cpu == die_owner)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:369 @ void oops_end(unsigned long flags, struc
 	if (!die_nest_count)
 		/* Nest count reaches zero, release the lock. */
 		arch_spin_unlock(&die_lock);
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	oops_exit();
 
 	/* Executive summary in case the oops scrolled away */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:398 @ static void __die_header(const char *str
 {
 	const char *pr = "";
 
+	irq_pipeline_oops();
+
 	/* Save the regs of the first oops for the executive summary later. */
 	if (!die_counter)
 		exec_summary_regs = *regs;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:408 @ static void __die_header(const char *str
 		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
 
 	printk(KERN_DEFAULT
-	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
+	       "%s: %04lx [#%d]%s%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
 	       pr,
 	       IS_ENABLED(CONFIG_SMP)     ? " SMP"             : "",
 	       debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
 	       IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "",
 	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
-	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
+	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "",
+	       irqs_pipelined()           ? " IRQ_PIPELINE"    : "");
 }
 NOKPROBE_SYMBOL(__die_header);
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/fpu/core.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/fpu/core.c
--- linux-5.15.26/arch/x86/kernel/fpu/core.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/fpu/core.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:18 @
 
 #include <linux/hardirq.h>
 #include <linux/pkeys.h>
+#include <linux/cpuhotplug.h>
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/fpu.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:80 @ static bool interrupted_user_mode(void)
  */
 bool irq_fpu_usable(void)
 {
-	return !in_interrupt() ||
-		interrupted_user_mode() ||
-		interrupted_kernel_fpu_idle();
+	return running_inband() &&
+		(!in_interrupt() ||
+			interrupted_user_mode() ||
+			interrupted_kernel_fpu_idle());
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:158 @ EXPORT_SYMBOL_GPL(__restore_fpregs_from_
 
 void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
+	unsigned long flags;
+
 	preempt_disable();
 
 	WARN_ON_FPU(!irq_fpu_usable());
 	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
 
+	flags = hard_cond_local_irq_save();
+
 	this_cpu_write(in_kernel_fpu, true);
 
 	if (!(current->flags & PF_KTHREAD) &&
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:182 @ void kernel_fpu_begin_mask(unsigned int
 
 	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
 		asm volatile ("fninit");
+
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:202 @ EXPORT_SYMBOL_GPL(kernel_fpu_end);
  */
 void fpu_sync_fpstate(struct fpu *fpu)
 {
+	unsigned long flags;
+
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	fpregs_lock();
+	flags = fpregs_lock();
 	trace_x86_fpu_before_save(fpu);
 
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 		save_fpregs_to_fpstate(fpu);
 
 	trace_x86_fpu_after_save(fpu);
-	fpregs_unlock();
+	fpregs_unlock(flags);
 }
 
 static inline void fpstate_init_xstate(struct xregs_state *xsave)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:265 @ int fpu_clone(struct task_struct *dst)
 {
 	struct fpu *src_fpu = &current->thread.fpu;
 	struct fpu *dst_fpu = &dst->thread.fpu;
+	unsigned long flags;
 
 	/* The new task's FPU state cannot be valid in the hardware. */
 	dst_fpu->last_cpu = -1;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:284 @ int fpu_clone(struct task_struct *dst)
 	 * state.  Otherwise save the FPU registers directly into the
 	 * child's FPU context, without any memory-to-memory copying.
 	 */
-	fpregs_lock();
+	flags = fpregs_lock();
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 		memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
 
 	else
 		save_fpregs_to_fpstate(dst_fpu);
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:311 @ int fpu_clone(struct task_struct *dst)
  */
 void fpu__drop(struct fpu *fpu)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	flags = hard_preempt_disable();
 
 	if (fpu == &current->thread.fpu) {
 		/* Ignore delayed exceptions from user space */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:325 @ void fpu__drop(struct fpu *fpu)
 
 	trace_x86_fpu_dropped(fpu);
 
-	preempt_enable();
+	hard_preempt_enable(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:359 @ static inline unsigned int init_fpstate_
 static void fpu_reset_fpstate(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
+	unsigned long flags;
 
-	fpregs_lock();
+	flags = fpregs_lock();
 	fpu__drop(fpu);
 	/*
 	 * This does not change the actual hardware registers. It just
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:378 @ static void fpu_reset_fpstate(void)
 	 */
 	memcpy(&fpu->state, &init_fpstate, init_fpstate_copy_size());
 	set_thread_flag(TIF_NEED_FPU_LOAD);
-	fpregs_unlock();
+	fpregs_unlock(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:388 @ static void fpu_reset_fpstate(void)
  */
 void fpu__clear_user_states(struct fpu *fpu)
 {
+	unsigned long flags;
+
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	fpregs_lock();
+	flags = fpregs_lock();
 	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
 		fpu_reset_fpstate();
-		fpregs_unlock();
+		fpregs_unlock(flags);
 		return;
 	}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:419 @ void fpu__clear_user_states(struct fpu *
 	 * current's FPU is marked active.
 	 */
 	fpregs_mark_activate();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 }
 
 void fpu_flush_thread(void)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:431 @ void fpu_flush_thread(void)
  */
 void switch_fpu_return(void)
 {
+	unsigned long flags;
+
 	if (!static_cpu_has(X86_FEATURE_FPU))
 		return;
 
+	flags = hard_cond_local_irq_save();
 	fpregs_restore_userregs();
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(switch_fpu_return);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:538 @ int fpu__exception_code(struct fpu *fpu,
 	 */
 	return 0;
 }
+
+#ifdef CONFIG_DOVETAIL
+
+/*
+ * Holds the in-kernel fpu state when preempted by a task running on
+ * the out-of-band stage.
+ */
+static DEFINE_PER_CPU(struct fpu *, in_kernel_fpstate);
+
+static int fpu__init_kernel_fpstate(unsigned int cpu)
+{
+	struct fpu *fpu;
+
+	fpu = kzalloc(sizeof(*fpu) + fpu_kernel_xstate_size, GFP_KERNEL);
+	if (fpu == NULL)
+		return -ENOMEM;
+
+	this_cpu_write(in_kernel_fpstate, fpu);
+	fpstate_init(&fpu->state);
+
+	return 0;
+}
+
+static int fpu__drop_kernel_fpstate(unsigned int cpu)
+{
+	struct fpu *fpu = this_cpu_read(in_kernel_fpstate);
+
+	kfree(fpu);
+
+	return 0;
+}
+
+void fpu__suspend_inband(void)
+{
+	struct fpu *kfpu = this_cpu_read(in_kernel_fpstate);
+	struct task_struct *tsk = current;
+
+	if (kernel_fpu_disabled()) {
+		save_fpregs_to_fpstate(kfpu);
+		__cpu_invalidate_fpregs_state();
+		oob_fpu_set_preempt(&tsk->thread.fpu);
+	}
+}
+
+void fpu__resume_inband(void)
+{
+	struct fpu *kfpu = this_cpu_read(in_kernel_fpstate);
+	struct task_struct *tsk = current;
+
+	if (oob_fpu_preempted(&tsk->thread.fpu)) {
+		restore_fpregs_from_fpstate(&kfpu->state);
+		__cpu_invalidate_fpregs_state();
+		oob_fpu_clear_preempt(&tsk->thread.fpu);
+	} else if (!(tsk->flags & PF_KTHREAD) &&
+		test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_return();
+}
+
+static int __init fpu__init_dovetail(void)
+{
+	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+			"platform/x86/dovetail:online",
+			fpu__init_kernel_fpstate, fpu__drop_kernel_fpstate);
+	return 0;
+}
+core_initcall(fpu__init_dovetail);
+
+#endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/fpu/signal.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/fpu/signal.c
--- linux-5.15.26/arch/x86/kernel/fpu/signal.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/fpu/signal.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:69 @ setfx:
  */
 static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
 {
+	unsigned long flags;
+
 	if (use_fxsr()) {
 		struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
 		struct user_i387_ia32_struct env;
 		struct _fpstate_32 __user *fp = buf;
 
-		fpregs_lock();
+		flags = fpregs_lock();
 		if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 			fxsave(&tsk->thread.fpu.state.fxsave);
-		fpregs_unlock();
+		fpregs_unlock(flags);
 
 		convert_from_fxsr(&env, tsk);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:179 @ int copy_fpstate_to_sigframe(void __user
 {
 	struct task_struct *tsk = current;
 	int ia32_fxstate = (buf != buf_fx);
+	unsigned long flags;
 	int ret;
 
 	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:201 @ retry:
 	 * userland's stack frame which will likely succeed. If it does not,
 	 * resolve the fault in the user memory and try again.
 	 */
-	fpregs_lock();
+	flags = fpregs_lock();
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 		fpregs_restore_userregs();
 
 	pagefault_disable();
 	ret = copy_fpregs_to_sigframe(buf_fx);
 	pagefault_enable();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	if (ret) {
 		if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:256 @ static int restore_fpregs_from_user(void
 				    bool fx_only, unsigned int size)
 {
 	struct fpu *fpu = &current->thread.fpu;
+	unsigned long flags;
 	int ret;
 
 retry:
-	fpregs_lock();
+	flags = fpregs_lock();
 	pagefault_disable();
 	ret = __restore_fpregs_from_user(buf, xrestore, fx_only);
 	pagefault_enable();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:279 @ retry:
 		 */
 		if (test_thread_flag(TIF_NEED_FPU_LOAD))
 			__cpu_invalidate_fpregs_state();
-		fpregs_unlock();
+		fpregs_unlock(flags);
 
 		/* Try to handle #PF, but anything else is fatal. */
 		if (ret != -EFAULT)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:304 @ retry:
 		os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
 
 	fpregs_mark_activate();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 	return 0;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:317 @ static int __fpu_restore_sig(void __user
 	struct user_i387_ia32_struct env;
 	u64 user_xfeatures = 0;
 	bool fx_only = false;
+	unsigned long flags;
 	int ret;
 
 	if (use_xsave()) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:360 @ static int __fpu_restore_sig(void __user
 	 * to be loaded again on return to userland (overriding last_cpu avoids
 	 * the optimisation).
 	 */
-	fpregs_lock();
+	flags = fpregs_lock();
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
 		/*
 		 * If supervisor states are available then save the
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:376 @ static int __fpu_restore_sig(void __user
 	}
 	__fpu_invalidate_fpregs_state(fpu);
 	__cpu_invalidate_fpregs_state();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	if (use_xsave() && !fx_only) {
 		ret = copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:404 @ static int __fpu_restore_sig(void __user
 	/* Fold the legacy FP storage */
 	convert_to_fxsr(&fpu->state.fxsave, &env);
 
-	fpregs_lock();
+	flags = fpregs_lock();
 	if (use_xsave()) {
 		/*
 		 * Remove all UABI feature bits not set in user_xfeatures
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:426 @ static int __fpu_restore_sig(void __user
 	if (likely(!ret))
 		fpregs_mark_activate();
 
-	fpregs_unlock();
+	fpregs_unlock(flags);
 	return ret;
 }
 static inline int xstate_sigframe_size(void)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/fpu/xstate.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/fpu/xstate.c
--- linux-5.15.26/arch/x86/kernel/fpu/xstate.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/fpu/xstate.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:919 @ int arch_set_user_pkey_access(struct tas
 			      unsigned long init_val)
 {
 	u32 old_pkru, new_pkru_bits = 0;
+	unsigned long flags;
 	int pkey_shift;
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:947 @ int arch_set_user_pkey_access(struct tas
 	pkey_shift = pkey * PKRU_BITS_PER_PKEY;
 	new_pkru_bits <<= pkey_shift;
 
+	flags = hard_cond_local_irq_save();
+
 	/* Get old PKRU and mask off any old bits in place: */
 	old_pkru = read_pkru();
 	old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:956 @ int arch_set_user_pkey_access(struct tas
 	/* Write old part along with new part: */
 	write_pkru(old_pkru | new_pkru_bits);
 
+	hard_cond_local_irq_restore(flags);
+
 	return 0;
 }
 #endif /* ! CONFIG_ARCH_HAS_PKEYS */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/hpet.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/hpet.c
--- linux-5.15.26/arch/x86/kernel/hpet.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/hpet.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:410 @ static void hpet_init_clockevent(struct
 	evt->set_next_event	= hpet_clkevt_set_next_event;
 	evt->set_state_shutdown	= hpet_clkevt_set_state_shutdown;
 
-	evt->features = CLOCK_EVT_FEAT_ONESHOT;
+	evt->features = CLOCK_EVT_FEAT_ONESHOT|CLOCK_EVT_FEAT_PIPELINE;
 	if (hc->boot_cfg & HPET_TN_PERIODIC) {
 		evt->features		|= CLOCK_EVT_FEAT_PERIODIC;
 		evt->set_state_periodic	= hpet_clkevt_set_state_periodic;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:512 @ static struct irq_chip hpet_msi_controll
 	.irq_set_affinity = msi_domain_set_affinity,
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
 	.irq_write_msi_msg = hpet_msi_write_msg,
-	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
+	.flags = IRQCHIP_SKIP_SET_WAKE |
+		 IRQCHIP_AFFINITY_PRE_STARTUP |
+		 IRQCHIP_PIPELINE_SAFE,
 };
 
 static int hpet_msi_init(struct irq_domain *domain,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:634 @ static irqreturn_t hpet_msi_interrupt_ha
 		return IRQ_HANDLED;
 	}
 
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 	return IRQ_HANDLED;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:817 @ static u64 read_hpet(struct clocksource
 	if (arch_spin_is_locked(&old.lock))
 		goto contended;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	if (arch_spin_trylock(&hpet.lock)) {
 		new.value = hpet_readl(HPET_COUNTER);
 		/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:825 @ static u64 read_hpet(struct clocksource
 		 */
 		WRITE_ONCE(hpet.value, new.value);
 		arch_spin_unlock(&hpet.lock);
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return (u64)new.value;
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 contended:
 	/*
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/i8259.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/i8259.c
--- linux-5.15.26/arch/x86/kernel/i8259.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/i8259.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:36 @
 static void init_8259A(int auto_eoi);
 
 static int i8259A_auto_eoi;
-DEFINE_RAW_SPINLOCK(i8259A_lock);
+DEFINE_HARD_SPINLOCK(i8259A_lock);
 
 /*
  * 8259A PIC functions to handle ISA devices:
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:230 @ struct irq_chip i8259A_chip = {
 	.irq_disable	= disable_8259A_irq,
 	.irq_unmask	= enable_8259A_irq,
 	.irq_mask_ack	= mask_and_ack_8259A,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static char irq_trigger[2];
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/idt.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/idt.c
--- linux-5.15.26/arch/x86/kernel/idt.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/idt.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:128 @ static const __initconst struct idt_data
 	INTG(CALL_FUNCTION_SINGLE_VECTOR,	asm_sysvec_call_function_single),
 	INTG(IRQ_MOVE_CLEANUP_VECTOR,		asm_sysvec_irq_move_cleanup),
 	INTG(REBOOT_VECTOR,			asm_sysvec_reboot),
+#ifdef CONFIG_IRQ_PIPELINE
+	INTG(RESCHEDULE_OOB_VECTOR,		asm_sysvec_reschedule_oob_ipi),
+	INTG(TIMER_OOB_VECTOR,			asm_sysvec_timer_oob_ipi),
+#endif
 #endif
 
 #ifdef CONFIG_X86_THERMAL_VECTOR
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/irq.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/irq.c
--- linux-5.15.26/arch/x86/kernel/irq.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/irq.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:7 @
  */
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 #include <linux/kernel_stat.h>
 #include <linux/of.h>
 #include <linux/seq_file.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:53 @ void ack_bad_irq(unsigned int irq)
 	 * completely.
 	 * But only ack when the APIC is enabled -AK
 	 */
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 #define irq_stats(x)		(&per_cpu(irq_stat, x))
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:240 @ static __always_inline void handle_irq(s
 /*
  * common_interrupt() handles all normal device IRQ's (the special SMP
  * cross-CPU interrupts have their own entry points).
+ *
+ * Compiled out if CONFIG_IRQ_PIPELINE is enabled, replaced by
+ * arch_handle_irq().
  */
-DEFINE_IDTENTRY_IRQ(common_interrupt)
+DEFINE_IDTENTRY_IRQ_PIPELINED(common_interrupt)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct irq_desc *desc;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:276 @ void (*x86_platform_ipi_callback)(void)
 /*
  * Handler for X86_PLATFORM_IPI_VECTOR.
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(X86_PLATFORM_IPI_VECTOR,
+				 sysvec_x86_platform_ipi)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:309 @ EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wa
 /*
  * Handler for POSTED_INTERRUPT_VECTOR.
  */
-DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(POSTED_INTR_VECTOR,
+					sysvec_kvm_posted_intr_ipi)
 {
 	ack_APIC_irq();
 	inc_irq_stat(kvm_posted_intr_ipis);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:319 @ DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm
 /*
  * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_WAKEUP_VECTOR,
+				 sysvec_kvm_posted_intr_wakeup_ipi)
 {
 	ack_APIC_irq();
 	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:330 @ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted
 /*
  * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
  */
-DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(POSTED_INTR_NESTED_VECTOR,
+					sysvec_kvm_posted_intr_nested_ipi)
 {
 	ack_APIC_irq();
 	inc_irq_stat(kvm_posted_intr_nested_ipis);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:405 @ DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
 	inc_irq_stat(irq_thermal_count);
 	smp_thermal_vector();
 	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 #endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/irq_pipeline.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/irq_pipeline.c
--- linux-5.15.26/arch/x86/kernel/irq_pipeline.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/irq_pipeline.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/irq_pipeline.h>
+#include <asm/irqdomain.h>
+#include <asm/apic.h>
+#include <asm/traps.h>
+#include <asm/irq_work.h>
+#include <asm/mshyperv.h>
+#include <asm/idtentry.h>
+
+static struct irq_domain *sipic_domain;
+
+static void sipic_irq_noop(struct irq_data *data) { }
+
+static unsigned int sipic_irq_noop_ret(struct irq_data *data)
+{
+	return 0;
+}
+
+static struct irq_chip sipic_chip = {
+	.name		= "SIPIC",
+	.irq_startup	= sipic_irq_noop_ret,
+	.irq_shutdown	= sipic_irq_noop,
+	.irq_enable	= sipic_irq_noop,
+	.irq_disable	= sipic_irq_noop,
+	.flags		= IRQCHIP_PIPELINE_SAFE | IRQCHIP_SKIP_SET_WAKE,
+};
+
+void handle_apic_irq(struct irq_desc *desc)
+{
+	if (WARN_ON_ONCE(irq_pipeline_debug() && !on_pipeline_entry()))
+		return;
+
+	/*
+	 * MCE events are non-maskable therefore their in-band
+	 * handlers have to be oob-compatible by construction. Those
+	 * handlers run immediately out of the IDT for this reason as
+	 * well. We won't see them here since they are not routed via
+	 * arch_handle_irq() -> generic_pipeline_irq_desc().
+	 *
+	 * All we need to do at this stage is to acknowledge other
+	 * APIC events, then pipeline the corresponding interrupt from
+	 * our synthetic controller chip (SIPIC).
+	 */
+	__ack_APIC_irq();
+
+	handle_oob_irq(desc);
+}
+
+void irq_send_oob_ipi(unsigned int ipi,
+		const struct cpumask *cpumask)
+{
+	apic->send_IPI_mask_allbutself(cpumask, apicm_irq_vector(ipi));
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+static irqentry_state_t pipeline_enter_rcu(void)
+{
+	irqentry_state_t state = {
+		.exit_rcu = false,
+		.stage_info = IRQENTRY_INBAND_UNSTALLED,
+	};
+
+	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
+		rcu_irq_enter();
+		state.exit_rcu = true;
+	} else {
+		rcu_irq_enter_check_tick();
+	}
+
+	return state;
+}
+
+static void pipeline_exit_rcu(irqentry_state_t state)
+{
+	if (state.exit_rcu)
+		rcu_irq_exit();
+}
+
+static void do_sysvec_inband(struct irq_desc *desc, struct pt_regs *regs)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	int vector = apicm_irq_vector(irq);
+
+	/*
+	 * This code only sees pipelined sysvec events tagged with
+	 * DEFINE_IDTENTRY_SYSVEC_PIPELINED:
+	 *
+	 * 	arch_handle_irq(irq)
+	 *		generic_pipeline_irq_desc(irq)
+	 *			handle_apic_irq(irq)
+	 *				handle_oob_irq(irq)
+	 *				[...irq_post_inband...]
+	 *
+	 *      arch_do_IRQ_pipelined(desc)
+	 *           do_sysvec_inband(desc)
+	 *               <switch_to_irqstack>
+	 *                     |
+	 *                     v
+	 *	         sysvec_handler(regs)
+	 *
+	 * System vectors which are still tagged as
+	 * DEFINE_IDTENTRY_SYSVEC/DEFINE_IDTENTRY_SYSVEC_SIMPLE are
+	 * directly dispatched out of the IDT, assuming their handler
+	 * is oob-safe (like NMI handlers) therefore never reach this
+	 * in-band stage handler.
+	 *
+	 * NOTE: we expand run_sysvec_on_irqstack_cond() each time,
+	 * which is ugly. But the irqstack code makes assumptions we
+	 * don't want to break.
+	 */
+
+	switch (vector) {
+#ifdef CONFIG_SMP
+	case RESCHEDULE_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_reschedule_ipi,
+					regs);
+		break;
+	case CALL_FUNCTION_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_call_function,
+					regs);
+		break;
+	case CALL_FUNCTION_SINGLE_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_call_function_single,
+					regs);
+		break;
+	case REBOOT_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_reboot, regs);
+		break;
+#endif
+	case X86_PLATFORM_IPI_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_x86_platform_ipi,
+					regs);
+		break;
+	case IRQ_WORK_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_irq_work, regs);
+		break;
+#ifdef CONFIG_HAVE_KVM
+	case POSTED_INTR_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_kvm_posted_intr_ipi,
+					regs);
+		break;
+	case POSTED_INTR_WAKEUP_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_kvm_posted_intr_wakeup_ipi,
+					regs);
+		break;
+	case POSTED_INTR_NESTED_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_kvm_posted_intr_nested_ipi,
+					regs);
+		break;
+#endif
+#ifdef CONFIG_HYPERV
+	case HYPERVISOR_CALLBACK_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_hyperv_callback,
+					regs);
+		break;
+	case HYPERV_REENLIGHTENMENT_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_hyperv_reenlightenment,
+					regs);
+		break;
+	case HYPERV_STIMER0_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_hyperv_stimer0,
+					regs);
+		break;
+#endif
+#ifdef CONFIG_ACRN_GUEST
+	/*
+	 * NOTE(review): HYPERVISOR_CALLBACK_VECTOR is also used as a
+	 * case label under CONFIG_HYPERV and CONFIG_XEN_PVHVM in this
+	 * switch; duplicate case labels are a build error if more than
+	 * one of these configs is enabled — confirm they are mutually
+	 * exclusive.
+	 */
+	case HYPERVISOR_CALLBACK_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_acrn_hv_callback,
+					regs);
+		break;
+#endif
+#ifdef CONFIG_XEN_PVHVM
+	case HYPERVISOR_CALLBACK_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_xen_hvm_callback,
+					regs);
+		break;
+#endif
+	case LOCAL_TIMER_VECTOR:
+		run_sysvec_on_irqstack_cond(__sysvec_apic_timer_interrupt,
+					regs);
+		break;
+	default:
+		printk_once(KERN_ERR "irq_pipeline: unexpected event"
+			" on vector #%.2x (irq=%u)\n", vector, irq);
+		return;
+	}
+}
+
+static void do_irq_inband(struct pt_regs *regs, u32 irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	desc->handle_irq(desc);
+}
+
+void arch_do_IRQ_pipelined(struct irq_desc *desc)
+{
+	struct pt_regs *regs = raw_cpu_ptr(&irq_pipeline.tick_regs), *old_regs;
+	irqentry_state_t state;
+
+	/* Emulate a kernel entry. */
+	state = pipeline_enter_rcu();
+
+	if (desc->irq_data.domain == sipic_domain) {
+		do_sysvec_inband(desc, regs);
+	} else {
+		/*
+		 * XXX: the following is ugly, but the irqstack
+		 * switching code is not that flexible. However, we
+		 * don't want to provide a separate implementation of
+		 * the latter in the pipelined case, so let's get
+		 * along with it.
+		 */
+		u32 vector = irq_desc_get_irq(desc); /* irq carried as 'vector' */
+		old_regs = set_irq_regs(regs);
+		run_irq_on_irqstack_cond(do_irq_inband, regs, vector);
+		set_irq_regs(old_regs);
+	}
+
+	pipeline_exit_rcu(state);
+}
+
+void arch_handle_irq(struct pt_regs *regs, u8 vector, bool irq_movable)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
+	unsigned int irq;
+
+	if (vector >= FIRST_SYSTEM_VECTOR) {
+		irq = apicm_vector_irq(vector);
+		desc = irq_to_desc(irq);
+	} else {
+		desc = __this_cpu_read(vector_irq[vector]);
+		if (unlikely(IS_ERR_OR_NULL(desc))) {
+			__ack_APIC_irq();
+
+			if (desc == VECTOR_UNUSED) {
+				pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
+						__func__, smp_processor_id(),
+						vector);
+			} else {
+				__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+			}
+			return;
+		}
+		if (irqd_is_setaffinity_pending(&desc->irq_data)) {
+			raw_spin_lock(&desc->lock);
+			if (irq_movable)
+				irqd_clr_move_blocked(&desc->irq_data);
+			else
+				irqd_set_move_blocked(&desc->irq_data);
+			raw_spin_unlock(&desc->lock);
+		}
+	}
+
+	generic_pipeline_irq_desc(desc, regs);
+
+	set_irq_regs(old_regs);
+}
+
+noinstr void arch_pipeline_entry(struct pt_regs *regs, u8 vector)
+{
+	struct irq_stage_data *prevd;
+	irqentry_state_t state;
+
+	/*
+	 * The tricky one: we distinguish the following cases:
+	 *
+	 * [1] entry from oob context, either kernel or user code was
+	 * preempted by the IRQ, the in-band (virtual) interrupt state
+	 * is 'undefined' (could be either stalled/unstalled, it is
+	 * not relevant).
+	 *
+	 * [2] entry from in-band context while the stage is stalled,
+	 * which means that some kernel code was preempted by the IRQ
+	 * since in-band user code cannot run with interrupts
+	 * (virtually) disabled.
+	 *
+	 * [3] entry from in-band context while the stage is
+	 * unstalled: the common case for IRQ entry. Kernel or user
+	 * code may have been preempted, we handle the event
+	 * identically.
+	 *
+	 * [1] and [2] are processed almost the same way, except for
+	 * one key aspect: the potential stage demotion of the
+	 * preempted task which originally entered [1] on the oob
+	 * stage, then left it for the in-band stage as a result of
+	 * handling the IRQ (such demotion normally happens during
+	 * handle_irq_pipelined_finish() if required). In this
+	 * particular case, we want to run the common IRQ epilogue
+	 * code before returning to user mode, so that all pending
+	 * in-band work (_TIF_WORK_*) is carried out for the task
+	 * which is about to exit kernel mode.
+	 *
+	 * If the task runs in-band at the exit point and a user mode
+	 * context was preempted, then case [2] is excluded by
+	 * definition so we know for sure that we just observed a
+	 * stage demotion, therefore we have to run the work loop by
+	 * calling irqentry_exit_to_user_mode().
+	 */
+	if (unlikely(running_oob() || irqs_disabled())) {
+		instrumentation_begin();
+		prevd = handle_irq_pipelined_prepare(regs);
+		arch_handle_irq(regs, vector, false);
+		kvm_set_cpu_l1tf_flush_l1d();
+		handle_irq_pipelined_finish(prevd, regs);
+		if (running_inband() && user_mode(regs)) {
+			stall_inband_nocheck();
+			irqentry_exit_to_user_mode(regs);
+		}
+		instrumentation_end();
+		return;
+	}
+
+	/* In-band on entry, accepting interrupts. */
+	state = irqentry_enter(regs);
+	instrumentation_begin();
+	/* Prep for handling, switching oob. */
+	prevd = handle_irq_pipelined_prepare(regs);
+	arch_handle_irq(regs, vector, true);
+	kvm_set_cpu_l1tf_flush_l1d();
+	/* irqentry_enter() stalled the in-band stage. */
+	trace_hardirqs_on();
+	unstall_inband_nocheck();
+	handle_irq_pipelined_finish(prevd, regs);
+	stall_inband_nocheck();
+	trace_hardirqs_off();
+	instrumentation_end();
+	irqentry_exit(regs, state);
+}
+
+static int sipic_irq_map(struct irq_domain *d, unsigned int irq,
+			irq_hw_number_t hwirq)
+{
+	irq_set_percpu_devid(irq);
+	irq_set_chip_and_handler(irq, &sipic_chip, handle_apic_irq);
+
+	return 0;
+}
+
+static struct irq_domain_ops sipic_domain_ops = {
+	.map	= sipic_irq_map,
+};
+
+static void create_x86_apic_domain(void)
+{
+	sipic_domain = irq_domain_add_simple(NULL, NR_APIC_VECTORS,
+					     FIRST_SYSTEM_IRQ,
+					     &sipic_domain_ops, NULL);
+}
+
+#ifdef CONFIG_SMP
+
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(RESCHEDULE_OOB_VECTOR,
+				 sysvec_reschedule_oob_ipi)
+{ /* In-band handler is unused. */ }
+
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(TIMER_OOB_VECTOR,
+				 sysvec_timer_oob_ipi)
+{ /* In-band handler is unused. */ }
+
+void handle_irq_move_cleanup(struct irq_desc *desc)
+{
+	if (on_pipeline_entry()) {
+		/* 1. on receipt from hardware. */
+		__ack_APIC_irq();
+		handle_oob_irq(desc);
+	} else {
+		/* 2. in-band delivery. */
+		__sysvec_irq_move_cleanup(NULL);
+	}
+}
+
+static void smp_setup(void)
+{
+	int irq;
+
+	/*
+	 * The IRQ cleanup event must be pipelined to the inband
+	 * stage, so we need a valid IRQ descriptor for it. Since we
+	 * still are in the early boot stage on CPU0, we ask for a 1:1
+	 * mapping between the vector number and IRQ number, to make
+	 * things easier for us later on.
+	 */
+	irq = irq_alloc_desc_at(IRQ_MOVE_CLEANUP_VECTOR, 0);
+	WARN_ON(IRQ_MOVE_CLEANUP_VECTOR != irq);
+	/*
+	 * Set up the vector_irq[] mapping array for the boot CPU,
+	 * other CPUs will copy this entry when their APIC is going
+	 * online (see lapic_online()).
+	 */
+	per_cpu(vector_irq, 0)[irq] = irq_to_desc(irq);
+
+	irq_set_chip_and_handler(irq, &dummy_irq_chip,
+				handle_irq_move_cleanup);
+}
+
+#else
+
+static void smp_setup(void) { }
+
+#endif
+
+void __init arch_irq_pipeline_init(void)
+{
+	/*
+	 * Create an IRQ domain for mapping APIC system interrupts
+	 * (in-band and out-of-band), with fixed sirq numbers starting
+	 * from FIRST_SYSTEM_IRQ. Upon receipt of a system interrupt,
+	 * the corresponding sirq is injected into the pipeline.
+	 */
+	create_x86_apic_domain();
+
+	smp_setup();
+}
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/irq_work.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/irq_work.c
--- linux-5.15.26/arch/x86/kernel/irq_work.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/irq_work.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:17 @
 #include <linux/interrupt.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
-DEFINE_IDTENTRY_SYSVEC(sysvec_irq_work)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(IRQ_WORK_VECTOR,
+				 sysvec_irq_work)
 {
 	ack_APIC_irq();
 	trace_irq_work_entry(IRQ_WORK_VECTOR);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/kvm.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/kvm.c
--- linux-5.15.26/arch/x86/kernel/kvm.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/kvm.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:244 @ noinstr bool __kvm_handle_async_pf(struc
 {
 	u32 flags = kvm_read_and_reset_apf_flags();
 	irqentry_state_t state;
+	unsigned long irqflags;
 
 	if (!flags)
 		return false;
 
 	state = irqentry_enter(regs);
+	oob_trap_notify(X86_TRAP_PF, regs);
 	instrumentation_begin();
+	irqflags = hard_cond_local_irq_save();
 
 	/*
 	 * If the host managed to inject an async #PF into an interrupt
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:271 @ noinstr bool __kvm_handle_async_pf(struc
 		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
 	}
 
+	hard_cond_local_irq_restore(irqflags);
 	instrumentation_end();
+	oob_trap_unwind(X86_TRAP_PF, regs);
 	irqentry_exit(regs, state);
 	return true;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:438 @ static void __init sev_map_percpu_data(v
 
 static void kvm_guest_cpu_offline(bool shutdown)
 {
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
 	kvm_disable_steal_time();
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:448 @ static void kvm_guest_cpu_offline(bool s
 	if (!shutdown)
 		apf_task_wake_all();
 	kvmclock_disable();
+	hard_cond_local_irq_restore(flags);
 }
 
 static int kvm_cpu_online(unsigned int cpu)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_irq_save_full(flags);
 	kvm_guest_cpu_init();
-	local_irq_restore(flags);
+	local_irq_restore_full(flags);
 	return 0;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:891 @ static void kvm_wait(u8 *ptr, u8 val)
 	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
 	 */
 	if (irqs_disabled()) {
+		hard_local_irq_disable();
+
 		if (READ_ONCE(*ptr) == val)
 			halt();
+
+		hard_local_irq_enable();
 	} else {
-		local_irq_disable();
+		local_irq_disable_full();
 
 		/* safe_halt() will enable IRQ */
 		if (READ_ONCE(*ptr) == val)
 			safe_halt();
-		else
-			local_irq_enable();
+
+		local_irq_enable_full();
 	}
 }
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/Makefile linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/Makefile
--- linux-5.15.26/arch/x86/kernel/Makefile	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/Makefile	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:133 @ obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.
 obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
 
 obj-$(CONFIG_JAILHOUSE_GUEST)	+= jailhouse.o
+obj-$(CONFIG_IRQ_PIPELINE)	+= irq_pipeline.o
 
 obj-$(CONFIG_EISA)		+= eisa.o
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/nmi.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/nmi.c
--- linux-5.15.26/arch/x86/kernel/nmi.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/nmi.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:476 @ static DEFINE_PER_CPU(enum nmi_states, n
 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
 static DEFINE_PER_CPU(unsigned long, nmi_dr7);
 
+/*
+ * IRQ pipeline: fixing up the virtual IRQ state makes no sense on
+ * NMI.
+ */
 DEFINE_IDTENTRY_RAW(exc_nmi)
 {
 	irqentry_state_t irq_state;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/process_64.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/process_64.c
--- linux-5.15.26/arch/x86/kernel/process_64.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/process_64.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:276 @ void current_save_fsgs(void)
 	unsigned long flags;
 
 	/* Interrupts need to be off for FSGSBASE */
-	local_irq_save(flags);
+	local_irq_save_full(flags);
 	save_fsgs(current);
-	local_irq_restore(flags);
+	local_irq_restore_full(flags);
 }
 #if IS_ENABLED(CONFIG_KVM)
 EXPORT_SYMBOL_GPL(current_save_fsgs);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:437 @ unsigned long x86_gsbase_read_cpu_inacti
 	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		local_irq_save_full(flags);
 		gsbase = __rdgsbase_inactive();
-		local_irq_restore(flags);
+		local_irq_restore_full(flags);
 	} else {
 		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:452 @ void x86_gsbase_write_cpu_inactive(unsig
 	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		local_irq_save_full(flags);
 		__wrgsbase_inactive(gsbase);
-		local_irq_restore(flags);
+		local_irq_restore_full(flags);
 	} else {
 		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:565 @ __switch_to(struct task_struct *prev_p,
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
 
+	/*
+	 * Dovetail: Switching context on the out-of-band stage is
+	 * legit, and we may have preempted an in-band (soft)irq
+	 * handler earlier. Since oob handlers never switch stack,
+	 * make sure to restrict the following test to in-band
+	 * callers.
+	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
-		     this_cpu_read(hardirq_stack_inuse));
+		     running_inband() && this_cpu_read(hardirq_stack_inuse));
+
+	WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled());
 
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 		switch_fpu_prepare(prev_fpu, cpu);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:757 @ static long prctl_map_vdso(const struct
 
 long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 {
+	unsigned long flags;
 	int ret = 0;
 
 	switch (option) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:765 @ long do_arch_prctl_64(struct task_struct
 		if (unlikely(arg2 >= TASK_SIZE_MAX))
 			return -EPERM;
 
-		preempt_disable();
+		flags = hard_preempt_disable();
 		/*
 		 * ARCH_SET_GS has always overwritten the index
 		 * and the base. Zero is the most sensible value
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:786 @ long do_arch_prctl_64(struct task_struct
 			task->thread.gsindex = 0;
 			x86_gsbase_write_task(task, arg2);
 		}
-		preempt_enable();
+		hard_preempt_enable(flags);
 		break;
 	}
 	case ARCH_SET_FS: {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:797 @ long do_arch_prctl_64(struct task_struct
 		if (unlikely(arg2 >= TASK_SIZE_MAX))
 			return -EPERM;
 
-		preempt_disable();
+		flags = hard_preempt_disable();
 		/*
 		 * Set the selector to 0 for the same reason
 		 * as %gs above.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:815 @ long do_arch_prctl_64(struct task_struct
 			task->thread.fsindex = 0;
 			x86_fsbase_write_task(task, arg2);
 		}
-		preempt_enable();
+		hard_preempt_enable(flags);
 		break;
 	}
 	case ARCH_GET_FS: {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/process.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/process.c
--- linux-5.15.26/arch/x86/kernel/process.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/process.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:612 @ void speculation_ctrl_update(unsigned lo
 	unsigned long flags;
 
 	/* Forced update. Make sure all relevant TIF flags are different */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__speculation_ctrl_update(~tif, tif);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* Called from seccomp/prctl update */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:714 @ void arch_cpu_idle(void)
 
 /*
  * We use this if we don't have any better idle routine..
+ *
+ * IRQ pipeline: safe_halt() returns with hard irqs on, caller does
+ * not need to force enable.
  */
 void __cpuidle default_idle(void)
 {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:739 @ bool xen_set_default_idle(void)
 
 void stop_this_cpu(void *dummy)
 {
-	local_irq_disable();
+	hard_local_irq_disable();
 	/*
 	 * Remove this CPU:
 	 */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:835 @ static __cpuidle void mwait_idle(void)
 		}
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		if (!need_resched())
+		if (!need_resched()) {
 			__sti_mwait(0, 0);
-		else
+		} else {
+			hard_cond_local_irq_enable();
 			raw_local_irq_enable();
+		}
 	} else {
+		hard_cond_local_irq_enable();
 		raw_local_irq_enable();
 	}
 	__current_clr_polling();
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/smpboot.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/smpboot.c
--- linux-5.15.26/arch/x86/kernel/smpboot.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/smpboot.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:265 @ static void notrace start_secondary(void
 	x86_platform.nmi_init();
 
 	/* enable local interrupts */
-	local_irq_enable();
+	local_irq_enable_full();
 
 	x86_cpuinit.setup_percpu_clockev();
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1141 @ int native_cpu_up(unsigned int cpu, stru
 {
 	int apicid = apic->cpu_present_to_apicid(cpu);
 	int cpu0_nmi_registered = 0;
-	unsigned long flags;
 	int err, ret = 0;
 
 	lockdep_assert_irqs_enabled();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1191 @ int native_cpu_up(unsigned int cpu, stru
 	 * Check TSC synchronization with the AP (keep irqs disabled
 	 * while doing so):
 	 */
-	local_irq_save(flags);
+	local_irq_disable_full();
 	check_tsc_sync_source(cpu);
-	local_irq_restore(flags);
+	local_irq_enable_full();
 
 	while (!cpu_online(cpu)) {
 		cpu_relax();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1666 @ void play_dead_common(void)
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
 	 */
-	local_irq_disable();
+	local_irq_disable_full();
 }
 
 /**
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/smp.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/smp.c
--- linux-5.15.26/arch/x86/kernel/smp.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/smp.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:134 @ static int smp_stop_nmi_callback(unsigne
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(REBOOT_VECTOR, sysvec_reboot)
 {
 	ack_APIC_irq();
 	cpu_emergency_vmxoff();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:215 @ static void native_stop_other_cpus(int w
 			udelay(1);
 	}
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	disable_local_APIC();
 	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
  * Reschedule call back. KVM uses this interrupt to force a cpu out of
  * guest mode.
  */
-DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(RESCHEDULE_VECTOR,
+					sysvec_reschedule_ipi)
 {
 	ack_APIC_irq();
 	trace_reschedule_entry(RESCHEDULE_VECTOR);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:235 @ DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_res
 	trace_reschedule_exit(RESCHEDULE_VECTOR);
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_call_function)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_VECTOR,
+				 sysvec_call_function)
 {
 	ack_APIC_irq();
 	trace_call_function_entry(CALL_FUNCTION_VECTOR);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:245 @ DEFINE_IDTENTRY_SYSVEC(sysvec_call_funct
 	trace_call_function_exit(CALL_FUNCTION_VECTOR);
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_call_function_single)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_SINGLE_VECTOR,
+				 sysvec_call_function_single)
 {
 	ack_APIC_irq();
 	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/time.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/time.c
--- linux-5.15.26/arch/x86/kernel/time.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/time.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:57 @ EXPORT_SYMBOL(profile_pc);
  */
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
-	global_clock_event->event_handler(global_clock_event);
+	clockevents_handle_event(global_clock_event);
 	return IRQ_HANDLED;
 }
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/traps.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/traps.c
--- linux-5.15.26/arch/x86/kernel/traps.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/traps.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:78 @ DECLARE_BITMAP(system_vectors, NR_VECTOR
 
 static inline void cond_local_irq_enable(struct pt_regs *regs)
 {
-	if (regs->flags & X86_EFLAGS_IF)
-		local_irq_enable();
+	if (regs->flags & X86_EFLAGS_IF) {
+		if (running_inband())
+			local_irq_enable_full();
+		else
+			hard_local_irq_enable();
+	}
 }
 
 static inline void cond_local_irq_disable(struct pt_regs *regs)
 {
-	if (regs->flags & X86_EFLAGS_IF)
-		local_irq_disable();
+	if (regs->flags & X86_EFLAGS_IF) {
+		if (running_inband())
+			local_irq_disable_full();
+		else
+			hard_local_irq_disable();
+	}
 }
 
 __always_inline int is_valid_bugaddr(unsigned long addr)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:163 @ static void show_signal(struct task_stru
 	}
 }
 
+static __always_inline
+void mark_trap_entry(int trapnr, struct pt_regs *regs)
+{
+	oob_trap_notify(trapnr, regs);
+	hard_cond_local_irq_enable();
+}
+
+static __always_inline
+void mark_trap_exit(int trapnr, struct pt_regs *regs)
+{
+	oob_trap_unwind(trapnr, regs);
+	hard_cond_local_irq_disable();
+}
+
+static __always_inline
+void mark_trap_entry_raw(int trapnr, struct pt_regs *regs)
+{
+	oob_trap_notify(trapnr, regs);
+}
+
+static __always_inline
+void mark_trap_exit_raw(int trapnr, struct pt_regs *regs)
+{
+	oob_trap_unwind(trapnr, regs);
+}
+
 static void
 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 	long error_code, int sicode, void __user *addr)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:212 @ static void do_error_trap(struct pt_regs
 {
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 
+	mark_trap_entry(trapnr, regs);
+
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
 			NOTIFY_STOP) {
 		cond_local_irq_enable(regs);
 		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
 		cond_local_irq_disable(regs);
 	}
+
+	mark_trap_exit(trapnr, regs);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:276 @ static noinstr bool handle_bug(struct pt
 	 * state to what it was at the exception site.
 	 */
 	if (regs->flags & X86_EFLAGS_IF)
-		raw_local_irq_enable();
+		local_irq_enable_full();
 	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
 		regs->ip += LEN_UD2;
 		handled = true;
 	}
 	if (regs->flags & X86_EFLAGS_IF)
-		raw_local_irq_disable();
+		local_irq_disable_full();
 	instrumentation_end();
 
 	return handled;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:301 @ DEFINE_IDTENTRY_RAW(exc_invalid_op)
 		return;
 
 	state = irqentry_enter(regs);
+	mark_trap_entry(X86_TRAP_UD, regs);
 	instrumentation_begin();
 	handle_invalid_op(regs);
 	instrumentation_end();
+	mark_trap_exit(X86_TRAP_UD, regs);
 	irqentry_exit(regs, state);
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:337 @ DEFINE_IDTENTRY_ERRORCODE(exc_alignment_
 {
 	char *str = "alignment check";
 
+	mark_trap_entry(X86_TRAP_AC, regs);
+
 	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
-		return;
+		goto mark_exit;
 
 	if (!user_mode(regs))
 		die("Split lock detected\n", regs, error_code);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:355 @ DEFINE_IDTENTRY_ERRORCODE(exc_alignment_
 
 out:
 	local_irq_disable();
+
+mark_exit:
+	mark_trap_exit(X86_TRAP_AC, regs);
 }
 
 #ifdef CONFIG_VMAP_STACK
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:395 @ __visible void __noreturn handle_stack_o
  *
  * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
  * to be read before doing anything else.
+ *
+ * Dovetail: do not even ask the companion core to try restoring the
+ * in-band stage on double-fault, this would be a lost cause.
  */
 DEFINE_IDTENTRY_DF(exc_double_fault)
 {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:521 @ DEFINE_IDTENTRY_DF(exc_double_fault)
 
 DEFINE_IDTENTRY(exc_bounds)
 {
+	mark_trap_entry(X86_TRAP_BR, regs);
+
 	if (notify_die(DIE_TRAP, "bounds", regs, 0,
 			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
-		return;
+		goto out;
 	cond_local_irq_enable(regs);
 
 	if (!user_mode(regs))
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:534 @ DEFINE_IDTENTRY(exc_bounds)
 	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);
 
 	cond_local_irq_disable(regs);
+out:
+	mark_trap_exit(X86_TRAP_BR, regs);
 }
 
 enum kernel_gp_hint {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:630 @ DEFINE_IDTENTRY_ERRORCODE(exc_general_pr
 	}
 
 	if (v8086_mode(regs)) {
-		local_irq_enable();
+		local_irq_enable_full();
 		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-		local_irq_disable();
+		local_irq_disable_full();
 		return;
 	}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:642 @ DEFINE_IDTENTRY_ERRORCODE(exc_general_pr
 		if (fixup_iopl_exception(regs))
 			goto exit;
 
+		mark_trap_entry(X86_TRAP_GP, regs);
 		tsk->thread.error_code = error_code;
 		tsk->thread.trap_nr = X86_TRAP_GP;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:651 @ DEFINE_IDTENTRY_ERRORCODE(exc_general_pr
 
 		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
 		force_sig(SIGSEGV);
-		goto exit;
+		goto mark_exit;
 	}
 
 	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:669 @ DEFINE_IDTENTRY_ERRORCODE(exc_general_pr
 	    kprobe_fault_handler(regs, X86_TRAP_GP))
 		goto exit;
 
+	mark_trap_entry(X86_TRAP_GP, regs);
+
 	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
 	if (ret == NOTIFY_STOP)
-		goto exit;
+		goto mark_exit;
 
 	if (error_code)
 		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:695 @ DEFINE_IDTENTRY_ERRORCODE(exc_general_pr
 
 	die_addr(desc, regs, error_code, gp_addr);
 
+mark_exit:
+	mark_trap_exit(X86_TRAP_GP, regs);
 exit:
 	cond_local_irq_disable(regs);
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:740 @ DEFINE_IDTENTRY_RAW(exc_int3)
 	if (poke_int3_handler(regs))
 		return;
 
+	mark_trap_entry_raw(X86_TRAP_BP, regs);
+
 	/*
 	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
 	 * and therefore can trigger INT3, hence poke_int3_handler() must
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:764 @ DEFINE_IDTENTRY_RAW(exc_int3)
 		instrumentation_end();
 		irqentry_nmi_exit(regs, irq_state);
 	}
+
+	mark_trap_exit_raw(X86_TRAP_BP, regs);
 }
 
 #ifdef CONFIG_X86_64
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1067 @ static __always_inline void exc_debug_us
 		goto out;
 
 	/* It's safe to allow irq's after DR6 has been saved */
-	local_irq_enable();
+	local_irq_enable_full();
 
 	if (v8086_mode(regs)) {
 		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1084 @ static __always_inline void exc_debug_us
 		send_sigtrap(regs, 0, get_si_code(dr6));
 
 out_irq:
-	local_irq_disable();
+	local_irq_disable_full();
 out:
 	instrumentation_end();
 	irqentry_exit_to_user_mode(regs);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1094 @ out:
 /* IST stack entry */
 DEFINE_IDTENTRY_DEBUG(exc_debug)
 {
+	mark_trap_entry_raw(X86_TRAP_DB, regs);
 	exc_debug_kernel(regs, debug_read_clear_dr6());
+	mark_trap_exit_raw(X86_TRAP_DB, regs);
 }
 
 /* User entry, runs on regular task stack */
 DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
 {
+	mark_trap_entry_raw(X86_TRAP_DB, regs);
 	exc_debug_user(regs, debug_read_clear_dr6());
+	mark_trap_exit_raw(X86_TRAP_DB, regs);
 }
 #else
 /* 32 bit does not have separate entry points. */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1138 @ static void math_error(struct pt_regs *r
 		if (fixup_exception(regs, trapnr, 0, 0))
 			goto exit;
 
+		mark_trap_entry(trapnr, regs);
 		task->thread.error_code = 0;
 		task->thread.trap_nr = trapnr;
 
 		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
 			       SIGFPE) != NOTIFY_STOP)
 			die(str, regs, 0);
-		goto exit;
+		goto mark_exit;
 	}
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1165 @ static void math_error(struct pt_regs *r
 	if (fixup_vdso_exception(regs, trapnr, 0, 0))
 		goto exit;
 
+	mark_trap_entry(trapnr, regs);
+
 	force_sig_fault(SIGFPE, si_code,
 			(void __user *)uprobe_get_trap_addr(regs));
+mark_exit:
+	mark_trap_exit(trapnr, regs);
 exit:
 	cond_local_irq_disable(regs);
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1243 @ DEFINE_IDTENTRY(exc_device_not_available
 		 * to kill the task than getting stuck in a never-ending
 		 * loop of #NM faults.
 		 */
+		mark_trap_entry(X86_TRAP_NM, regs);
 		die("unexpected #NM exception", regs, 0);
+		mark_trap_exit(X86_TRAP_NM, regs);
 	}
 }
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/tsc.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/tsc.c
--- linux-5.15.26/arch/x86/kernel/tsc.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/tsc.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:135 @ static void __set_cyc2ns_scale(unsigned
 {
 	unsigned long long ns_now;
 	struct cyc2ns_data data;
+	unsigned long flags;
 	struct cyc2ns *c2n;
 
+	flags = hard_cond_local_irq_save();
+
 	ns_now = cycles_2_ns(tsc_now);
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:170 @ static void __set_cyc2ns_scale(unsigned
 	c2n->data[0] = data;
 	raw_write_seqcount_latch(&c2n->seq);
 	c2n->data[1] = data;
+
+	hard_cond_local_irq_restore(flags);
 }
 
 static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:768 @ static unsigned long pit_hpet_ptimer_cal
 		 * calibration, which will take at least 50ms, and
 		 * read the end value.
 		 */
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		tsc1 = tsc_read_refs(&ref1, hpet);
 		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
 		tsc2 = tsc_read_refs(&ref2, hpet);
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 
 		/* Pick the lowest PIT TSC calibration so far */
 		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:881 @ unsigned long native_calibrate_cpu_early
 	if (!fast_calibrate)
 		fast_calibrate = cpu_khz_from_msr();
 	if (!fast_calibrate) {
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		fast_calibrate = quick_pit_calibrate();
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 	}
 	return fast_calibrate;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:951 @ void tsc_restore_sched_clock_state(void)
 	if (!sched_clock_stable())
 		return;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	/*
 	 * We're coming out of suspend, there's no concurrency yet; don't
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:969 @ void tsc_restore_sched_clock_state(void)
 		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
 	}
 
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 #ifdef CONFIG_CPU_FREQ
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1421 @ static int __init init_tsc_clocksource(v
 	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
 		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
 
+	clocksource_tsc.vdso_type = CLOCKSOURCE_VDSO_ARCHITECTED;
+
 	/*
 	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
 	 * the refined calibration and directly register it as a clocksource.
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kernel/tsc_sync.c linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/tsc_sync.c
--- linux-5.15.26/arch/x86/kernel/tsc_sync.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kernel/tsc_sync.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:370 @ void check_tsc_sync_source(int cpu)
 		atomic_set(&test_runs, 1);
 	else
 		atomic_set(&test_runs, 3);
+
+	hard_cond_local_irq_disable();
 retry:
 	/*
 	 * Wait for the target to start or to skip the test:
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:453 @ void check_tsc_sync_target(void)
 	if (unsynchronized_tsc())
 		return;
 
+	hard_cond_local_irq_disable();
+
 	/*
 	 * Store, verify and sanitize the TSC adjust register. If
 	 * successful skip the test.
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kvm/emulate.c linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/emulate.c
--- linux-5.15.26/arch/x86/kvm/emulate.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/emulate.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1084 @ static void fetch_register_operand(struc
 
 static int em_fninit(struct x86_emulate_ctxt *ctxt)
 {
+	unsigned long flags;
+
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	kvm_fpu_get();
+	flags = kvm_fpu_get();
 	asm volatile("fninit");
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 	return X86EMUL_CONTINUE;
 }
 
 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
 {
+	unsigned long flags;
 	u16 fcw;
 
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	kvm_fpu_get();
+	flags = kvm_fpu_get();
 	asm volatile("fnstcw %0": "+m"(fcw));
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 
 	ctxt->dst.val = fcw;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1114 @ static int em_fnstcw(struct x86_emulate_
 
 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
 {
+	unsigned long flags;
 	u16 fsw;
 
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	kvm_fpu_get();
+	flags = kvm_fpu_get();
 	asm volatile("fnstsw %0": "+m"(fsw));
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 
 	ctxt->dst.val = fsw;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4028 @ static inline size_t fxstate_size(struct
 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 {
 	struct fxregs_state fx_state;
+	unsigned long flags;
 	int rc;
 
 	rc = check_fxsr(ctxt);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	kvm_fpu_get();
+	flags = kvm_fpu_get();
 
 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
 
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4073 @ static int em_fxrstor(struct x86_emulate
 	struct fxregs_state fx_state;
 	int rc;
 	size_t size;
+	unsigned long flags;
 
 	rc = check_fxsr(ctxt);
 	if (rc != X86EMUL_CONTINUE)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4084 @ static int em_fxrstor(struct x86_emulate
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	kvm_fpu_get();
+	flags = kvm_fpu_get();
 
 	if (size < __fxstate_size(16)) {
 		rc = fxregs_fixup(&fx_state, size);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4101 @ static int em_fxrstor(struct x86_emulate
 		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 
 out:
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 
 	return rc;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:5347 @ static bool string_insn_completed(struct
 
 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 {
+	unsigned long flags;
 	int rc;
 
-	kvm_fpu_get();
+	flags = kvm_fpu_get();
 	rc = asm_safe("fwait");
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kvm/fpu.h linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/fpu.h
--- linux-5.15.26/arch/x86/kvm/fpu.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/fpu.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:98 @ static inline void _kvm_write_mmx_reg(in
 	}
 }
 
-static inline void kvm_fpu_get(void)
+static inline unsigned long kvm_fpu_get(void)
 {
-	fpregs_lock();
+	unsigned long flags = fpregs_lock();
 
 	fpregs_assert_state_consistent();
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 		switch_fpu_return();
+
+	return flags;
 }
 
-static inline void kvm_fpu_put(void)
+static inline void kvm_fpu_put(unsigned long flags)
 {
-	fpregs_unlock();
+	fpregs_unlock(flags);
 }
 
 static inline void kvm_read_sse_reg(int reg, sse128_t *data)
 {
-	kvm_fpu_get();
+	unsigned long flags = kvm_fpu_get();
 	_kvm_read_sse_reg(reg, data);
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 }
 
 static inline void kvm_write_sse_reg(int reg, const sse128_t *data)
 {
-	kvm_fpu_get();
+	unsigned long flags = kvm_fpu_get();
 	_kvm_write_sse_reg(reg, data);
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 }
 
 static inline void kvm_read_mmx_reg(int reg, u64 *data)
 {
-	kvm_fpu_get();
+	unsigned long flags = kvm_fpu_get();
 	_kvm_read_mmx_reg(reg, data);
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 }
 
 static inline void kvm_write_mmx_reg(int reg, const u64 *data)
 {
-	kvm_fpu_get();
+	unsigned long flags = kvm_fpu_get();
 	_kvm_write_mmx_reg(reg, data);
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 }
 
 #endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kvm/hyperv.c linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/hyperv.c
--- linux-5.15.26/arch/x86/kvm/hyperv.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/hyperv.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2106 @ static bool is_xmm_fast_hypercall(struct
 
 static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
 {
+	unsigned long flags;
 	int reg;
 
-	kvm_fpu_get();
+	flags = kvm_fpu_get();
 	for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
 		_kvm_read_sse_reg(reg, &hc->xmm[reg]);
-	kvm_fpu_put();
+	kvm_fpu_put(flags);
 }
 
 static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kvm/vmx/vmx.c linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/vmx/vmx.c
--- linux-5.15.26/arch/x86/kvm/vmx/vmx.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/vmx/vmx.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:603 @ static int vmx_set_guest_uret_msr(struct
 				  struct vmx_uret_msr *msr, u64 data)
 {
 	unsigned int slot = msr - vmx->guest_uret_msrs;
+	unsigned long flags;
 	int ret = 0;
 
 	u64 old_msr_data = msr->data;
 	msr->data = data;
 	if (msr->load_into_hardware) {
-		preempt_disable();
+		flags = hard_preempt_disable();
 		ret = kvm_set_user_return_msr(slot, msr->data, msr->mask);
-		preempt_enable();
+		hard_preempt_enable(flags);
 		if (ret)
 			msr->data = old_msr_data;
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1214 @ static void vmx_prepare_switch_to_host(s
 #ifdef CONFIG_X86_64
 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	flags = hard_preempt_disable();
 	if (vmx->guest_state_loaded)
 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-	preempt_enable();
+	hard_preempt_enable(flags);
 	return vmx->msr_guest_kernel_gs_base;
 }
 
 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	flags = hard_preempt_disable();
 	if (vmx->guest_state_loaded)
 		wrmsrl(MSR_KERNEL_GS_BASE, data);
-	preempt_enable();
+	hard_preempt_enable(flags);
 	vmx->msr_guest_kernel_gs_base = data;
 }
 #endif
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1663 @ static void vmx_setup_uret_msrs(struct v
 	 * The SYSCALL MSRs are only needed on long mode guests, and only
 	 * when EFER.SCE is set.
 	 */
+	hard_cond_local_irq_disable();
 	load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
 			    (vmx->vcpu.arch.efer & EFER_SCE);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1685 @ static void vmx_setup_uret_msrs(struct v
 	 */
 	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
 
+	hard_cond_local_irq_enable();
+
 	/*
 	 * The set of MSRs to load may have changed, reload MSRs before the
 	 * next VM-Enter.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1951 @ static int vmx_set_msr(struct kvm_vcpu *
 	u32 msr_index = msr_info->index;
 	u64 data = msr_info->data;
 	u32 index;
+	unsigned long flags;
 
 	switch (msr_index) {
 	case MSR_EFER:
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2233 @ static int vmx_set_msr(struct kvm_vcpu *
 
 	default:
 	find_uret_msr:
+		/*
+		 * Dovetail: guest MSRs may be activated independently
+		 * from vcpu_run(): rely on the notifier for restoring
+		 * them upon preemption by the companion core, right
+		 * before the current CPU switches to out-of-band
+		 * scheduling (see dovetail_context_switch()).
+		 */
 		msr = vmx_find_uret_msr(vmx, msr_index);
-		if (msr)
+		if (msr) {
+			flags = hard_cond_local_irq_save();
+			inband_enter_guest(vcpu);
 			ret = vmx_set_guest_uret_msr(vmx, msr, data);
-		else
+			hard_cond_local_irq_restore(flags);
+		} else {
 			ret = kvm_set_msr_common(vcpu, msr_info);
+		}
 	}
 
 	return ret;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:6909 @ static int vmx_create_vcpu(struct kvm_vc
 	vmx_vcpu_load(vcpu, cpu);
 	vcpu->cpu = cpu;
 	init_vmcs(vmx);
+	hard_cond_local_irq_disable();
 	vmx_vcpu_put(vcpu);
+	hard_cond_local_irq_enable();
 	put_cpu();
 	if (cpu_need_virtualize_apic_accesses(vcpu)) {
 		err = alloc_apic_access_page(vcpu->kvm);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/kvm/x86.c linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/x86.c
--- linux-5.15.26/arch/x86/kvm/x86.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/kvm/x86.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:197 @ module_param(pi_inject_timer, bint, S_IR
 struct kvm_user_return_msrs {
 	struct user_return_notifier urn;
 	bool registered;
+	bool dirty;
 	struct kvm_user_return_msr_values {
 		u64 host;
 		u64 curr;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:342 @ static inline void kvm_async_pf_hash_res
 		vcpu->arch.apf.gfns[i] = ~0;
 }
 
-static void kvm_on_user_return(struct user_return_notifier *urn)
+static void __kvm_on_user_return(struct kvm_user_return_msrs *msrs)
 {
+	struct kvm_user_return_msr_values *values;
 	unsigned slot;
+
+	if (!msrs->dirty)
+		return;
+
+	for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
+		values = &msrs->values[slot];
+		if (values->host != values->curr) {
+			wrmsrl(kvm_uret_msrs_list[slot], values->host);
+			values->curr = values->host;
+		}
+	}
+
+	msrs->dirty = false;
+}
+
+static void kvm_on_user_return(struct user_return_notifier *urn)
+{
 	struct kvm_user_return_msrs *msrs
 		= container_of(urn, struct kvm_user_return_msrs, urn);
-	struct kvm_user_return_msr_values *values;
 	unsigned long flags;
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:377 @ static void kvm_on_user_return(struct us
 		user_return_notifier_unregister(urn);
 	}
 	local_irq_restore(flags);
-	for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
-		values = &msrs->values[slot];
-		if (values->host != values->curr) {
-			wrmsrl(kvm_uret_msrs_list[slot], values->host);
-			values->curr = values->host;
-		}
-	}
+	flags = hard_cond_local_irq_save();
+	__kvm_on_user_return(msrs);
+	hard_cond_local_irq_restore(flags);
+	inband_exit_guest();
 }
 
 static int kvm_probe_user_return_msr(u32 msr)
 {
+	unsigned long flags;
 	u64 val;
 	int ret;
 
-	preempt_disable();
+	flags = hard_preempt_disable();
 	ret = rdmsrl_safe(msr, &val);
 	if (ret)
 		goto out;
 	ret = wrmsrl_safe(msr, val);
 out:
-	preempt_enable();
+	hard_preempt_enable(flags);
 	return ret;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:450 @ int kvm_set_user_return_msr(unsigned slo
 	if (err)
 		return 1;
 
+	msrs->dirty = true;
 	msrs->values[slot].curr = value;
 	if (!msrs->registered) {
 		msrs->urn.on_user_return = kvm_on_user_return;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4415 @ static void kvm_steal_time_set_preempted
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
+	unsigned long flags;
 	int idx;
 
 	if (vcpu->preempted && !vcpu->arch.guest_state_protected)
 		vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
+	flags = hard_cond_local_irq_save();
+	/*
+	 * Skip steal time accounting from the out-of-band stage since
+	 * this is oob-unsafe. We leave it to the next call from the
+	 * inband stage.
+	 */
+	if (running_oob())
+		goto skip_steal_time_update;
+
 	/*
 	 * Take the srcu lock as memslots will be accessed to check the gfn
 	 * cache generation against the memslots generation.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4442 @ void kvm_arch_vcpu_put(struct kvm_vcpu *
 		kvm_steal_time_set_preempted(vcpu);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
+skip_steal_time_update:
 	static_call(kvm_x86_vcpu_put)(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
+
+	inband_set_vcpu_release_state(vcpu, false);
+	if (!msrs->dirty)
+		inband_exit_guest();
+
+	hard_cond_local_irq_restore(flags);
+}
+
+#ifdef CONFIG_DOVETAIL
+/* hard irqs off. */
+void kvm_handle_oob_switch(struct kvm_oob_notifier *nfy)
+{
+	struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
+	struct kvm_vcpu *vcpu;
+
+	vcpu = container_of(nfy, struct kvm_vcpu, oob_notifier);
+	/*
+	 * If user_return MSRs were still active when leaving
+	 * kvm_arch_vcpu_put(), inband_exit_guest() was not invoked,
+	 * so we might get called later on before kvm_on_user_return()
+	 * had a chance to run, if a switch to out-of-band scheduling
+	 * sneaks in in the meantime.  Prevent kvm_arch_vcpu_put()
+	 * from running twice in such a case by checking ->put_vcpu
+	 * from the notifier block.
+	 */
+	if (nfy->put_vcpu)
+		kvm_arch_vcpu_put(vcpu);
+
+	__kvm_on_user_return(msrs);
+	inband_exit_guest();
 }
+#else
+#define kvm_handle_oob_switch  NULL
+#endif
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:9748 @ static int vcpu_enter_guest(struct kvm_v
 	}
 
 	preempt_disable();
+	local_irq_disable_full();
+
+	inband_enter_guest(vcpu);
+	inband_set_vcpu_release_state(vcpu, true);
 
 	static_call(kvm_x86_prepare_guest_switch)(vcpu);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:9760 @ static int vcpu_enter_guest(struct kvm_v
 	 * IPI are then delayed after guest entry, which ensures that they
 	 * result in virtual interrupt delivery.
 	 */
-	local_irq_disable();
 	vcpu->mode = IN_GUEST_MODE;
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:9790 @ static int vcpu_enter_guest(struct kvm_v
 	if (kvm_vcpu_exit_request(vcpu)) {
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		smp_wmb();
-		local_irq_enable();
+		local_irq_enable_full();
 		preempt_enable();
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = 1;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:9869 @ static int vcpu_enter_guest(struct kvm_v
 	 * stat.exits increment will do nicely.
 	 */
 	kvm_before_interrupt(vcpu);
-	local_irq_enable();
+	local_irq_enable_full();
 	++vcpu->stat.exits;
-	local_irq_disable();
+	local_irq_disable_full();
 	kvm_after_interrupt(vcpu);
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:9891 @ static int vcpu_enter_guest(struct kvm_v
 		}
 	}
 
-	local_irq_enable();
+	local_irq_enable_full();
 	preempt_enable();
 
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:10107 @ static void kvm_save_current_fpu(struct
 /* Swap (qemu) user FPU context for the guest FPU context. */
 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	fpregs_lock();
+	unsigned long flags;
+
+	flags = fpregs_lock();
 
 	kvm_save_current_fpu(vcpu->arch.user_fpu);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:10123 @ static void kvm_load_guest_fpu(struct kv
 					~XFEATURE_MASK_PKRU);
 
 	fpregs_mark_activate();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	trace_kvm_fpu(1);
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:10131 @ static void kvm_load_guest_fpu(struct kv
 /* When vcpu_run ends, restore user space FPU context. */
 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	fpregs_lock();
+	unsigned long flags;
+
+	flags = fpregs_lock();
 
 	/*
 	 * Guests with protected state can't have it read by the hypervisor,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:10145 @ static void kvm_put_guest_fpu(struct kvm
 	restore_fpregs_from_fpstate(&vcpu->arch.user_fpu->state);
 
 	fpregs_mark_activate();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	++vcpu->stat.fpu_reload;
 	trace_kvm_fpu(0);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:10923 @ int kvm_arch_vcpu_create(struct kvm_vcpu
 	if (r)
 		goto free_guest_fpu;
 
+	inband_init_vcpu(vcpu, kvm_handle_oob_switch);
 	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
 	kvm_vcpu_mtrr_init(vcpu);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/lib/usercopy.c linux-dovetail-v5.15.y-dovetail/arch/x86/lib/usercopy.c
--- linux-5.15.26/arch/x86/lib/usercopy.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/lib/usercopy.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:35 @ copy_from_user_nmi(void *to, const void
 {
 	unsigned long ret;
 
-	if (__range_not_ok(from, n, TASK_SIZE))
+	if (running_oob() || __range_not_ok(from, n, TASK_SIZE))
 		return n;
 
 	if (!nmi_uaccess_okay())
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/mm/fault.c linux-dovetail-v5.15.y-dovetail/arch/x86/mm/fault.c
--- linux-5.15.26/arch/x86/mm/fault.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/mm/fault.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:649 @ page_fault_oops(struct pt_regs *regs, un
 		goto oops;
 	}
 
+	/*
+	 * Do not bother unwinding the notification context on
+	 * CPU/firmware/kernel bug.
+	 */
+	oob_trap_notify(X86_TRAP_PF, regs);
+
 #ifdef CONFIG_VMAP_STACK
 	/*
 	 * Stack overflow?  During boot, we can fault near the initial
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:731 @ kernelmode_fixup_or_oops(struct pt_regs
 		 * the below recursive fault logic only apply to a faults from
 		 * task context.
 		 */
-		if (in_interrupt())
+		if (running_oob() || in_interrupt())
 			return;
 
 		/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:805 @ static bool is_vsyscall_vaddr(unsigned l
 	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
 }
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+static inline void cond_reenable_irqs_user(void)
+{
+	hard_local_irq_enable();
+
+	if (running_inband())
+		local_irq_enable();
+}
+
+static inline void cond_reenable_irqs_kernel(irqentry_state_t state,
+					struct pt_regs *regs)
+{
+	if (regs->flags & X86_EFLAGS_IF) {
+		hard_local_irq_enable();
+		if (state.stage_info == IRQENTRY_INBAND_UNSTALLED)
+			local_irq_enable();
+	}
+}
+
+static inline void cond_disable_irqs(void)
+{
+	hard_local_irq_disable();
+
+	if (running_inband())
+		local_irq_disable();
+}
+
+#else  /* !CONFIG_IRQ_PIPELINE */
+
+static inline void cond_reenable_irqs_user(void)
+{
+	local_irq_enable();
+}
+
+static inline void cond_reenable_irqs_kernel(irqentry_state_t state,
+					struct pt_regs *regs)
+{
+	if (regs->flags & X86_EFLAGS_IF)
+		local_irq_enable();
+}
+
+static inline void cond_disable_irqs(void)
+{
+	local_irq_disable();
+}
+
+#endif  /* !CONFIG_IRQ_PIPELINE */
+
 static void
 __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		       unsigned long address, u32 pkey, int si_code)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:876 @ __bad_area_nosemaphore(struct pt_regs *r
 	 * User mode accesses just cause a SIGSEGV.
 	 * It's possible to have interrupts off here:
 	 */
-	local_irq_enable();
+	cond_reenable_irqs_user();
 
 	/*
 	 * Valid to do another page fault here because this one came
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:893 @ __bad_area_nosemaphore(struct pt_regs *r
 	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
 		return;
 
+	oob_trap_notify(X86_TRAP_PF, regs);
+
 	if (likely(show_unhandled_signals))
 		show_signal_msg(regs, error_code, address, tsk);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:905 @ __bad_area_nosemaphore(struct pt_regs *r
 	else
 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 
-	local_irq_disable();
+	local_irq_disable_full();
+	oob_trap_unwind(X86_TRAP_PF, regs);
 }
 
 static noinline void
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1280 @ NOKPROBE_SYMBOL(do_kern_addr_fault);
 static inline
 void do_user_addr_fault(struct pt_regs *regs,
 			unsigned long error_code,
-			unsigned long address)
+			unsigned long address,
+			irqentry_state_t state)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1340 @ void do_user_addr_fault(struct pt_regs *
 	 * If we're in an interrupt, have no user context or are running
 	 * in a region with pagefaults disabled then we must not take the fault
 	 */
-	if (unlikely(faulthandler_disabled() || !mm)) {
+	if (unlikely(running_inband() && (faulthandler_disabled() || !mm))) {
 		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1353 @ void do_user_addr_fault(struct pt_regs *
 	 * potential system fault or CPU buglet:
 	 */
 	if (user_mode(regs)) {
-		local_irq_enable();
+		cond_reenable_irqs_user();
 		flags |= FAULT_FLAG_USER;
 	} else {
 		if (regs->flags & X86_EFLAGS_IF)
-			local_irq_enable();
+			cond_reenable_irqs_kernel(state, regs);
 	}
 
+	/*
+	 * At this point, we would have to stop running
+	 * out-of-band. Tell the companion core about the page fault
+	 * event, so that it might switch current to in-band mode if
+	 * need be.
+	 */
+	oob_trap_notify(X86_TRAP_PF, regs);
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	if (error_code & X86_PF_WRITE)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1389 @ void do_user_addr_fault(struct pt_regs *
 	 */
 	if (is_vsyscall_vaddr(address)) {
 		if (emulate_vsyscall(error_code, regs, address))
-			return;
+			goto out;
 	}
 #endif
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1412 @ void do_user_addr_fault(struct pt_regs *
 			 * which we do not expect faults.
 			 */
 			bad_area_nosemaphore(regs, error_code, address);
-			return;
+			goto out;
 		}
 retry:
 		mmap_read_lock(mm);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1428 @ retry:
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
 		bad_area(regs, error_code, address);
-		return;
+		goto out;
 	}
 	if (likely(vma->vm_start <= address))
 		goto good_area;
 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
 		bad_area(regs, error_code, address);
-		return;
+		goto out;
 	}
 	if (unlikely(expand_stack(vma, address))) {
 		bad_area(regs, error_code, address);
-		return;
+		goto out;
 	}
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1448 @ retry:
 good_area:
 	if (unlikely(access_error(error_code, vma))) {
 		bad_area_access_error(regs, error_code, address, vma);
-		return;
+		goto out;
 	}
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1475 @ good_area:
 			kernelmode_fixup_or_oops(regs, error_code, address,
 						 SIGBUS, BUS_ADRERR,
 						 ARCH_DEFAULT_PKEY);
-		return;
+		goto out;
 	}
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1491 @ good_area:
 
 	mmap_read_unlock(mm);
 	if (likely(!(fault & VM_FAULT_ERROR)))
-		return;
+		goto out;
 
 	if (fatal_signal_pending(current) && !user_mode(regs)) {
 		kernelmode_fixup_or_oops(regs, error_code, address,
 					 0, 0, ARCH_DEFAULT_PKEY);
-		return;
+		goto out;
 	}
 
 	if (fault & VM_FAULT_OOM) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1505 @ good_area:
 			kernelmode_fixup_or_oops(regs, error_code, address,
 						 SIGSEGV, SEGV_MAPERR,
 						 ARCH_DEFAULT_PKEY);
-			return;
+			goto out;
 		}
 
 		/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1523 @ good_area:
 		else
 			BUG();
 	}
+out:
+	oob_trap_unwind(X86_TRAP_PF, regs);
 }
 NOKPROBE_SYMBOL(do_user_addr_fault);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1543 @ trace_page_fault_entries(struct pt_regs
 
 static __always_inline void
 handle_page_fault(struct pt_regs *regs, unsigned long error_code,
-			      unsigned long address)
+		unsigned long address,
+		irqentry_state_t state)
 {
 	trace_page_fault_entries(regs, error_code, address);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1555 @ handle_page_fault(struct pt_regs *regs,
 	if (unlikely(fault_in_kernel_space(address))) {
 		do_kern_addr_fault(regs, error_code, address);
 	} else {
-		do_user_addr_fault(regs, error_code, address);
+		do_user_addr_fault(regs, error_code, address, state);
 		/*
 		 * User address page fault handling might have reenabled
 		 * interrupts. Fixing up all potential exit points of
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1563 @ handle_page_fault(struct pt_regs *regs,
 		 * doable w/o creating an unholy mess or turning the code
 		 * upside down.
 		 */
-		local_irq_disable();
+		cond_disable_irqs();
 	}
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1611 @ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_f
 	state = irqentry_enter(regs);
 
 	instrumentation_begin();
-	handle_page_fault(regs, error_code, address);
+	handle_page_fault(regs, error_code, address, state);
 	instrumentation_end();
 
 	irqentry_exit(regs, state);
 }
+
+#ifdef CONFIG_DOVETAIL
+
+void arch_advertise_page_mapping(unsigned long start, unsigned long end)
+{
+	unsigned long next, addr = start;
+	pgd_t *pgd, *pgd_ref;
+	struct page *page;
+
+	/*
+	 * APEI may create temporary mappings in interrupt context -
+	 * nothing we can and need to propagate globally.
+	 */
+	if (in_interrupt())
+		return;
+
+	if (!(start >= VMALLOC_START && start < VMALLOC_END))
+		return;
+
+	do {
+		next = pgd_addr_end(addr, end);
+		pgd_ref = pgd_offset_k(addr);
+		if (pgd_none(*pgd_ref))
+			continue;
+		spin_lock(&pgd_lock);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd = page_address(page) + pgd_index(addr);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+		}
+		spin_unlock(&pgd_lock);
+		addr = next;
+	} while (addr != end);
+
+	arch_flush_lazy_mmu_mode();
+}
+
+#endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/mm/tlb.c linux-dovetail-v5.15.y-dovetail/arch/x86/mm/tlb.c
--- linux-5.15.26/arch/x86/mm/tlb.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/mm/tlb.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:8 @
 #include <linux/spinlock.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:321 @ EXPORT_SYMBOL_GPL(leave_mm);
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
 {
-	unsigned long flags;
+	unsigned long flags, _flags;
 
 	local_irq_save(flags);
+	protect_inband_mm(_flags);
 	switch_mm_irqs_off(prev, next, tsk);
+	unprotect_inband_mm(_flags);
 	local_irq_restore(flags);
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:512 @ void switch_mm_irqs_off(struct mm_struct
 	 */
 
 	/* We don't want flush_tlb_func() to run concurrently with us. */
-	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+	if (IS_ENABLED(CONFIG_DOVETAIL))
+		WARN_ON_ONCE(!hard_irqs_disabled());
+	else if (IS_ENABLED(CONFIG_PROVE_LOCKING))
 		WARN_ON_ONCE(!irqs_disabled());
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:740 @ static void flush_tlb_func(void *info)
 	 */
 	const struct flush_tlb_info *f = info;
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
-	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+	u32 loaded_mm_asid;
+	u64 mm_tlb_gen;
+	u64 local_tlb_gen;
 	bool local = smp_processor_id() == f->initiating_cpu;
-	unsigned long nr_invalidate = 0;
+	unsigned long nr_invalidate = 0, flags;
 
 	/* This code cannot presently handle being reentered. */
 	VM_WARN_ON(!irqs_disabled());
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:758 @ static void flush_tlb_func(void *info)
 			return;
 	}
 
-	if (unlikely(loaded_mm == &init_mm))
+	protect_inband_mm(flags);
+
+	loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
+	local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+
+	if (unlikely(loaded_mm == &init_mm)) {
+		unprotect_inband_mm(flags);
 		return;
+	}
 
 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
 		   loaded_mm->context.ctx_id);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:783 @ static void flush_tlb_func(void *info)
 		 * IPIs to lazy TLB mode CPUs.
 		 */
 		switch_mm_irqs_off(NULL, &init_mm, NULL);
+		unprotect_inband_mm(flags);
 		return;
 	}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:794 @ static void flush_tlb_func(void *info)
 		 * be handled can catch us all the way up, leaving no work for
 		 * the second flush.
 		 */
+		unprotect_inband_mm(flags);
 		goto done;
 	}
 
 	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
 	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
 
+	unprotect_inband_mm(flags);
+
 	/*
 	 * If we get to this point, we know that our TLB is out of date.
 	 * This does not strictly imply that we need to flush (it's
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1186 @ STATIC_NOPV void native_flush_tlb_global
 	 * from interrupts. (Use the raw variant because this code can
 	 * be called from deep inside debugging code.)
 	 */
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
 	/* toggle PGE */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1194 @ STATIC_NOPV void native_flush_tlb_global
 	/* write old PGE again and flush TLBs */
 	native_write_cr4(cr4);
 
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1202 @ STATIC_NOPV void native_flush_tlb_global
  */
 STATIC_NOPV void native_flush_tlb_local(void)
 {
+	unsigned long flags;
+
 	/*
 	 * Preemption or interrupts must be disabled to protect the access
 	 * to the per CPU variable and to prevent being preempted between
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1211 @ STATIC_NOPV void native_flush_tlb_local(
 	 */
 	WARN_ON_ONCE(preemptible());
 
+	flags = hard_cond_local_irq_save();
+
 	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
 	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
 	native_write_cr3(__native_read_cr3());
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void flush_tlb_local(void)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1290 @ bool nmi_uaccess_okay(void)
 	VM_WARN_ON_ONCE(!loaded_mm);
 
 	/*
+	 * There would be no way for the companion core to switch an
+	 * out-of-band task back in-band in order to handle an access
+	 * fault over NMI safely. Tell the caller that uaccess from
+	 * NMI is NOT ok if the preempted task was running
+	 * out-of-band.
+	 */
+	if (running_oob())
+		return false;
+
+	/*
 	 * The condition we want to check is
 	 * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
 	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/platform/efi/efi_64.c linux-dovetail-v5.15.y-dovetail/arch/x86/platform/efi/efi_64.c
--- linux-5.15.26/arch/x86/platform/efi/efi_64.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/platform/efi/efi_64.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:464 @ void __init efi_dump_pagetable(void)
  */
 void efi_enter_mm(void)
 {
+	unsigned long flags;
+
+	protect_inband_mm(flags);
 	efi_prev_mm = current->active_mm;
 	current->active_mm = &efi_mm;
 	switch_mm(efi_prev_mm, &efi_mm, NULL);
+	unprotect_inband_mm(flags);
 }
 
 void efi_leave_mm(void)
 {
+	unsigned long flags;
+
+	protect_inband_mm(flags);
 	current->active_mm = efi_prev_mm;
 	switch_mm(&efi_mm, efi_prev_mm, NULL);
+	unprotect_inband_mm(flags);
 }
 
 static DEFINE_SPINLOCK(efi_runtime_lock);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/xen/enlighten_hvm.c linux-dovetail-v5.15.y-dovetail/arch/x86/xen/enlighten_hvm.c
--- linux-5.15.26/arch/x86/xen/enlighten_hvm.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/xen/enlighten_hvm.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:123 @ static void __init init_hvm_pv_info(void
 		this_cpu_write(xen_vcpu_id, smp_processor_id());
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,
+				 sysvec_xen_hvm_callback)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/arch/x86/xen/Kconfig linux-dovetail-v5.15.y-dovetail/arch/x86/xen/Kconfig
--- linux-5.15.26/arch/x86/xen/Kconfig	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/arch/x86/xen/Kconfig	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:8 @
 
 config XEN
 	bool "Xen guest support"
-	depends on PARAVIRT
+	depends on PARAVIRT && !IRQ_PIPELINE
 	select PARAVIRT_CLOCK
 	select X86_HV_CALLBACK_VECTOR
 	depends on X86_64 || (X86_32 && X86_PAE)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/Documentation/dontdiff linux-dovetail-v5.15.y-dovetail/Documentation/dontdiff
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/Documentation/dovetail.rst linux-dovetail-v5.15.y-dovetail/Documentation/dovetail.rst
--- linux-5.15.26/Documentation/dovetail.rst	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/Documentation/dovetail.rst	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+========================
+Introduction to Dovetail
+========================
+
+:Author: Philippe Gerum
+:Date: 08.04.2020
+
+Using Linux as a host for lightweight software cores specialized in
+delivering very short and bounded response times has been a popular
+way of supporting real-time applications in the embedded space over
+the years.
+
+In this so-called *dual kernel* design, the time-critical work is
+immediately delegated to a small companion core running out-of-band
+with respect to the regular, in-band kernel activities. Applications
+run in user space, obtaining real-time services from the
+core. Alternatively, when there is no real-time requirement, threads
+can still use the rich GPOS feature set Linux provides such as
+networking, data storage or GUIs.
+
+*Dovetail* introduces a high-priority execution stage into the main
+kernel logic reserved for such a companion core to run on.  At any
+time, out-of-band activities from this stage can preempt the common,
+in-band work. A companion core can be implemented as a driver,
+which connects to the main kernel via the Dovetail interface for
+delivering ultra-low latency scheduling capabilities to applications.
+
+Dovetail is fully described at https://evlproject.org/dovetail/.
+The reference implementation of a Dovetail-based companion core is
+maintained at https://evlproject.org/core/.
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/base/regmap/internal.h linux-dovetail-v5.15.y-dovetail/drivers/base/regmap/internal.h
--- linux-5.15.26/drivers/base/regmap/internal.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/base/regmap/internal.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:53 @ struct regmap {
 	union {
 		struct mutex mutex;
 		struct {
-			spinlock_t spinlock;
+			union {
+				spinlock_t spinlock;
+				hard_spinlock_t oob_lock;
+			};
 			unsigned long spinlock_flags;
 		};
 		struct {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/base/regmap/regmap.c linux-dovetail-v5.15.y-dovetail/drivers/base/regmap/regmap.c
--- linux-5.15.26/drivers/base/regmap/regmap.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/base/regmap/regmap.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:20 @
 #include <linux/delay.h>
 #include <linux/log2.h>
 #include <linux/hwspinlock.h>
+#include <linux/dovetail.h>
 #include <asm/unaligned.h>
 
 #define CREATE_TRACE_POINTS
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:554 @ __releases(&map->raw_spinlock)
 	raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
 }
 
+static void regmap_lock_oob(void *__map)
+__acquires(&map->oob_lock)
+{
+	struct regmap *map = __map;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&map->oob_lock, flags);
+	map->spinlock_flags = flags;
+}
+
+static void regmap_unlock_oob(void *__map)
+__releases(&map->oob_lock)
+{
+	struct regmap *map = __map;
+	raw_spin_unlock_irqrestore(&map->oob_lock, map->spinlock_flags);
+}
+
 static void dev_get_regmap_release(struct device *dev, void *res)
 {
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:809 @ struct regmap *__regmap_init(struct devi
 	} else {
 		if ((bus && bus->fast_io) ||
 		    config->fast_io) {
-			if (config->use_raw_spinlock) {
+			if (dovetailing() && config->oob_io) {
+				raw_spin_lock_init(&map->oob_lock);
+				map->lock = regmap_lock_oob;
+				map->unlock = regmap_unlock_oob;
+				lockdep_set_class_and_name(&map->oob_lock,
+							lock_key, lock_name);
+			} else if (config->use_raw_spinlock) {
 				raw_spin_lock_init(&map->raw_spinlock);
 				map->lock = regmap_lock_raw_spinlock;
 				map->unlock = regmap_unlock_raw_spinlock;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:828 @ struct regmap *__regmap_init(struct devi
 				lockdep_set_class_and_name(&map->spinlock,
 							   lock_key, lock_name);
 			}
-		} else {
+		} else if (!config->oob_io) { /* Catch configuration issue: oob && !fast_io */
 			mutex_init(&map->mutex);
 			map->lock = regmap_lock_mutex;
 			map->unlock = regmap_unlock_mutex;
 			map->can_sleep = true;
 			lockdep_set_class_and_name(&map->mutex,
 						   lock_key, lock_name);
-		}
+		} else {
+			ret = -ENXIO;
+			goto err_name;
+		}
 		map->lock_arg = map;
 	}
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/base/regmap/regmap-irq.c linux-dovetail-v5.15.y-dovetail/drivers/base/regmap/regmap-irq.c
--- linux-5.15.26/drivers/base/regmap/regmap-irq.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/base/regmap/regmap-irq.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:370 @ static const struct irq_chip regmap_irq_
 	.irq_enable		= regmap_irq_enable,
 	.irq_set_type		= regmap_irq_set_type,
 	.irq_set_wake		= regmap_irq_set_wake,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/arm_arch_timer.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/arm_arch_timer.c
--- linux-5.15.26/drivers/clocksource/arm_arch_timer.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/arm_arch_timer.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:25 @
 #include <linux/of_address.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/dovetail.h>
 #include <linux/sched/clock.h>
 #include <linux/sched_clock.h>
 #include <linux/acpi.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:660 @ static __always_inline irqreturn_t timer
 	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
 		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
 		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
-		evt->event_handler(evt);
+		clockevents_handle_event(evt);
 		return IRQ_HANDLED;
 	}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:769 @ static int arch_timer_set_next_event_phy
 static void __arch_timer_setup(unsigned type,
 			       struct clock_event_device *clk)
 {
-	clk->features = CLOCK_EVT_FEAT_ONESHOT;
+	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE;
 
 	if (type == ARCH_TIMER_TYPE_CP15) {
 		typeof(clk->set_next_event) sne;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:880 @ static void arch_counter_set_user_access
 	else
 		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
 
+	if (IS_ENABLED(CONFIG_GENERIC_CLOCKSOURCE_VDSO))
+		cntkctl |= ARCH_TIMER_USR_PT_ACCESS_EN;
+
 	arch_timer_set_cntkctl(cntkctl);
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:916 @ static int arch_timer_starting_cpu(unsig
 	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
 
 	if (arch_timer_has_nonsecure_ppi()) {
+		clk->irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
 		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
 		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
 				  flags);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1035 @ static void __init arch_counter_register
 
 		arch_timer_read_counter = rd;
 		clocksource_counter.vdso_clock_mode = vdso_default;
+		if (vdso_default != VDSO_CLOCKMODE_NONE)
+			clocksource_counter.vdso_type = CLOCKSOURCE_VDSO_ARCHITECTED;
 	} else {
 		arch_timer_read_counter = arch_counter_get_cntvct_mem;
 	}
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/arm_global_timer.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/arm_global_timer.c
--- linux-5.15.26/drivers/clocksource/arm_global_timer.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/arm_global_timer.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:165 @ static irqreturn_t gt_clockevent_interru
 	 *	the Global Timer flag _after_ having incremented
 	 *	the Comparator register	value to a higher value.
 	 */
-	if (clockevent_state_oneshot(evt))
+	if (clockevent_is_oob(evt) || clockevent_state_oneshot(evt))
 		gt_compare_set(ULONG_MAX, 0);
 
 	writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS);
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 
 	return IRQ_HANDLED;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:180 @ static int gt_starting_cpu(unsigned int
 
 	clk->name = "arm_global_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
-		CLOCK_EVT_FEAT_PERCPU;
+		CLOCK_EVT_FEAT_PERCPU | CLOCK_EVT_FEAT_PIPELINE;
 	clk->set_state_shutdown = gt_clockevent_shutdown;
 	clk->set_state_periodic = gt_clockevent_set_periodic;
 	clk->set_state_oneshot = gt_clockevent_shutdown;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:204 @ static int gt_dying_cpu(unsigned int cpu
 	return 0;
 }
 
-static u64 gt_clocksource_read(struct clocksource *cs)
-{
-	return gt_counter_read();
-}
-
 static void gt_resume(struct clocksource *cs)
 {
 	unsigned long ctrl;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:214 @ static void gt_resume(struct clocksource
 		writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
 }
 
-static struct clocksource gt_clocksource = {
-	.name	= "arm_global_timer",
-	.rating	= 300,
-	.read	= gt_clocksource_read,
-	.mask	= CLOCKSOURCE_MASK(64),
-	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
-	.resume = gt_resume,
+static struct clocksource_user_mmio gt_clocksource = {
+	.mmio.clksrc = {
+		.name	= "arm_global_timer",
+		.rating	= 300,
+		.read	= clocksource_dual_mmio_readl_up,
+		.mask	= CLOCKSOURCE_MASK(64),
+		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
+		.resume = gt_resume,
+	},
 };
 
 #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:268 @ static void __init gt_delay_timer_init(v
 
 static int __init gt_clocksource_init(void)
 {
+	struct clocksource_mmio_regs mmr;
+
 	writel(0, gt_base + GT_CONTROL);
 	writel(0, gt_base + GT_COUNTER0);
 	writel(0, gt_base + GT_COUNTER1);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:281 @ static int __init gt_clocksource_init(vo
 #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
 	sched_clock_register(gt_sched_clock_read, 64, gt_target_rate);
 #endif
-	return clocksource_register_hz(&gt_clocksource, gt_target_rate);
+	mmr.reg_upper = gt_base + GT_COUNTER1;
+	mmr.reg_lower = gt_base + GT_COUNTER0;
+	mmr.bits_upper = 32;
+	mmr.bits_lower = 32;
+	mmr.revmap = NULL;
+
+	return clocksource_user_mmio_init(&gt_clocksource, &mmr, gt_target_rate);
 }
 
 static int gt_clk_rate_change_cb(struct notifier_block *nb,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:407 @ static int __init global_timer_of_regist
 		goto out_clk_nb;
 	}
 
-	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
-				 "gt", gt_evt);
+	err = __request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
+				   IRQF_TIMER, "gt", gt_evt);
 	if (err) {
 		pr_warn("global-timer: can't register interrupt %d (%d)\n",
 			gt_ppi, err);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/bcm2835_timer.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/bcm2835_timer.c
--- linux-5.15.26/drivers/clocksource/bcm2835_timer.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/bcm2835_timer.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:56 @ static int bcm2835_time_set_next_event(u
 static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
 {
 	struct bcm2835_timer *timer = dev_id;
-	void (*event_handler)(struct clock_event_device *);
+
 	if (readl_relaxed(timer->control) & timer->match_mask) {
 		writel_relaxed(timer->match_mask, timer->control);
 
-		event_handler = READ_ONCE(timer->evt.event_handler);
-		if (event_handler)
-			event_handler(&timer->evt);
+		clockevents_handle_event(&timer->evt);
 		return IRQ_HANDLED;
 	} else {
 		return IRQ_NONE;
 	}
 }
 
+static struct clocksource_user_mmio clocksource_bcm2835 = {
+	.mmio.clksrc = {
+		.rating		= 300,
+		.read		= clocksource_mmio_readl_up,
+		.mask		= CLOCKSOURCE_MASK(32),
+		.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+	},
+};
+
 static int __init bcm2835_timer_init(struct device_node *node)
 {
 	void __iomem *base;
 	u32 freq;
 	int irq, ret;
 	struct bcm2835_timer *timer;
+	struct clocksource_mmio_regs mmr;
 
 	base = of_iomap(node, 0);
 	if (!base) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:99 @ static int __init bcm2835_timer_init(str
 	system_clock = base + REG_COUNTER_LO;
 	sched_clock_register(bcm2835_sched_read, 32, freq);
 
-	clocksource_mmio_init(base + REG_COUNTER_LO, node->name,
-		freq, 300, 32, clocksource_mmio_readl_up);
+	mmr.reg_lower = base + REG_COUNTER_LO;
+	mmr.bits_lower = 32;
+	mmr.reg_upper = 0;
+	mmr.bits_upper = 0;
+	mmr.revmap = NULL;
+	clocksource_bcm2835.mmio.clksrc.name = node->name;
+	clocksource_user_mmio_init(&clocksource_bcm2835, &mmr, freq);
 
 	irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
 	if (irq <= 0) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:125 @ static int __init bcm2835_timer_init(str
 	timer->match_mask = BIT(DEFAULT_TIMER);
 	timer->evt.name = node->name;
 	timer->evt.rating = 300;
-	timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
+	timer->evt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE;
 	timer->evt.set_next_event = bcm2835_time_set_next_event;
 	timer->evt.cpumask = cpumask_of(0);
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/clksrc_st_lpc.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/clksrc_st_lpc.c
--- linux-5.15.26/drivers/clocksource/clksrc_st_lpc.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/clksrc_st_lpc.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:54 @ static int __init st_clksrc_init(void)
 
 	sched_clock_register(st_clksrc_sched_clock_read, 32, rate);
 
-	ret = clocksource_mmio_init(ddata.base + LPC_LPT_LSB_OFF,
+	ret = clocksource_user_single_mmio_init(ddata.base + LPC_LPT_LSB_OFF,
 				    "clksrc-st-lpc", rate, 300, 32,
 				    clocksource_mmio_readl_up);
 	if (ret) {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/dw_apb_timer.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/dw_apb_timer.c
--- linux-5.15.26/drivers/clocksource/dw_apb_timer.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/dw_apb_timer.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:46 @ ced_to_dw_apb_ced(struct clock_event_dev
 static inline struct dw_apb_clocksource *
 clocksource_to_dw_apb_clocksource(struct clocksource *cs)
 {
-	return container_of(cs, struct dw_apb_clocksource, cs);
+	return container_of(cs, struct dw_apb_clocksource, ummio.mmio.clksrc);
 }
 
 static inline u32 apbt_readl(struct dw_apb_timer *timer, unsigned long offs)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:346 @ void dw_apb_clocksource_start(struct dw_
 	dw_apb_clocksource_read(dw_cs);
 }
 
-static u64 __apbt_read_clocksource(struct clocksource *cs)
-{
-	u32 current_count;
-	struct dw_apb_clocksource *dw_cs =
-		clocksource_to_dw_apb_clocksource(cs);
-
-	current_count = apbt_readl_relaxed(&dw_cs->timer,
-					APBTMR_N_CURRENT_VALUE);
-
-	return (u64)~current_count;
-}
-
 static void apbt_restart_clocksource(struct clocksource *cs)
 {
 	struct dw_apb_clocksource *dw_cs =
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:367 @ static void apbt_restart_clocksource(str
  * dw_apb_clocksource_register() as the next step.
  */
 struct dw_apb_clocksource *
-dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
+__init dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
 			unsigned long freq)
 {
 	struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:377 @ dw_apb_clocksource_init(unsigned rating,
 
 	dw_cs->timer.base = base;
 	dw_cs->timer.freq = freq;
-	dw_cs->cs.name = name;
-	dw_cs->cs.rating = rating;
-	dw_cs->cs.read = __apbt_read_clocksource;
-	dw_cs->cs.mask = CLOCKSOURCE_MASK(32);
-	dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	dw_cs->cs.resume = apbt_restart_clocksource;
+	dw_cs->ummio.mmio.clksrc.name = name;
+	dw_cs->ummio.mmio.clksrc.rating = rating;
+	dw_cs->ummio.mmio.clksrc.read = clocksource_mmio_readl_down;
+	dw_cs->ummio.mmio.clksrc.mask = CLOCKSOURCE_MASK(32);
+	dw_cs->ummio.mmio.clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+	dw_cs->ummio.mmio.clksrc.resume = apbt_restart_clocksource;
 
 	return dw_cs;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:392 @ dw_apb_clocksource_init(unsigned rating,
  *
  * @dw_cs:	The clocksource to register.
  */
-void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
+void __init dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
 {
-	clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq);
+	struct clocksource_mmio_regs mmr;
+
+	mmr.reg_lower = dw_cs->timer.base + APBTMR_N_CURRENT_VALUE;
+	mmr.bits_lower = 32;
+	mmr.reg_upper = 0;
+	mmr.bits_upper = 0;
+	mmr.revmap = NULL;
+
+	clocksource_user_mmio_init(&dw_cs->ummio, &mmr, dw_cs->timer.freq);
 }
 
 /**
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/exynos_mct.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/exynos_mct.c
--- linux-5.15.26/drivers/clocksource/exynos_mct.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/exynos_mct.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:206 @ static u32 notrace exynos4_read_count_32
 	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
 }
 
-static u64 exynos4_frc_read(struct clocksource *cs)
-{
-	return exynos4_read_count_32();
-}
-
 static void exynos4_frc_resume(struct clocksource *cs)
 {
 	exynos4_mct_frc_start();
 }
 
-static struct clocksource mct_frc = {
-	.name		= "mct-frc",
-	.rating		= MCT_CLKSOURCE_RATING,
-	.read		= exynos4_frc_read,
-	.mask		= CLOCKSOURCE_MASK(32),
-	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-	.resume		= exynos4_frc_resume,
+static struct clocksource_user_mmio mct_frc = {
+	.mmio.clksrc = {
+		.name		= "mct-frc",
+		.rating		= MCT_CLKSOURCE_RATING,
+		.read		= clocksource_mmio_readl_up,
+		.mask		= CLOCKSOURCE_MASK(32),
+		.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+		.resume		= exynos4_frc_resume,
+	},
 };
 
 static u64 notrace exynos4_read_sched_clock(void)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:240 @ static cycles_t exynos4_read_current_tim
 
 static int __init exynos4_clocksource_init(void)
 {
+	struct clocksource_mmio_regs mmr;
+
 	exynos4_mct_frc_start();
 
 #if defined(CONFIG_ARM)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:250 @ static int __init exynos4_clocksource_in
 	register_current_timer_delay(&exynos4_delay_timer);
 #endif
 
-	if (clocksource_register_hz(&mct_frc, clk_rate))
-		panic("%s: can't register clocksource\n", mct_frc.name);
+	mmr.reg_upper = NULL;
+	mmr.reg_lower = reg_base + EXYNOS4_MCT_G_CNT_L;
+	mmr.bits_upper = 0;
+	mmr.bits_lower = 32;
+	mmr.revmap = NULL;
+	if (clocksource_user_mmio_init(&mct_frc, &mmr, clk_rate))
+		panic("%s: can't register clocksource\n", mct_frc.mmio.clksrc.name);
 
 	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:324 @ static int mct_set_state_periodic(struct
 static struct clock_event_device mct_comp_device = {
 	.name			= "mct-comp",
 	.features		= CLOCK_EVT_FEAT_PERIODIC |
-				  CLOCK_EVT_FEAT_ONESHOT,
+				  CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PIPELINE,
 	.rating			= 250,
 	.set_next_event		= exynos4_comp_set_next_event,
 	.set_state_periodic	= mct_set_state_periodic,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:341 @ static irqreturn_t exynos4_mct_comp_isr(
 
 	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);
 
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 
 	return IRQ_HANDLED;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:352 @ static int exynos4_clockevent_init(void)
 	clockevents_config_and_register(&mct_comp_device, clk_rate,
 					0xf, 0xffffffff);
 	if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr,
-			IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq",
+			IRQF_TIMER | IRQF_IRQPOLL | IRQF_OOB, "mct_comp_irq",
 			&mct_comp_device))
 		pr_err("%s: request_irq() failed\n", "mct_comp_irq");
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:451 @ static irqreturn_t exynos4_mct_tick_isr(
 
 	exynos4_mct_tick_clear(mevt);
 
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 
 	return IRQ_HANDLED;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:474 @ static int exynos4_mct_starting_cpu(unsi
 	evt->set_state_oneshot_stopped = set_state_shutdown;
 	evt->tick_resume = set_state_shutdown;
 	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
-			CLOCK_EVT_FEAT_PERCPU;
+			CLOCK_EVT_FEAT_PERCPU | CLOCK_EVT_FEAT_PIPELINE;
 	evt->rating = MCT_CLKEVENTS_RATING,
 
 	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:533 @ static int __init exynos4_timer_resource
 
 	if (mct_int_type == MCT_INT_PPI) {
 
-		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
-					 exynos4_mct_tick_isr, "MCT",
-					 &percpu_mct_tick);
+		err = __request_percpu_irq(mct_irqs[MCT_L0_IRQ],
+					exynos4_mct_tick_isr, IRQF_TIMER,
+					"MCT", &percpu_mct_tick);
 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
 		     mct_irqs[MCT_L0_IRQ], err);
 	} else {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/Kconfig linux-dovetail-v5.15.y-dovetail/drivers/clocksource/Kconfig
--- linux-5.15.26/drivers/clocksource/Kconfig	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/Kconfig	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:28 @ config I8253_LOCK
 config OMAP_DM_TIMER
 	bool
 	select TIMER_OF
+	select GENERIC_CLOCKSOURCE_VDSO
 
 config CLKBLD_I8253
 	def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:62 @ config DIGICOLOR_TIMER
 
 config DW_APB_TIMER
 	bool "DW APB timer driver" if COMPILE_TEST
+	select CLKSRC_MMIO
+	select GENERIC_CLOCKSOURCE_VDSO if ARM
 	help
 	  Enables the support for the dw_apb timer.
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:362 @ config SUN50I_ERRATUM_UNKNOWN1
 config ARM_GLOBAL_TIMER
 	bool "Support for the ARM global timer" if COMPILE_TEST
 	select TIMER_OF if OF
+	select GENERIC_CLOCKSOURCE_VDSO
 	depends on ARM
 	help
 	  This option enables support for the ARM global timer unit.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:426 @ config ATMEL_TCB_CLKSRC
 config CLKSRC_EXYNOS_MCT
 	bool "Exynos multi core timer driver" if COMPILE_TEST
 	depends on ARM || ARM64
+	select GENERIC_CLOCKSOURCE_VDSO
 	help
 	  Support for Multi Core Timer controller on Exynos SoCs.
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:595 @ config H8300_TPU
 config CLKSRC_IMX_GPT
 	bool "Clocksource using i.MX GPT" if COMPILE_TEST
 	depends on (ARM || ARM64) && HAVE_CLK
-	select CLKSRC_MMIO
+	select GENERIC_CLOCKSOURCE_VDSO
 
 config CLKSRC_IMX_TPM
 	bool "Clocksource using i.MX TPM" if COMPILE_TEST
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:617 @ config CLKSRC_ST_LPC
 	bool "Low power clocksource found in the LPC" if COMPILE_TEST
 	select TIMER_OF if OF
 	depends on HAS_IOMEM
-	select CLKSRC_MMIO
+	select GENERIC_CLOCKSOURCE_VDSO
 	help
 	  Enable this option to use the Low Power controller timer
 	  as clocksource.
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/mmio.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/mmio.c
--- linux-5.15.26/drivers/clocksource/mmio.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/mmio.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:9 @
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-
-struct clocksource_mmio {
-	void __iomem *reg;
-	struct clocksource clksrc;
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/device.h>
+
+struct clocksource_user_mapping {
+	struct mm_struct *mm;
+	struct clocksource_user_mmio *ucs;
+	void *regs;
+	struct hlist_node link;
+	atomic_t refs;
 };
 
+static struct class *user_mmio_class;
+static dev_t user_mmio_devt;
+
+static DEFINE_SPINLOCK(user_clksrcs_lock);
+static unsigned int user_clksrcs_count;
+static LIST_HEAD(user_clksrcs);
+
 static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
 {
 	return container_of(c, struct clocksource_mmio, clksrc);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:59 @ u64 clocksource_mmio_readw_down(struct c
 	return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
 }
 
+static inline struct clocksource_user_mmio *
+to_mmio_ucs(struct clocksource *c)
+{
+	return container_of(c, struct clocksource_user_mmio, mmio.clksrc);
+}
+
+u64 clocksource_dual_mmio_readl_up(struct clocksource *c)
+{
+	struct clocksource_user_mmio *ucs = to_mmio_ucs(c);
+	u32 upper, old_upper, lower;
+
+	upper = readl_relaxed(ucs->reg_upper);
+	do {
+		old_upper = upper;
+		lower = readl_relaxed(ucs->mmio.reg);
+		upper = readl_relaxed(ucs->reg_upper);
+	} while (upper != old_upper);
+
+	return (((u64)upper) << ucs->bits_lower) | lower;
+}
+
+u64 clocksource_dual_mmio_readw_up(struct clocksource *c)
+{
+	struct clocksource_user_mmio *ucs = to_mmio_ucs(c);
+	u16 upper, old_upper, lower;
+
+	upper = readw_relaxed(ucs->reg_upper);
+	do {
+		old_upper = upper;
+		lower = readw_relaxed(ucs->mmio.reg);
+		upper = readw_relaxed(ucs->reg_upper);
+	} while (upper != old_upper);
+
+	return (((u64)upper) << ucs->bits_lower) | lower;
+}
+
+static void mmio_base_init(const char *name, int rating, unsigned int bits,
+			   u64 (*read)(struct clocksource *),
+			   struct clocksource *cs)
+{
+	cs->name = name;
+	cs->rating = rating;
+	cs->read = read;
+	cs->mask = CLOCKSOURCE_MASK(bits);
+	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+}
+
 /**
  * clocksource_mmio_init - Initialize a simple mmio based clocksource
  * @base:	Virtual address of the clock readout register
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:120 @ int __init clocksource_mmio_init(void __
 	u64 (*read)(struct clocksource *))
 {
 	struct clocksource_mmio *cs;
+	int err;
 
 	if (bits > 64 || bits < 16)
 		return -EINVAL;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:130 @ int __init clocksource_mmio_init(void __
 		return -ENOMEM;
 
 	cs->reg = base;
-	cs->clksrc.name = name;
-	cs->clksrc.rating = rating;
-	cs->clksrc.read = read;
-	cs->clksrc.mask = CLOCKSOURCE_MASK(bits);
-	cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+	mmio_base_init(name, rating, bits, read, &cs->clksrc);
+
+	err = clocksource_register_hz(&cs->clksrc, hz);
+	if (err < 0) {
+		kfree(cs);
+		return err;
+	}
+
+	return err;
+}
+
+static void mmio_ucs_vmopen(struct vm_area_struct *vma)
+{
+	struct clocksource_user_mapping *mapping, *clone;
+	struct clocksource_user_mmio *ucs;
+	unsigned long h_key;
+
+	mapping = vma->vm_private_data;
+
+	if (mapping->mm == vma->vm_mm) {
+		atomic_inc(&mapping->refs);
+	} else if (mapping->mm) {
+		/*
+		 * We must be duplicating the original mm upon fork(),
+		 * clone the parent ucs mapping struct then rehash it
+		 * on the child mm key. If we cannot get memory for
+		 * this, mitigate the issue for users by preventing a
+		 * stale parent mm from being matched later on by a
+		 * process which reused its mm_struct (h_key is based
+		 * on this struct address).
+		 */
+		clone = kmalloc(sizeof(*mapping), GFP_KERNEL);
+		if (clone == NULL) {
+			pr_alert("out-of-memory for UCS mapping!\n");
+			atomic_inc(&mapping->refs);
+			mapping->mm = NULL;
+			return;
+		}
+		ucs = mapping->ucs;
+		clone->mm = vma->vm_mm;
+		clone->ucs = ucs;
+		clone->regs = mapping->regs;
+		atomic_set(&clone->refs, 1);
+		vma->vm_private_data = clone;
+		h_key = (unsigned long)vma->vm_mm / sizeof(*vma->vm_mm);
+		spin_lock(&ucs->lock);
+		hash_add(ucs->mappings, &clone->link, h_key);
+		spin_unlock(&ucs->lock);
+	}
+}
+
+static void mmio_ucs_vmclose(struct vm_area_struct *vma)
+{
+	struct clocksource_user_mapping *mapping;
+
+	mapping = vma->vm_private_data;
+
+	if (atomic_dec_and_test(&mapping->refs)) {
+		spin_lock(&mapping->ucs->lock);
+		hash_del(&mapping->link);
+		spin_unlock(&mapping->ucs->lock);
+		kfree(mapping);
+	}
+}
+
+static const struct vm_operations_struct mmio_ucs_vmops = {
+	.open = mmio_ucs_vmopen,
+	.close = mmio_ucs_vmclose,
+};
+
+static int mmio_ucs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long addr, upper_pfn, lower_pfn;
+	struct clocksource_user_mapping *mapping, *tmp;
+	struct clocksource_user_mmio *ucs;
+	unsigned int bits_upper;
+	unsigned long h_key;
+	pgprot_t prot;
+	size_t pages;
+	int err;
+
+	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	if (pages > 2)
+		return -EINVAL;
+
+	vma->vm_private_data = NULL;
+
+	ucs = file->private_data;
+	upper_pfn = ucs->phys_upper >> PAGE_SHIFT;
+	lower_pfn = ucs->phys_lower >> PAGE_SHIFT;
+	bits_upper = fls(ucs->mmio.clksrc.mask) - ucs->bits_lower;
+	if (pages == 2 && (!bits_upper || upper_pfn == lower_pfn))
+		return -EINVAL;
+
+	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	mapping->mm = vma->vm_mm;
+	mapping->ucs = ucs;
+	mapping->regs = (void *)vma->vm_start;
+	atomic_set(&mapping->refs, 1);
+
+	vma->vm_private_data = mapping;
+	vma->vm_ops = &mmio_ucs_vmops;
+	prot = pgprot_noncached(vma->vm_page_prot);
+	addr = vma->vm_start;
+
+	err = remap_pfn_range(vma, addr, lower_pfn, PAGE_SIZE, prot);
+	if (err < 0)
+		goto fail;
+
+	if (pages > 1) {
+		addr += PAGE_SIZE;
+		err = remap_pfn_range(vma, addr, upper_pfn, PAGE_SIZE, prot);
+		if (err < 0)
+			goto fail;
+	}
+
+	h_key = (unsigned long)vma->vm_mm / sizeof(*vma->vm_mm);
+
+	spin_lock(&ucs->lock);
+	hash_for_each_possible(ucs->mappings, tmp, link, h_key) {
+		if (tmp->mm == vma->vm_mm) {
+			spin_unlock(&ucs->lock);
+			err = -EBUSY;
+			goto fail;
+		}
+	}
+	hash_add(ucs->mappings, &mapping->link, h_key);
+	spin_unlock(&ucs->lock);
+
+	return 0;
+fail:
+	kfree(mapping);
+
+	return err;
+}
+
+static long
+mmio_ucs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct clocksource_user_mapping *mapping;
+	struct clksrc_user_mmio_info __user *u;
+	unsigned long upper_pfn, lower_pfn;
+	struct clksrc_user_mmio_info info;
+	struct clocksource_user_mmio *ucs;
+	unsigned int bits_upper;
+	void __user *map_base;
+	unsigned long h_key;
+	size_t size;
+
+	u = (struct clksrc_user_mmio_info __user *)arg;
+
+	switch (cmd) {
+	case CLKSRC_USER_MMIO_MAP:
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	h_key = (unsigned long)current->mm / sizeof(*current->mm);
+
+	ucs = file->private_data;
+	upper_pfn = ucs->phys_upper >> PAGE_SHIFT;
+	lower_pfn = ucs->phys_lower >> PAGE_SHIFT;
+	bits_upper = fls(ucs->mmio.clksrc.mask) - ucs->bits_lower;
+	size = PAGE_SIZE;
+	if (bits_upper && upper_pfn != lower_pfn)
+		size += PAGE_SIZE;
+
+	do {
+		spin_lock(&ucs->lock);
+		hash_for_each_possible(ucs->mappings, mapping, link, h_key) {
+			if (mapping->mm == current->mm) {
+				spin_unlock(&ucs->lock);
+				map_base = mapping->regs;
+				goto found;
+			}
+		}
+		spin_unlock(&ucs->lock);
+
+		map_base = (void *)
+			vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
+	} while (IS_ERR(map_base) && PTR_ERR(map_base) == -EBUSY);
+
+	if (IS_ERR(map_base))
+		return PTR_ERR(map_base);
+
+found:
+	info.type = ucs->type;
+	info.reg_lower = map_base + offset_in_page(ucs->phys_lower);
+	info.mask_lower = ucs->mmio.clksrc.mask;
+	info.bits_lower = ucs->bits_lower;
+	info.reg_upper = NULL;
+	if (ucs->phys_upper)
+		info.reg_upper = map_base + (size - PAGE_SIZE)
+			+ offset_in_page(ucs->phys_upper);
+	info.mask_upper = ucs->mask_upper;
+
+	return copy_to_user(u, &info, sizeof(*u)) ? -EFAULT : 0;
+}
+
+static int mmio_ucs_open(struct inode *inode, struct file *file)
+{
+	struct clocksource_user_mmio *ucs;
+
+	if (file->f_mode & FMODE_WRITE)
+		return -EINVAL;
+
+	ucs = container_of(inode->i_cdev, typeof(*ucs), cdev);
+	file->private_data = ucs;
+
+	return 0;
+}
+
+static const struct file_operations mmio_ucs_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl = mmio_ucs_ioctl,
+	.open		= mmio_ucs_open,
+	.mmap		= mmio_ucs_mmap,
+};
+
+static int __init
+ucs_create_cdev(struct class *class, struct clocksource_user_mmio *ucs)
+{
+	int err;
+
+	ucs->dev = device_create(class, NULL,
+				MKDEV(MAJOR(user_mmio_devt), ucs->id),
+				ucs, "ucs/%d", ucs->id);
+	if (IS_ERR(ucs->dev))
+		return PTR_ERR(ucs->dev);
+
+	spin_lock_init(&ucs->lock);
+	hash_init(ucs->mappings);
+
+	cdev_init(&ucs->cdev, &mmio_ucs_fops);
+	ucs->cdev.kobj.parent = &ucs->dev->kobj;
+
+	err = cdev_add(&ucs->cdev, ucs->dev->devt, 1);
+	if (err < 0)
+		goto err_device_destroy;
+
+	return 0;
+
+err_device_destroy:
+	device_destroy(class, MKDEV(MAJOR(user_mmio_devt), ucs->id));
+	return err;
+}
+
+static unsigned long default_revmap(void *virt)
+{
+	struct vm_struct *vm;
+
+	vm = find_vm_area(virt);
+	if (!vm)
+		return 0;
+
+	return vm->phys_addr + (virt - vm->addr);
+}
+
+int __init clocksource_user_mmio_init(struct clocksource_user_mmio *ucs,
+				      const struct clocksource_mmio_regs *regs,
+				      unsigned long hz)
+{
+	static u64 (*user_types[CLKSRC_MMIO_TYPE_NR])(struct clocksource *) = {
+		[CLKSRC_MMIO_L_UP] = clocksource_mmio_readl_up,
+		[CLKSRC_MMIO_L_DOWN] = clocksource_mmio_readl_down,
+		[CLKSRC_DMMIO_L_UP] = clocksource_dual_mmio_readl_up,
+		[CLKSRC_MMIO_W_UP] = clocksource_mmio_readw_up,
+		[CLKSRC_MMIO_W_DOWN] = clocksource_mmio_readw_down,
+		[CLKSRC_DMMIO_W_UP] = clocksource_dual_mmio_readw_up,
+	};
+	const char *name = ucs->mmio.clksrc.name;
+	unsigned long phys_upper = 0, phys_lower;
+	enum clksrc_user_mmio_type type;
+	unsigned long (*revmap)(void *);
+	int err;
 
-	return clocksource_register_hz(&cs->clksrc, hz);
+	if (regs->bits_lower > 32 || regs->bits_lower < 16 ||
+	    regs->bits_upper > 32)
+		return -EINVAL;
+
+	for (type = 0; type < ARRAY_SIZE(user_types); type++)
+		if (ucs->mmio.clksrc.read == user_types[type])
+			break;
+
+	if (type == ARRAY_SIZE(user_types))
+		return -EINVAL;
+
+	if (!(ucs->mmio.clksrc.flags & CLOCK_SOURCE_IS_CONTINUOUS))
+		return -EINVAL;
+
+	revmap = regs->revmap;
+	if (!revmap)
+		revmap = default_revmap;
+
+	phys_lower = revmap(regs->reg_lower);
+	if (!phys_lower)
+		return -EINVAL;
+
+	if (regs->bits_upper) {
+		phys_upper = revmap(regs->reg_upper);
+		if (!phys_upper)
+			return -EINVAL;
+	}
+
+	ucs->mmio.reg = regs->reg_lower;
+	ucs->type = type;
+	ucs->bits_lower = regs->bits_lower;
+	ucs->reg_upper = regs->reg_upper;
+	ucs->mask_lower = CLOCKSOURCE_MASK(regs->bits_lower);
+	ucs->mask_upper = CLOCKSOURCE_MASK(regs->bits_upper);
+	ucs->phys_lower = phys_lower;
+	ucs->phys_upper = phys_upper;
+	spin_lock_init(&ucs->lock);
+
+	err = clocksource_register_hz(&ucs->mmio.clksrc, hz);
+	if (err < 0)
+		return err;
+
+	spin_lock(&user_clksrcs_lock);
+
+	ucs->id = user_clksrcs_count++;
+	if (ucs->id < CLKSRC_USER_MMIO_MAX)
+		list_add_tail(&ucs->link, &user_clksrcs);
+
+	spin_unlock(&user_clksrcs_lock);
+
+	if (ucs->id >= CLKSRC_USER_MMIO_MAX) {
+		pr_warn("%s: Too many clocksources\n", name);
+		err = -EAGAIN;
+		goto fail;
+	}
+
+	ucs->mmio.clksrc.vdso_type = CLOCKSOURCE_VDSO_MMIO + ucs->id;
+
+	if (user_mmio_class) {
+		err = ucs_create_cdev(user_mmio_class, ucs);
+		if (err < 0) {
+			pr_warn("%s: Failed to add character device\n", name);
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	clocksource_unregister(&ucs->mmio.clksrc);
+
+	return err;
+}
+
+int __init clocksource_user_single_mmio_init(
+	void __iomem *base, const char *name,
+	unsigned long hz, int rating, unsigned int bits,
+	u64 (*read)(struct clocksource *))
+{
+	struct clocksource_user_mmio *ucs;
+	struct clocksource_mmio_regs regs;
+	int ret;
+
+	ucs = kzalloc(sizeof(*ucs), GFP_KERNEL);
+	if (!ucs)
+		return -ENOMEM;
+
+	mmio_base_init(name, rating, bits, read, &ucs->mmio.clksrc);
+	regs.reg_lower = base;
+	regs.reg_upper = NULL;
+	regs.bits_lower = bits;
+	regs.bits_upper = 0;
+	regs.revmap = NULL;
+
+	ret = clocksource_user_mmio_init(ucs, &regs, hz);
+	if (ret)
+		kfree(ucs);
+
+	return ret;
+}
+
+static int __init mmio_clksrc_chr_dev_init(void)
+{
+	struct clocksource_user_mmio *ucs;
+	struct class *class;
+	int err;
+
+	class = class_create(THIS_MODULE, "mmio_ucs");
+	if (IS_ERR(class)) {
+		pr_err("couldn't create user mmio clocksources class\n");
+		return PTR_ERR(class);
+	}
+
+	err = alloc_chrdev_region(&user_mmio_devt, 0, CLKSRC_USER_MMIO_MAX,
+				  "mmio_ucs");
+	if (err < 0) {
+		pr_err("failed to allocate user mmio clocksources character devices region\n");
+		goto err_class_destroy;
+	}
+
+	/*
+	 * Calling list_for_each_entry is safe here: clocksources are always
+	 * added to the list tail, never removed.
+	 */
+	spin_lock(&user_clksrcs_lock);
+	list_for_each_entry(ucs, &user_clksrcs, link) {
+		spin_unlock(&user_clksrcs_lock);
+
+		err = ucs_create_cdev(class, ucs);
+		if (err < 0)
+			pr_err("%s: Failed to add character device\n",
+			       ucs->mmio.clksrc.name);
+
+		spin_lock(&user_clksrcs_lock);
+	}
+	user_mmio_class = class;
+	spin_unlock(&user_clksrcs_lock);
+
+	return 0;
+
+err_class_destroy:
+	class_destroy(class);
+	return err;
 }
+device_initcall(mmio_clksrc_chr_dev_init);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/timer-imx-gpt.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/timer-imx-gpt.c
--- linux-5.15.26/drivers/clocksource/timer-imx-gpt.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/timer-imx-gpt.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:166 @ static int __init mxc_clocksource_init(s
 	sched_clock_reg = reg;
 
 	sched_clock_register(mxc_read_sched_clock, 32, c);
-	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
-			clocksource_mmio_readl_up);
+	return clocksource_user_single_mmio_init(reg, "mxc_timer1", c, 200, 32,
+					 clocksource_mmio_readl_up);
 }
 
 /* clock event */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:267 @ static irqreturn_t mxc_timer_interrupt(i
 
 	imxtm->gpt->gpt_irq_acknowledge(imxtm);
 
-	ced->event_handler(ced);
+	clockevents_handle_event(ced);
 
 	return IRQ_HANDLED;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:277 @ static int __init mxc_clockevent_init(st
 	struct clock_event_device *ced = &imxtm->ced;
 
 	ced->name = "mxc_timer1";
-	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
+	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PIPELINE;
 	ced->set_state_shutdown = mxc_shutdown;
 	ced->set_state_oneshot = mxc_set_oneshot;
 	ced->tick_resume = mxc_shutdown;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/timer-sun4i.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/timer-sun4i.c
--- linux-5.15.26/drivers/clocksource/timer-sun4i.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/timer-sun4i.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:22 @
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqreturn.h>
+#include <linux/dovetail.h>
 #include <linux/sched_clock.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:139 @ static irqreturn_t sun4i_timer_interrupt
 	struct timer_of *to = to_timer_of(evt);
 
 	sun4i_timer_clear_interrupt(timer_of_base(to));
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 
 	return IRQ_HANDLED;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:150 @ static struct timer_of to = {
 	.clkevt = {
 		.name = "sun4i_tick",
 		.rating = 350,
-		.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+		.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE,
 		.set_state_shutdown = sun4i_clkevt_shutdown,
 		.set_state_periodic = sun4i_clkevt_set_periodic,
 		.set_state_oneshot = sun4i_clkevt_set_oneshot,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/clocksource/timer-ti-dm-systimer.c linux-dovetail-v5.15.y-dovetail/drivers/clocksource/timer-ti-dm-systimer.c
--- linux-5.15.26/drivers/clocksource/timer-ti-dm-systimer.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/clocksource/timer-ti-dm-systimer.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:60 @ struct dmtimer_clockevent {
 };
 
 struct dmtimer_clocksource {
-	struct clocksource dev;
+	struct clocksource_user_mmio mmio;
 	struct dmtimer_systimer t;
 	unsigned int loadval;
 };
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:441 @ static irqreturn_t dmtimer_clockevent_in
 	struct dmtimer_systimer *t = &clkevt->t;
 
 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
-	clkevt->dev.event_handler(&clkevt->dev);
+	clockevents_handle_event(&clkevt->dev);
 
 	return IRQ_HANDLED;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:552 @ static int __init dmtimer_clkevt_init_co
 	 * We mostly use cpuidle_coupled with ARM local timers for runtime,
 	 * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
 	 */
-	dev->features = features;
+	dev->features = features | CLOCK_EVT_FEAT_PIPELINE;
 	dev->rating = rating;
 	dev->set_next_event = dmtimer_set_next_event;
 	dev->set_state_shutdown = dmtimer_clockevent_shutdown;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:710 @ static int __init dmtimer_percpu_quirk_i
 static struct dmtimer_clocksource *
 to_dmtimer_clocksource(struct clocksource *cs)
 {
-	return container_of(cs, struct dmtimer_clocksource, dev);
-}
-
-static u64 dmtimer_clocksource_read_cycles(struct clocksource *cs)
-{
-	struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
-	struct dmtimer_systimer *t = &clksrc->t;
-
-	return (u64)readl_relaxed(t->base + t->counter);
+	return container_of(cs, struct dmtimer_clocksource, mmio.mmio.clksrc);
 }
 
 static void __iomem *dmtimer_sched_clock_counter;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:749 @ static void dmtimer_clocksource_resume(s
 static int __init dmtimer_clocksource_init(struct device_node *np)
 {
 	struct dmtimer_clocksource *clksrc;
+	struct clocksource_mmio_regs mmr;
 	struct dmtimer_systimer *t;
 	struct clocksource *dev;
 	int error;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:758 @ static int __init dmtimer_clocksource_in
 	if (!clksrc)
 		return -ENOMEM;
 
-	dev = &clksrc->dev;
+	dev = &clksrc->mmio.mmio.clksrc;
 	t = &clksrc->t;
 
 	error = dmtimer_systimer_setup(np, t);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:767 @ static int __init dmtimer_clocksource_in
 
 	dev->name = "dmtimer";
 	dev->rating = 300;
-	dev->read = dmtimer_clocksource_read_cycles;
+	dev->read = clocksource_mmio_readl_up,
 	dev->mask = CLOCKSOURCE_MASK(32);
 	dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:790 @ static int __init dmtimer_clocksource_in
 		sched_clock_register(dmtimer_read_sched_clock, 32, t->rate);
 	}
 
-	if (clocksource_register_hz(dev, t->rate))
+	mmr.reg_lower = t->base + t->counter;
+	mmr.bits_lower = 32;
+	mmr.reg_upper = 0;
+	mmr.bits_upper = 0;
+	mmr.revmap = NULL;
+
+	if (clocksource_user_mmio_init(&clksrc->mmio, &mmr, t->rate))
 		pr_err("Could not register clocksource %pOF\n", np);
 
 	return 0;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/cpuidle/cpuidle.c linux-dovetail-v5.15.y-dovetail/drivers/cpuidle/cpuidle.c
--- linux-5.15.26/drivers/cpuidle/cpuidle.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/cpuidle/cpuidle.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:20 @
 #include <linux/pm_qos.h>
 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
+#include <linux/irq_pipeline.h>
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
 #include <linux/module.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:211 @ int cpuidle_enter_state(struct cpuidle_d
 	ktime_t time_start, time_end;
 
 	/*
+	 * A companion core running on the oob stage of the IRQ
+	 * pipeline may deny switching to a deeper C-state. If so,
+	 * call the default idle routine instead. If the core cannot
+	 * bear with the latency induced by the default idling
+	 * operation, then CPUIDLE is not usable and should be
+	 * disabled at build time. The in-band stage is currently
+	 * stalled, hard irqs are on. irq_cpuidle_enter() leaves us
+	 * stalled but returns with hard irqs off so that no event may
+	 * sneak in until we actually go idle.
+	 */
+	if (!irq_cpuidle_enter(dev, target_state)) {
+		default_idle_call();
+		return -EBUSY;
+	}
+
+	/*
 	 * Tell the time framework to switch to a broadcast timer because our
 	 * local timer will be shut down.  If a local timer is used from another
 	 * CPU as a broadcast timer, this call may fail if it is not available.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:255 @ int cpuidle_enter_state(struct cpuidle_d
 	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
 		rcu_idle_enter();
 	entered_state = target_state->enter(dev, drv, index);
+	hard_cond_local_irq_enable();
 	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
 		rcu_idle_exit();
 	start_critical_timings();
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/cpuidle/poll_state.c linux-dovetail-v5.15.y-dovetail/drivers/cpuidle/poll_state.c
--- linux-5.15.26/drivers/cpuidle/poll_state.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/cpuidle/poll_state.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:20 @ static int __cpuidle poll_idle(struct cp
 
 	dev->poll_time_limit = false;
 
-	local_irq_enable();
+	local_irq_enable_full();
 	if (!current_set_polling_and_test()) {
 		unsigned int loop_count = 0;
 		u64 limit;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/dma/bcm2835-dma.c linux-dovetail-v5.15.y-dovetail/drivers/dma/bcm2835-dma.c
--- linux-5.15.26/drivers/dma/bcm2835-dma.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/dma/bcm2835-dma.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:32 @
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
+#include <linux/irqstage.h>
 #include <linux/of.h>
 #include <linux/of_dma.h>
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:439 @ static void bcm2835_dma_abort(struct bcm
 	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
 }
 
+static inline void bcm2835_dma_enable_channel(struct bcm2835_chan *c)
+{
+	writel(c->desc->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
+	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+}
+
+static inline bool bcm2835_dma_oob_capable(void)
+{
+	return IS_ENABLED(CONFIG_DMA_BCM2835_OOB);
+}
+
 static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
 {
 	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
-	struct bcm2835_desc *d;
 
 	if (!vd) {
 		c->desc = NULL;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:461 @ static void bcm2835_dma_start_desc(struc
 
 	list_del(&vd->node);
 
-	c->desc = d = to_bcm2835_dma_desc(&vd->tx);
+	c->desc = to_bcm2835_dma_desc(&vd->tx);
+	if (!bcm2835_dma_oob_capable() || !vchan_oob_pulsed(vd))
+		bcm2835_dma_enable_channel(c);
+}
+
+static bool do_channel(struct bcm2835_chan *c, struct bcm2835_desc *d)
+{
+	struct dmaengine_desc_callback cb;
+
+	if (running_oob()) {
+		if (!vchan_oob_handled(&d->vd))
+			return false;
+		dmaengine_desc_get_callback(&d->vd.tx, &cb);
+		if (dmaengine_desc_callback_valid(&cb)) {
+			vchan_unlock(&c->vc);
+			dmaengine_desc_callback_invoke(&cb, NULL);
+			vchan_lock(&c->vc);
+		}
+		return true;
+	}
 
-	writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
-	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+	if (d->cyclic) {
+		/* call the cyclic callback */
+		vchan_cyclic_callback(&d->vd);
+	} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
+		vchan_cookie_complete(&c->desc->vd);
+		bcm2835_dma_start_desc(c);
+	}
+
+	return true;
+}
+
+static inline bool is_base_irq_handler(void)
+{
+	return !bcm2835_dma_oob_capable() || running_oob();
 }
 
 static irqreturn_t bcm2835_dma_callback(int irq, void *data)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:505 @ static irqreturn_t bcm2835_dma_callback(
 	unsigned long flags;
 
 	/* check the shared interrupt */
-	if (c->irq_flags & IRQF_SHARED) {
+	if (is_base_irq_handler() && c->irq_flags & IRQF_SHARED) {
 		/* check if the interrupt is enabled */
 		flags = readl(c->chan_base + BCM2835_DMA_CS);
 		/* if not set then we are not the reason for the irq */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:513 @ static irqreturn_t bcm2835_dma_callback(
 			return IRQ_NONE;
 	}
 
-	spin_lock_irqsave(&c->vc.lock, flags);
+	/* CAUTION: If running in-band, hard irqs are on. */
+	vchan_lock_irqsave(&c->vc, flags);
 
 	/*
 	 * Clear the INT flag to receive further interrupts. Keep the channel
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:523 @ static irqreturn_t bcm2835_dma_callback(
 	 * if this IRQ handler is threaded.) If the channel is finished, it
 	 * will remain idle despite the ACTIVE flag being set.
 	 */
-	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
-	       c->chan_base + BCM2835_DMA_CS);
+	if (is_base_irq_handler())
+		writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+			c->chan_base + BCM2835_DMA_CS);
 
 	d = c->desc;
+	if (!d)
+		goto out;
 
-	if (d) {
-		if (d->cyclic) {
-			/* call the cyclic callback */
-			vchan_cyclic_callback(&d->vd);
-		} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
-			vchan_cookie_complete(&c->desc->vd);
-			bcm2835_dma_start_desc(c);
-		}
+	if (bcm2835_dma_oob_capable() && running_oob()) {
+		/*
+		 * If we cannot process this from the out-of-band
+		 * stage, schedule a callback from in-band context.
+		 */
+		if (!do_channel(c, d))
+			irq_post_inband(irq);
+	} else {
+		do_channel(c, d);
 	}
 
-	spin_unlock_irqrestore(&c->vc.lock, flags);
+out:
+	vchan_unlock_irqrestore(&c->vc, flags);
 
 	return IRQ_HANDLED;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:622 @ static enum dma_status bcm2835_dma_tx_st
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
-	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_lock_irqsave(&c->vc, flags);
 	vd = vchan_find_desc(&c->vc, cookie);
 	if (vd) {
 		txstate->residue =
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:643 @ static enum dma_status bcm2835_dma_tx_st
 		txstate->residue = 0;
 	}
 
-	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_unlock_irqrestore(&c->vc, flags);
 
 	return ret;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:653 @ static void bcm2835_dma_issue_pending(st
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_lock_irqsave(&c->vc, flags);
 	if (vchan_issue_pending(&c->vc) && !c->desc)
 		bcm2835_dma_start_desc(c);
 
-	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_unlock_irqrestore(&c->vc, flags);
 }
 
+#ifdef CONFIG_DMA_BCM2835_OOB
+static int bcm2835_dma_pulse_oob(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	unsigned long flags;
+	int ret = -EIO;
+
+	vchan_lock_irqsave(&c->vc, flags);
+	if (c->desc && vchan_oob_pulsed(&c->desc->vd)) {
+		bcm2835_dma_enable_channel(c);
+		ret = 0;
+	}
+	vchan_unlock_irqrestore(&c->vc, flags);
+
+	return ret;
+}
+#else
+static int bcm2835_dma_pulse_oob(struct dma_chan *chan)
+{
+	return -ENOTSUPP;
+}
+#endif
+
 static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
 	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	size_t len, unsigned long flags)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:724 @ static struct dma_async_tx_descriptor *b
 	u32 extra = BCM2835_DMA_INT_EN;
 	size_t frames;
 
+	if (!bcm2835_dma_oob_capable()) {
+		if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+			dev_err(chan->device->dev,
+				"%s: out-of-band slave transfers disabled\n",
+				__func__);
+			return NULL;
+		}
+	}
+
 	if (!is_slave_direction(direction)) {
 		dev_err(chan->device->dev,
 			"%s: bad direction?\n", __func__);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:798 @ static struct dma_async_tx_descriptor *b
 		return NULL;
 	}
 
-	if (flags & DMA_PREP_INTERRUPT)
+	if (!bcm2835_dma_oob_capable()) {
+		if (flags & DMA_OOB_INTERRUPT) {
+			dev_err(chan->device->dev,
+				"%s: out-of-band cyclic transfers disabled\n",
+				__func__);
+			return NULL;
+		}
+	} else if (flags & DMA_OOB_PULSE) {
+		dev_err(chan->device->dev,
+			"%s: no pulse mode with out-of-band cyclic transfers\n",
+			__func__);
+		return NULL;
+	}
+
+	if (flags & (DMA_PREP_INTERRUPT|DMA_OOB_INTERRUPT))
 		extra |= BCM2835_DMA_INT_EN;
 	else
 		period_len = buf_len;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:888 @ static int bcm2835_dma_terminate_all(str
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_lock_irqsave(&c->vc, flags);
 
 	/* stop DMA activity */
 	if (c->desc) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:898 @ static int bcm2835_dma_terminate_all(str
 	}
 
 	vchan_get_all_descriptors(&c->vc, &head);
-	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_unlock_irqrestore(&c->vc, flags);
 	vchan_dma_desc_free_list(&c->vc, &head);
 
 	return 0;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1009 @ static int bcm2835_dma_probe(struct plat
 	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
 	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+	dma_cap_set(DMA_OOB, od->ddev.cap_mask);
 	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
 	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
 	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
 	od->ddev.device_tx_status = bcm2835_dma_tx_status;
 	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
+	od->ddev.device_pulse_oob = bcm2835_dma_pulse_oob;
 	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
 	od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
 	od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1081 @ static int bcm2835_dma_probe(struct plat
 			continue;
 
 		/* check if there are other channels that also use this irq */
-		irq_flags = 0;
+		irq_flags = IS_ENABLED(CONFIG_DMA_BCM2835_OOB) ? IRQF_OOB : 0;
 		for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
 			if ((i != j) && (irq[j] == irq[i])) {
-				irq_flags = IRQF_SHARED;
+				irq_flags |= IRQF_SHARED;
 				break;
 			}
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/dma/dmaengine.c linux-dovetail-v5.15.y-dovetail/drivers/dma/dmaengine.c
--- linux-5.15.26/drivers/dma/dmaengine.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/dma/dmaengine.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:581 @ int dma_get_slave_caps(struct dma_chan *
 
 	/* check if the channel supports slave transactions */
 	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
-	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
+	      test_bit(DMA_CYCLIC, device->cap_mask.bits) ||
+	      test_bit(DMA_OOB, device->cap_mask.bits)))
 		return -ENXIO;
 
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1213 @ int dma_async_device_register(struct dma
 		return -EIO;
 	}
 
+	if (dma_has_cap(DMA_OOB, device->cap_mask) && !device->device_pulse_oob) {
+		dev_err(device->dev,
+			"Device claims capability %s, but pulse handler is not defined\n",
+			"DMA_OOB");
+		return -EIO;
+	}
+
 	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
 		dev_err(device->dev,
 			"Device claims capability %s, but op is not defined\n",
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/dma/imx-sdma.c linux-dovetail-v5.15.y-dovetail/drivers/dma/imx-sdma.c
--- linux-5.15.26/drivers/dma/imx-sdma.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/dma/imx-sdma.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:515 @ struct sdma_engine {
 	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
 	bool				clk_ratio;
 	bool                            fw_loaded;
+#ifdef CONFIG_IMX_SDMA_OOB
+	hard_spinlock_t			oob_lock;
+	u32				pending_stat;
+#endif
 };
 
 static int sdma_config_write(struct dma_chan *chan,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:800 @ static struct sdma_desc *to_sdma_desc(st
 	return container_of(t, struct sdma_desc, vd.tx);
 }
 
+/* True when out-of-band (Dovetail) support is compiled in. */
+static inline bool sdma_oob_capable(void)
+{
+	return IS_ENABLED(CONFIG_IMX_SDMA_OOB);
+}
+
 static void sdma_start_desc(struct sdma_channel *sdmac)
 {
 	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:822 @ static void sdma_start_desc(struct sdma_
 
 	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
-	sdma_enable_channel(sdma, sdmac->channel);
+	if (!sdma_oob_capable() || !vchan_oob_pulsed(vd))
+		sdma_enable_channel(sdma, sdmac->channel);
 }
 
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:867 @ static void sdma_update_channel_loop(str
 		 * SDMA transaction status by the time the client tasklet is
 		 * executed.
 		 */
-		spin_unlock(&sdmac->vc.lock);
+		vchan_unlock(&sdmac->vc);
 		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
-		spin_lock(&sdmac->vc.lock);
+		vchan_lock(&sdmac->vc);
 
 		if (error)
 			sdmac->status = old_status;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:879 @ static void sdma_update_channel_loop(str
 static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 {
 	struct sdma_channel *sdmac = (struct sdma_channel *) data;
+	struct sdma_desc *desc = sdmac->desc;
 	struct sdma_buffer_descriptor *bd;
 	int i, error = 0;
 
-	sdmac->desc->chn_real_count = 0;
+	desc->chn_real_count = 0;
 	/*
 	 * non loop mode. Iterate over all descriptors, collect
 	 * errors and call callback function
 	 */
-	for (i = 0; i < sdmac->desc->num_bd; i++) {
-		bd = &sdmac->desc->bd[i];
+	for (i = 0; i < desc->num_bd; i++) {
+		bd = &desc->bd[i];
 
 		 if (bd->mode.status & (BD_DONE | BD_RROR))
 			error = -EIO;
-		 sdmac->desc->chn_real_count += bd->mode.count;
+		 desc->chn_real_count += bd->mode.count;
 	}
 
 	if (error)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:902 @ static void mxc_sdma_handle_channel_norm
 		sdmac->status = DMA_COMPLETE;
 }
 
-static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+/*
+ * Process the per-channel interrupt status word @stat (one bit per
+ * channel). Returns the subset of @stat bits which could NOT be
+ * completed in the current (out-of-band) context; the caller must
+ * replay those from in-band context.
+ */
+static unsigned long sdma_do_channels(struct sdma_engine *sdma,
+				unsigned long stat)
 {
-	struct sdma_engine *sdma = dev_id;
-	unsigned long stat;
+	/* @mask tracks channels left to visit; @stat tracks leftovers. */
+	unsigned long mask = stat;
 
-	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
-	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
-	/* channel 0 is special and not handled here, see run_channel0() */
-	stat &= ~1;
-
-	while (stat) {
-		int channel = fls(stat) - 1;
+	while (mask) {
+		int channel = fls(mask) - 1;
 		struct sdma_channel *sdmac = &sdma->channel[channel];
 		struct sdma_desc *desc;
 
-		spin_lock(&sdmac->vc.lock);
+		vchan_lock(&sdmac->vc);
 		desc = sdmac->desc;
 		if (desc) {
+			/*
+			 * From oob context, descriptors not marked for
+			 * out-of-band handling are deferred: their bit
+			 * stays set in @stat for the in-band replay.
+			 */
+			if (running_oob() && !vchan_oob_handled(&desc->vd))
+				goto next;
 			if (sdmac->flags & IMX_DMA_SG_LOOP) {
 				sdma_update_channel_loop(sdmac);
 			} else {
 				mxc_sdma_handle_channel_normal(sdmac);
+				if (running_oob()) {
+					/* Run the callback unlocked. */
+					vchan_unlock(&sdmac->vc);
+					dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+					__clear_bit(channel, &stat);
+					goto next_unlocked;
+				}
 				vchan_cookie_complete(&desc->vd);
 				sdma_start_desc(sdmac);
 			}
 		}
 		__clear_bit(channel, &stat);
+	next:
+		vchan_unlock(&sdmac->vc);
+	next_unlocked:
+		__clear_bit(channel, &mask);
 	}
 
+	return stat;
+}
+
+/*
+ * IRQ entry point. With CONFIG_IMX_SDMA_OOB, the handler runs first
+ * from the out-of-band stage; channel events it cannot complete
+ * there are accumulated into sdma->pending_stat and replayed when
+ * the in-band stage re-enters this handler via irq_post_inband().
+ * Channel 0 is always masked out (& ~1); see run_channel0().
+ */
+static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+{
+	struct sdma_engine *sdma = dev_id;
+	unsigned long stat, flags __maybe_unused;
+
+#ifdef CONFIG_IMX_SDMA_OOB
+	if (running_oob()) {
+		/* Ack the hardware immediately from the oob stage. */
+		stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+		writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+		/*
+		 * Locking is only to guard against IRQ migration with
+		 * a delayed in-band event running from a remote CPU
+		 * after some IRQ routing changed the affinity of the
+		 * out-of-band handler in the meantime.
+		 */
+		stat = sdma_do_channels(sdma, stat & ~1);
+		if (stat) {
+			raw_spin_lock(&sdma->oob_lock);
+			sdma->pending_stat |= stat;
+			raw_spin_unlock(&sdma->oob_lock);
+			/* Call us back from in-band context. */
+			irq_post_inband(irq);
+		}
+		return IRQ_HANDLED;
+	}
+
+	/* In-band IRQ context: stalled, but hard irqs are on. */
+	raw_spin_lock_irqsave(&sdma->oob_lock, flags);
+	stat = sdma->pending_stat;
+	sdma->pending_stat = 0;
+	raw_spin_unlock_irqrestore(&sdma->oob_lock, flags);
+	sdma_do_channels(sdma, stat);
+#else
+	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+	/* channel 0 is special and not handled here, see run_channel0() */
+	sdma_do_channels(sdma, stat & ~1);
+#endif
+
 	return IRQ_HANDLED;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1185 @ static int sdma_terminate_all(struct dma
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_lock_irqsave(&sdmac->vc, flags);
 
 	sdma_disable_channel(chan);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1199 @ static int sdma_terminate_all(struct dma
 		 */
 		vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
 		sdmac->desc = NULL;
+		vchan_unlock_irqrestore(&sdmac->vc, flags);
 		schedule_work(&sdmac->terminate_worker);
+	} else {
+		vchan_unlock_irqrestore(&sdmac->vc, flags);
 	}
 
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
-
 	return 0;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1568 @ static struct dma_async_tx_descriptor *s
 	struct scatterlist *sg;
 	struct sdma_desc *desc;
 
+	if (!sdma_oob_capable()) {
+		if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+			dev_err(sdma->dev,
+				"%s: out-of-band slave transfers disabled\n",
+				__func__);
+			return NULL;
+		}
+	}
+
 	sdma_config_write(chan, &sdmac->slave_config, direction);
 
 	desc = sdma_transfer_init(sdmac, direction, sg_len);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1628 @ static struct dma_async_tx_descriptor *s
 
 		if (i + 1 == sg_len) {
 			param |= BD_INTR;
-			param |= BD_LAST;
+			if (!sdma_oob_capable() || !(flags & DMA_OOB_PULSE))
+				param |= BD_LAST;
 			param &= ~BD_CONT;
 		}
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1664 @ static struct dma_async_tx_descriptor *s
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
 
+	if (!sdma_oob_capable()) {
+		if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+			dev_err(sdma->dev,
+				"%s: out-of-band cyclic transfers disabled\n",
+				__func__);
+			return NULL;
+		}
+	} else if (flags & DMA_OOB_PULSE) {
+		dev_err(chan->device->dev,
+			"%s: no pulse mode with out-of-band cyclic transfers\n",
+			__func__);
+		return NULL;
+	}
+
 	sdma_config_write(chan, &sdmac->slave_config, direction);
 
 	desc = sdma_transfer_init(sdmac, direction, num_periods);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1800 @ static enum dma_status sdma_tx_status(st
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_lock_irqsave(&sdmac->vc, flags);
 
 	vd = vchan_find_desc(&sdmac->vc, cookie);
 	if (vd)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1818 @ static enum dma_status sdma_tx_status(st
 		residue = 0;
 	}
 
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_unlock_irqrestore(&sdmac->vc, flags);
 
 	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
 			 residue);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1831 @ static void sdma_issue_pending(struct dm
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_lock_irqsave(&sdmac->vc, flags);
 	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
 		sdma_start_desc(sdmac);
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_unlock_irqrestore(&sdmac->vc, flags);
 }
 
+#ifdef CONFIG_IMX_SDMA_OOB
+/*
+ * Trigger one shot of a transfer prepared with DMA_OOB_PULSE: set
+ * BD_DONE on every buffer descriptor, restore BD_WRAP on the last
+ * one, then start the channel. Runs via device_pulse_oob.
+ *
+ * Returns 0 on success, -EIO if no pulsed descriptor is active.
+ */
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	/*
+	 * NOTE(review): sdmac->desc is sampled before the vchan lock
+	 * is taken (it is re-checked for NULL under the lock below) —
+	 * confirm this cannot race with channel termination.
+	 */
+	struct sdma_desc *desc = sdmac->desc;
+	unsigned long flags;
+	int n, ret = -EIO;
+
+	vchan_lock_irqsave(&sdmac->vc, flags);
+	if (desc && vchan_oob_pulsed(&desc->vd)) {
+		for (n = 0; n < desc->num_bd - 1; n++)
+			desc->bd[n].mode.status |= BD_DONE;
+		desc->bd[n].mode.status |= BD_DONE|BD_WRAP;
+		sdma_enable_channel(sdmac->sdma, sdmac->channel);
+		ret = 0;
+	}
+	vchan_unlock_irqrestore(&sdmac->vc, flags);
+
+	return ret;
+}
+#else
+/* Out-of-band support compiled out: pulsing is not available. */
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+	return -ENOTSUPP;
+}
+#endif
+
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	45
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2214 @ static int sdma_probe(struct platform_de
 	if (ret)
 		goto err_clk;
 
-	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
-			       sdma);
+	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler,
+			IS_ENABLED(CONFIG_IMX_SDMA_OOB) ? IRQF_OOB : 0,
+			"sdma", sdma);
 	if (ret)
 		goto err_irq;
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2235 @ static int sdma_probe(struct platform_de
 
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_OOB, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
 
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2286 @ static int sdma_probe(struct platform_de
 	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
+	sdma->dma_device.device_pulse_oob = sdma_pulse_oob;
 	sdma->dma_device.copy_align = 2;
 	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2329 @ static int sdma_probe(struct platform_de
 			dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
 	}
 
+	/*
+	 * Keep the clocks enabled at any time if we plan to use the
+	 * DMA from out-of-band context, bumping their refcount to
+	 * keep them on until sdma_remove() is called eventually.
+	 */
+	if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+		clk_enable(sdma->clk_ipg);
+		clk_enable(sdma->clk_ahb);
+	}
+
 	return 0;
 
 err_register:
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2357 @ static int sdma_remove(struct platform_d
 	struct sdma_engine *sdma = platform_get_drvdata(pdev);
 	int i;
 
+	if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+		clk_disable(sdma->clk_ahb);
+		clk_disable(sdma->clk_ipg);
+	}
+
 	devm_free_irq(&pdev->dev, sdma->irq, sdma);
 	dma_async_device_unregister(&sdma->dma_device);
 	kfree(sdma->script_addrs);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/dma/Kconfig linux-dovetail-v5.15.y-dovetail/drivers/dma/Kconfig
--- linux-5.15.26/drivers/dma/Kconfig	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/dma/Kconfig	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:50 @ config DMA_ENGINE
 config DMA_VIRTUAL_CHANNELS
 	tristate
 
+config DMA_VIRTUAL_CHANNELS_OOB
+	def_bool n
+	depends on DMA_VIRTUAL_CHANNELS && DOVETAIL
+
 config DMA_ACPI
 	def_bool y
 	depends on ACPI
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:138 @ config DMA_BCM2835
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config DMA_BCM2835_OOB
+	bool "Out-of-band support for BCM2835 DMA"
+	depends on DMA_BCM2835 && DOVETAIL
+	select DMA_VIRTUAL_CHANNELS_OOB
+	help
+	  Enable out-of-band requests to BCM2835 DMA.
+
 config DMA_JZ4780
 	tristate "JZ4780 DMA support"
 	depends on MIPS || COMPILE_TEST
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:283 @ config IMX_SDMA
 	  Support the i.MX SDMA engine. This engine is integrated into
 	  Freescale i.MX25/31/35/51/53/6 chips.
 
+config IMX_SDMA_OOB
+	bool "Out-of-band support for i.MX SDMA"
+	depends on IMX_SDMA && DOVETAIL
+	select DMA_VIRTUAL_CHANNELS_OOB
+	help
+	  Enable out-of-band requests to i.MX SDMA.
+
 config INTEL_IDMA64
 	tristate "Intel integrated DMA 64-bit support"
 	select DMA_ENGINE
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/dma/virt-dma.c linux-dovetail-v5.15.y-dovetail/drivers/dma/virt-dma.c
--- linux-5.15.26/drivers/dma/virt-dma.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/dma/virt-dma.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:26 @ dma_cookie_t vchan_tx_submit(struct dma_
 	unsigned long flags;
 	dma_cookie_t cookie;
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 	cookie = dma_cookie_assign(tx);
 
 	list_move_tail(&vd->node, &vc->desc_submitted);
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
 		vc, vd, cookie);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:55 @ int vchan_tx_desc_free(struct dma_async_
 	struct virt_dma_desc *vd = to_virt_desc(tx);
 	unsigned long flags;
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 	list_del(&vd->node);
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
 		vc, vd, vd->tx.cookie);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:90 @ static void vchan_complete(struct taskle
 	struct dmaengine_desc_callback cb;
 	LIST_HEAD(head);
 
-	spin_lock_irq(&vc->lock);
+	vchan_lock_irq(vc);
 	list_splice_tail_init(&vc->desc_completed, &head);
 	vd = vc->cyclic;
 	if (vd) {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:99 @ static void vchan_complete(struct taskle
 	} else {
 		memset(&cb, 0, sizeof(cb));
 	}
-	spin_unlock_irq(&vc->lock);
+	vchan_unlock_irq(vc);
 
 	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:123 @ void vchan_dma_desc_free_list(struct vir
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
 
+#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB
+
+/*
+ * Two interchangeable backends for the vchan lock (see struct
+ * virt_dma_lockops in virt-dma.h). vchan_init() selects one per
+ * channel based on the device's DMA_OOB capability.
+ *
+ * In-band operations: regular spinlock, for devices without the
+ * DMA_OOB capability.
+ */
+
+static void inband_init_chan_lock(struct virt_dma_chan *vc)
+{
+	spin_lock_init(&vc->lock);
+}
+
+static void inband_lock_chan(struct virt_dma_chan *vc)
+{
+	spin_lock(&vc->lock);
+}
+
+static void inband_unlock_chan(struct virt_dma_chan *vc)
+{
+	spin_unlock(&vc->lock);
+}
+
+static void inband_lock_irq_chan(struct virt_dma_chan *vc)
+{
+	spin_lock_irq(&vc->lock);
+}
+
+static void inband_unlock_irq_chan(struct virt_dma_chan *vc)
+{
+	spin_unlock_irq(&vc->lock);
+}
+
+/* Returns the saved irq state instead of filling a caller variable. */
+static unsigned long inband_lock_irqsave_chan(struct virt_dma_chan *vc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vc->lock, flags);
+
+	return flags;
+}
+
+static void inband_unlock_irqrestore_chan(struct virt_dma_chan *vc,
+			unsigned long flags)
+{
+	spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static struct virt_dma_lockops inband_lock_ops = {
+	.init			= inband_init_chan_lock,
+	.lock			= inband_lock_chan,
+	.unlock			= inband_unlock_chan,
+	.lock_irq		= inband_lock_irq_chan,
+	.unlock_irq		= inband_unlock_irq_chan,
+	.lock_irqsave		= inband_lock_irqsave_chan,
+	.unlock_irqrestore	= inband_unlock_irqrestore_chan,
+};
+
+/*
+ * Out-of-band operations: raw/hard spinlock (vc->oob_lock), selected
+ * for DMA_OOB-capable devices (see vchan_init()).
+ */
+
+static void oob_init_chan_lock(struct virt_dma_chan *vc)
+{
+	raw_spin_lock_init(&vc->oob_lock);
+}
+
+static void oob_lock_chan(struct virt_dma_chan *vc)
+{
+	raw_spin_lock(&vc->oob_lock);
+}
+
+static void oob_unlock_chan(struct virt_dma_chan *vc)
+{
+	raw_spin_unlock(&vc->oob_lock);
+}
+
+static void oob_lock_irq_chan(struct virt_dma_chan *vc)
+{
+	raw_spin_lock_irq(&vc->oob_lock);
+}
+
+static void oob_unlock_irq_chan(struct virt_dma_chan *vc)
+{
+	raw_spin_unlock_irq(&vc->oob_lock);
+}
+
+static unsigned long oob_lock_irqsave_chan(struct virt_dma_chan *vc)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vc->oob_lock, flags);
+
+	return flags;
+}
+
+static void oob_unlock_irqrestore_chan(struct virt_dma_chan *vc,
+				unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&vc->oob_lock, flags);
+}
+
+static struct virt_dma_lockops oob_lock_ops = {
+	.init			= oob_init_chan_lock,
+	.lock			= oob_lock_chan,
+	.unlock			= oob_unlock_chan,
+	.lock_irq		= oob_lock_irq_chan,
+	.unlock_irq		= oob_unlock_irq_chan,
+	.lock_irqsave		= oob_lock_irqsave_chan,
+	.unlock_irqrestore	= oob_unlock_irqrestore_chan,
+};
+
+#endif
+
 void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 {
 	dma_cookie_init(&vc->chan);
 
-	spin_lock_init(&vc->lock);
+#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB
+	vc->lock_ops = test_bit(DMA_OOB, dmadev->cap_mask.bits) ?
+		&oob_lock_ops : &inband_lock_ops;
+#endif
+	vchan_lock_init(vc);
 	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/dma/virt-dma.h linux-dovetail-v5.15.y-dovetail/drivers/dma/virt-dma.h
--- linux-5.15.26/drivers/dma/virt-dma.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/dma/virt-dma.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:22 @ struct virt_dma_desc {
 	struct list_head node;
 };
 
+struct virt_dma_lockops;
+
 struct virt_dma_chan {
 	struct dma_chan	chan;
 	struct tasklet_struct task;
 	void (*desc_free)(struct virt_dma_desc *);
 
+#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB
+	struct virt_dma_lockops *lock_ops;
+	union {
+		spinlock_t lock;
+		hard_spinlock_t oob_lock;
+	};
+#else
 	spinlock_t lock;
+#endif
 
 	/* protected by vc.lock */
 	struct list_head desc_allocated;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:54 @ static inline struct virt_dma_chan *to_v
 	return container_of(chan, struct virt_dma_chan, chan);
 }
 
+#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB
+
+/*
+ * Indirected lock operations: vchan_init() points vc->lock_ops at
+ * either the regular-spinlock or the hard-spinlock implementation
+ * (both in virt-dma.c), depending on the device's DMA_OOB capability.
+ */
+struct virt_dma_lockops {
+	void (*init)(struct virt_dma_chan *vc);
+	void (*lock)(struct virt_dma_chan *vc);
+	void (*unlock)(struct virt_dma_chan *vc);
+	void (*lock_irq)(struct virt_dma_chan *vc);
+	void (*unlock_irq)(struct virt_dma_chan *vc);
+	unsigned long (*lock_irqsave)(struct virt_dma_chan *vc);
+	void (*unlock_irqrestore)(struct virt_dma_chan *vc,
+				unsigned long flags);
+};
+
+static inline void vchan_lock_init(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->init(vc);
+}
+
+static inline void vchan_lock(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->lock(vc);
+}
+
+static inline void vchan_unlock(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->unlock(vc);
+}
+
+static inline void vchan_lock_irq(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->lock_irq(vc);
+}
+
+static inline void vchan_unlock_irq(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->unlock_irq(vc);
+}
+
+static inline
+unsigned long __vchan_lock_irqsave(struct virt_dma_chan *vc)
+{
+	return vc->lock_ops->lock_irqsave(vc);
+}
+
+/*
+ * Store the saved irq state into the caller's variable, matching the
+ * spin_lock_irqsave() calling convention.
+ */
+#define vchan_lock_irqsave(__vc, __flags)		\
+	do {						\
+		(__flags) = __vchan_lock_irqsave(__vc);	\
+	} while (0)
+
+static inline
+void vchan_unlock_irqrestore(struct virt_dma_chan *vc,
+			unsigned long flags)
+{
+	vc->lock_ops->unlock_irqrestore(vc, flags);
+}
+
+/*
+ * True if @vd was prepared with DMA_OOB_INTERRUPT, i.e. its
+ * completion may be handled directly from out-of-band context.
+ */
+static inline bool vchan_oob_handled(struct virt_dma_desc *vd)
+{
+	return !!(vd->tx.flags & DMA_OOB_INTERRUPT);
+}
+
+/*
+ * True if @vd was prepared with DMA_OOB_PULSE, i.e. the transfer is
+ * (re)triggered manually through the device's pulse_oob handler.
+ */
+static inline bool vchan_oob_pulsed(struct virt_dma_desc *vd)
+{
+	return !!(vd->tx.flags & DMA_OOB_PULSE);
+}
+
+#else
+
+/* !DMA_VIRTUAL_CHANNELS_OOB: map straight to the plain spinlock. */
+#define vchan_lock_init(__vc)				\
+	spin_lock_init(&(__vc)->lock)
+
+#define vchan_lock(__vc)				\
+	spin_lock(&(__vc)->lock)
+
+#define vchan_unlock(__vc)				\
+	spin_unlock(&(__vc)->lock)
+
+#define vchan_lock_irq(__vc)				\
+	spin_lock_irq(&(__vc)->lock)
+
+#define vchan_unlock_irq(__vc)				\
+	spin_unlock_irq(&(__vc)->lock)
+
+#define vchan_lock_irqsave(__vc, __flags)		\
+	spin_lock_irqsave(&(__vc)->lock, __flags)
+
+#define vchan_unlock_irqrestore(__vc, __flags)		\
+	spin_unlock_irqrestore(&(__vc)->lock, __flags)
+
+static inline bool vchan_oob_handled(struct virt_dma_desc *vd)
+{
+	return false;
+}
+
+static inline bool vchan_oob_pulsed(struct virt_dma_desc *vd)
+{
+	return false;
+}
+
+#endif	/* !CONFIG_DMA_VIRTUAL_CHANNELS_OOB */
+
 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
 void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
 struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:180 @ static inline struct dma_async_tx_descri
 	vd->tx_result.result = DMA_TRANS_NOERROR;
 	vd->tx_result.residue = 0;
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 	list_add_tail(&vd->node, &vc->desc_allocated);
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	return &vd->tx;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:230 @ static inline void vchan_vdesc_fini(stru
 	if (dmaengine_desc_test_reuse(&vd->tx)) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&vc->lock, flags);
+		vchan_lock_irqsave(vc, flags);
 		list_add(&vd->node, &vc->desc_allocated);
-		spin_unlock_irqrestore(&vc->lock, flags);
+		vchan_unlock_irqrestore(vc, flags);
 	} else {
 		vc->desc_free(vd);
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:304 @ static inline void vchan_free_chan_resou
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 	vchan_get_all_descriptors(vc, &head);
 	list_for_each_entry(vd, &head, node)
 		dmaengine_desc_clear_reuse(&vd->tx);
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:329 @ static inline void vchan_synchronize(str
 
 	tasklet_kill(&vc->task);
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 
 	list_splice_tail_init(&vc->desc_terminated, &head);
 
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
 }
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpio/gpio-mxc.c linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-mxc.c
--- linux-5.15.26/drivers/gpio/gpio-mxc.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-mxc.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:337 @ static int mxc_gpio_init_gc(struct mxc_g
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->chip.irq_set_type = gpio_set_irq_type;
 	ct->chip.irq_set_wake = gpio_set_wake_irq;
-	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
+	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND |
+		IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE;
 	ct->regs.ack = GPIO_ISR;
 	ct->regs.mask = GPIO_IMR;
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpio/gpio-omap.c linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-omap.c
--- linux-5.15.26/drivers/gpio/gpio-omap.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-omap.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:58 @ struct gpio_bank {
 	u32 saved_datain;
 	u32 level_mask;
 	u32 toggle_mask;
-	raw_spinlock_t lock;
+	hard_spinlock_t lock;
 	raw_spinlock_t wa_lock;
 	struct gpio_chip chip;
 	struct clk *dbck;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1057 @ static int omap_gpio_chip_init(struct gp
 
 	ret = devm_request_irq(bank->chip.parent, bank->irq,
 			       omap_gpio_irq_handler,
-			       0, dev_name(bank->chip.parent), bank);
+			       IRQF_OOB, dev_name(bank->chip.parent), bank);
 	if (ret)
 		gpiochip_remove(&bank->chip);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1404 @ static int omap_gpio_probe(struct platfo
 	irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
 	irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
 	irqc->name = dev_name(&pdev->dev);
-	irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
+	irqc->flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE;
 	irqc->parent_device = dev;
 
 	bank->irq = platform_get_irq(pdev, 0);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpio/gpio-pl061.c linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-pl061.c
--- linux-5.15.26/drivers/gpio/gpio-pl061.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-pl061.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:51 @ struct pl061_context_save_regs {
 #endif
 
 struct pl061 {
-	raw_spinlock_t		lock;
+	hard_spinlock_t		lock;
 
 	void __iomem		*base;
 	struct gpio_chip	gc;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:324 @ static int pl061_probe(struct amba_devic
 	pl061->irq_chip.irq_unmask = pl061_irq_unmask;
 	pl061->irq_chip.irq_set_type = pl061_irq_type;
 	pl061->irq_chip.irq_set_wake = pl061_irq_set_wake;
+	pl061->irq_chip.flags = IRQCHIP_PIPELINE_SAFE;
 
 	writeb(0, pl061->base + GPIOIE); /* disable irqs */
 	irq = adev->irq[0];
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpio/gpio-xilinx.c linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-xilinx.c
--- linux-5.15.26/drivers/gpio/gpio-xilinx.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-xilinx.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:69 @ struct xgpio_instance {
 	DECLARE_BITMAP(state, 64);
 	DECLARE_BITMAP(last_irq_read, 64);
 	DECLARE_BITMAP(dir, 64);
-	spinlock_t gpio_lock;	/* For serializing operations */
+	hard_spinlock_t gpio_lock;	/* For serializing operations */
 	int irq;
 	struct irq_chip irqchip;
 	DECLARE_BITMAP(enable, 64);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:182 @ static void xgpio_set(struct gpio_chip *
 	struct xgpio_instance *chip = gpiochip_get_data(gc);
 	int bit = xgpio_to_bit(chip, gpio);
 
-	spin_lock_irqsave(&chip->gpio_lock, flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
 
 	/* Write to GPIO signal and set its direction to output */
 	__assign_bit(bit, chip->state, val);
 
 	xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
 
-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
 }
 
 /**
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:213 @ static void xgpio_set_multiple(struct gp
 	bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
 	bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
 
-	spin_lock_irqsave(&chip->gpio_lock, flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
 
 	bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:221 @ static void xgpio_set_multiple(struct gp
 
 	bitmap_copy(chip->state, state, 64);
 
-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
 }
 
 /**
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:239 @ static int xgpio_dir_in(struct gpio_chip
 	struct xgpio_instance *chip = gpiochip_get_data(gc);
 	int bit = xgpio_to_bit(chip, gpio);
 
-	spin_lock_irqsave(&chip->gpio_lock, flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
 
 	/* Set the GPIO bit in shadow register and set direction as input */
 	__set_bit(bit, chip->dir);
 	xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
 
-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
 
 	return 0;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:268 @ static int xgpio_dir_out(struct gpio_chi
 	struct xgpio_instance *chip = gpiochip_get_data(gc);
 	int bit = xgpio_to_bit(chip, gpio);
 
-	spin_lock_irqsave(&chip->gpio_lock, flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
 
 	/* Write state of GPIO signal */
 	__assign_bit(bit, chip->state, val);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:278 @ static int xgpio_dir_out(struct gpio_chi
 	__clear_bit(bit, chip->dir);
 	xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
 
-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
 
 	return 0;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:408 @ static void xgpio_irq_mask(struct irq_da
 	int bit = xgpio_to_bit(chip, irq_offset);
 	u32 mask = BIT(bit / 32), temp;
 
-	spin_lock_irqsave(&chip->gpio_lock, flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
 
 	__clear_bit(bit, chip->enable);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:418 @ static void xgpio_irq_mask(struct irq_da
 		temp &= ~mask;
 		xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
 	}
-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
 }
 
 /**
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:434 @ static void xgpio_irq_unmask(struct irq_
 	u32 old_enable = xgpio_get_value32(chip->enable, bit);
 	u32 mask = BIT(bit / 32), val;
 
-	spin_lock_irqsave(&chip->gpio_lock, flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
 
 	__set_bit(bit, chip->enable);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:453 @ static void xgpio_irq_unmask(struct irq_
 		xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
 	}
 
-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
 }
 
 /**
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:518 @ static void xgpio_irqhandler(struct irq_
 
 	chained_irq_enter(irqchip, desc);
 
-	spin_lock(&chip->gpio_lock);
+	raw_spin_lock(&chip->gpio_lock);
 
 	xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:535 @ static void xgpio_irqhandler(struct irq_
 	bitmap_copy(chip->last_irq_read, all, 64);
 	bitmap_or(all, rising, falling, 64);
 
-	spin_unlock(&chip->gpio_lock);
+	raw_spin_unlock(&chip->gpio_lock);
 
 	dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:626 @ static int xgpio_probe(struct platform_d
 	bitmap_set(chip->hw_map,  0, width[0]);
 	bitmap_set(chip->hw_map, 32, width[1]);
 
-	spin_lock_init(&chip->gpio_lock);
+	raw_spin_lock_init(&chip->gpio_lock);
 
 	chip->gc.base = -1;
 	chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpio/gpio-zynq.c linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-zynq.c
--- linux-5.15.26/drivers/gpio/gpio-zynq.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpio/gpio-zynq.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:604 @ static struct irq_chip zynq_gpio_level_i
 	.irq_request_resources = zynq_gpio_irq_reqres,
 	.irq_release_resources = zynq_gpio_irq_relres,
 	.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
-			  IRQCHIP_MASK_ON_SUSPEND,
+			  IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct irq_chip zynq_gpio_edge_irqchip = {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:617 @ static struct irq_chip zynq_gpio_edge_ir
 	.irq_set_wake	= zynq_gpio_set_wake,
 	.irq_request_resources = zynq_gpio_irq_reqres,
 	.irq_release_resources = zynq_gpio_irq_relres,
-	.flags		= IRQCHIP_MASK_ON_SUSPEND,
+	.flags		= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE,
 };
 
 static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h linux-dovetail-v5.15.y-dovetail/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
--- linux-5.15.26/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:18910 @
 #define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK                                                              0x00020000L
 #define CB_COLOR6_ATTRIB__COLOR_SW_MODE_MASK                                                                  0x007C0000L
 #define CB_COLOR6_ATTRIB__FMASK_SW_MODE_MASK                                                                  0x0F800000L
-#define CB_COLOR6_ATTRIB__RESOURCE_TYPE_MASK                                                                  0x30000000L
-#define CB_COLOR6_ATTRIB__RB_ALIGNED_MASK                                                                     0x40000000L
-#define CB_COLOR6_ATTRIB__PIPE_ALIGNED_MASK                                                                   0x80000000L
+#define CB_COLOR6_ATTRIB__RESOURCE_TYPE_MASK                                                                  0x30000000L
+#define CB_COLOR6_ATTRIB__RB_ALIGNED_MASK                                                                     0x40000000L
+#define CB_COLOR6_ATTRIB__PIPE_ALIGNED_MASK                                                                   0x80000000L
 //CB_COLOR6_DCC_CONTROL
 #define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT                                              0x0
 #define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT                                                        0x1
 #define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT                                             0x2
 #define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT                                               0x4
 #define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT                                               0x5
-#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM__SHIFT                                                         0x7
-#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT                                                  0x9
-#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT                                                     0xa
-#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT                                                   0xe
+#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM__SHIFT                                                         0x7
+#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT                                                  0x9
+#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT                                                     0xa
+#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT                                                   0xe
 #define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK                                                0x00000001L
 #define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK                                                          0x00000002L
 #define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK                                               0x0000000CL
 #define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK                                                 0x00000010L
-#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK                                                 0x00000060L
-#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM_MASK                                                           0x00000180L
-#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK                                                    0x00000200L
-#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK                                                       0x00003C00L
+#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK                                                 0x00000060L
+#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM_MASK                                                           0x00000180L
+#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK                                                    0x00000200L
+#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK                                                       0x00003C00L
 #define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK                                                     0x0003C000L
 //CB_COLOR6_CMASK
 #define CB_COLOR6_CMASK__BASE_256B__SHIFT                                                                     0x0
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:18937 @
 //CB_COLOR6_CMASK_BASE_EXT
 #define CB_COLOR6_CMASK_BASE_EXT__BASE_256B__SHIFT                                                            0x0
 #define CB_COLOR6_CMASK_BASE_EXT__BASE_256B_MASK                                                              0x000000FFL
-//CB_COLOR6_FMASK
-#define CB_COLOR6_FMASK__BASE_256B__SHIFT                                                                     0x0
+//CB_COLOR6_FMASK
+#define CB_COLOR6_FMASK__BASE_256B__SHIFT                                                                     0x0
 #define CB_COLOR6_FMASK__BASE_256B_MASK                                                                       0xFFFFFFFFL
-//CB_COLOR6_FMASK_BASE_EXT
-#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B__SHIFT                                                            0x0
+//CB_COLOR6_FMASK_BASE_EXT
+#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B__SHIFT                                                            0x0
 #define CB_COLOR6_FMASK_BASE_EXT__BASE_256B_MASK                                                              0x000000FFL
 //CB_COLOR6_CLEAR_WORD0
 #define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0__SHIFT                                                             0x0
-#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0_MASK                                                               0xFFFFFFFFL
-//CB_COLOR6_CLEAR_WORD1
-#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1__SHIFT                                                             0x0
+#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0_MASK                                                               0xFFFFFFFFL
+//CB_COLOR6_CLEAR_WORD1
+#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1__SHIFT                                                             0x0
 #define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1_MASK                                                               0xFFFFFFFFL
 //CB_COLOR6_DCC_BASE
 #define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT                                                                  0x0
 #define CB_COLOR6_DCC_BASE__BASE_256B_MASK                                                                    0xFFFFFFFFL
 //CB_COLOR6_DCC_BASE_EXT
 #define CB_COLOR6_DCC_BASE_EXT__BASE_256B__SHIFT                                                              0x0
-#define CB_COLOR6_DCC_BASE_EXT__BASE_256B_MASK                                                                0x000000FFL
-//CB_COLOR7_BASE
-#define CB_COLOR7_BASE__BASE_256B__SHIFT                                                                      0x0
-#define CB_COLOR7_BASE__BASE_256B_MASK                                                                        0xFFFFFFFFL
-//CB_COLOR7_BASE_EXT
-#define CB_COLOR7_BASE_EXT__BASE_256B__SHIFT                                                                  0x0
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B_MASK                                                                0x000000FFL
+//CB_COLOR7_BASE
+#define CB_COLOR7_BASE__BASE_256B__SHIFT                                                                      0x0
+#define CB_COLOR7_BASE__BASE_256B_MASK                                                                        0xFFFFFFFFL
+//CB_COLOR7_BASE_EXT
+#define CB_COLOR7_BASE_EXT__BASE_256B__SHIFT                                                                  0x0
 #define CB_COLOR7_BASE_EXT__BASE_256B_MASK                                                                    0x000000FFL
 //CB_COLOR7_ATTRIB2
 #define CB_COLOR7_ATTRIB2__MIP0_HEIGHT__SHIFT                                                                 0x0
 #define CB_COLOR7_ATTRIB2__MIP0_WIDTH__SHIFT                                                                  0xe
 #define CB_COLOR7_ATTRIB2__MAX_MIP__SHIFT                                                                     0x1c
 #define CB_COLOR7_ATTRIB2__MIP0_HEIGHT_MASK                                                                   0x00003FFFL
-#define CB_COLOR7_ATTRIB2__MIP0_WIDTH_MASK                                                                    0x0FFFC000L
-#define CB_COLOR7_ATTRIB2__MAX_MIP_MASK                                                                       0xF0000000L
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH_MASK                                                                    0x0FFFC000L
+#define CB_COLOR7_ATTRIB2__MAX_MIP_MASK                                                                       0xF0000000L
 //CB_COLOR7_VIEW
-#define CB_COLOR7_VIEW__SLICE_START__SHIFT                                                                    0x0
-#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT                                                                      0xd
+#define CB_COLOR7_VIEW__SLICE_START__SHIFT                                                                    0x0
+#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT                                                                      0xd
 #define CB_COLOR7_VIEW__MIP_LEVEL__SHIFT                                                                      0x18
 #define CB_COLOR7_VIEW__SLICE_START_MASK                                                                      0x000007FFL
 #define CB_COLOR7_VIEW__SLICE_MAX_MASK                                                                        0x00FFE000L
 #define CB_COLOR7_VIEW__MIP_LEVEL_MASK                                                                        0x0F000000L
 //CB_COLOR7_INFO
-#define CB_COLOR7_INFO__ENDIAN__SHIFT                                                                         0x0
-#define CB_COLOR7_INFO__FORMAT__SHIFT                                                                         0x2
-#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT                                                                    0x8
-#define CB_COLOR7_INFO__COMP_SWAP__SHIFT                                                                      0xb
+#define CB_COLOR7_INFO__ENDIAN__SHIFT                                                                         0x0
+#define CB_COLOR7_INFO__FORMAT__SHIFT                                                                         0x2
+#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT                                                                    0x8
+#define CB_COLOR7_INFO__COMP_SWAP__SHIFT                                                                      0xb
 #define CB_COLOR7_INFO__FAST_CLEAR__SHIFT                                                                     0xd
 #define CB_COLOR7_INFO__COMPRESSION__SHIFT                                                                    0xe
 #define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT                                                                    0xf
 #define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT                                                                   0x10
 #define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT                                                                   0x11
-#define CB_COLOR7_INFO__ROUND_MODE__SHIFT                                                                     0x12
-#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT                                                          0x14
-#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT                                                        0x17
-#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE__SHIFT                                                      0x1a
+#define CB_COLOR7_INFO__ROUND_MODE__SHIFT                                                                     0x12
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT                                                          0x14
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT                                                        0x17
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE__SHIFT                                                      0x1a
 #define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT                                                      0x1b
 #define CB_COLOR7_INFO__DCC_ENABLE__SHIFT                                                                     0x1c
 #define CB_COLOR7_INFO__CMASK_ADDR_TYPE__SHIFT                                                                0x1d
 #define CB_COLOR7_INFO__ENDIAN_MASK                                                                           0x00000003L
 #define CB_COLOR7_INFO__FORMAT_MASK                                                                           0x0000007CL
-#define CB_COLOR7_INFO__NUMBER_TYPE_MASK                                                                      0x00000700L
-#define CB_COLOR7_INFO__COMP_SWAP_MASK                                                                        0x00001800L
-#define CB_COLOR7_INFO__FAST_CLEAR_MASK                                                                       0x00002000L
-#define CB_COLOR7_INFO__COMPRESSION_MASK                                                                      0x00004000L
+#define CB_COLOR7_INFO__NUMBER_TYPE_MASK                                                                      0x00000700L
+#define CB_COLOR7_INFO__COMP_SWAP_MASK                                                                        0x00001800L
+#define CB_COLOR7_INFO__FAST_CLEAR_MASK                                                                       0x00002000L
+#define CB_COLOR7_INFO__COMPRESSION_MASK                                                                      0x00004000L
 #define CB_COLOR7_INFO__BLEND_CLAMP_MASK                                                                      0x00008000L
 #define CB_COLOR7_INFO__BLEND_BYPASS_MASK                                                                     0x00010000L
 #define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK                                                                     0x00020000L
 #define CB_COLOR7_INFO__ROUND_MODE_MASK                                                                       0x00040000L
-#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK                                                            0x00700000L
-#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK                                                          0x03800000L
-#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE_MASK                                                        0x04000000L
-#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK                                                        0x08000000L
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK                                                            0x00700000L
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK                                                          0x03800000L
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE_MASK                                                        0x04000000L
+#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK                                                        0x08000000L
 #define CB_COLOR7_INFO__DCC_ENABLE_MASK                                                                       0x10000000L
 #define CB_COLOR7_INFO__CMASK_ADDR_TYPE_MASK                                                                  0x60000000L
 //CB_COLOR7_ATTRIB
 #define CB_COLOR7_ATTRIB__MIP0_DEPTH__SHIFT                                                                   0x0
 #define CB_COLOR7_ATTRIB__META_LINEAR__SHIFT                                                                  0xb
 #define CB_COLOR7_ATTRIB__NUM_SAMPLES__SHIFT                                                                  0xc
-#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT                                                                0xf
-#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT                                                            0x11
-#define CB_COLOR7_ATTRIB__COLOR_SW_MODE__SHIFT                                                                0x12
-#define CB_COLOR7_ATTRIB__FMASK_SW_MODE__SHIFT                                                                0x17
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT                                                                0xf
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT                                                            0x11
+#define CB_COLOR7_ATTRIB__COLOR_SW_MODE__SHIFT                                                                0x12
+#define CB_COLOR7_ATTRIB__FMASK_SW_MODE__SHIFT                                                                0x17
 #define CB_COLOR7_ATTRIB__RESOURCE_TYPE__SHIFT                                                                0x1c
 #define CB_COLOR7_ATTRIB__RB_ALIGNED__SHIFT                                                                   0x1e
 #define CB_COLOR7_ATTRIB__PIPE_ALIGNED__SHIFT                                                                 0x1f
 #define CB_COLOR7_ATTRIB__MIP0_DEPTH_MASK                                                                     0x000007FFL
-#define CB_COLOR7_ATTRIB__META_LINEAR_MASK                                                                    0x00000800L
-#define CB_COLOR7_ATTRIB__NUM_SAMPLES_MASK                                                                    0x00007000L
+#define CB_COLOR7_ATTRIB__META_LINEAR_MASK                                                                    0x00000800L
+#define CB_COLOR7_ATTRIB__NUM_SAMPLES_MASK                                                                    0x00007000L
 #define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK                                                                  0x00018000L
 #define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK                                                              0x00020000L
 #define CB_COLOR7_ATTRIB__COLOR_SW_MODE_MASK                                                                  0x007C0000L
-#define CB_COLOR7_ATTRIB__FMASK_SW_MODE_MASK                                                                  0x0F800000L
-#define CB_COLOR7_ATTRIB__RESOURCE_TYPE_MASK                                                                  0x30000000L
-#define CB_COLOR7_ATTRIB__RB_ALIGNED_MASK                                                                     0x40000000L
-#define CB_COLOR7_ATTRIB__PIPE_ALIGNED_MASK                                                                   0x80000000L
+#define CB_COLOR7_ATTRIB__FMASK_SW_MODE_MASK                                                                  0x0F800000L
+#define CB_COLOR7_ATTRIB__RESOURCE_TYPE_MASK                                                                  0x30000000L
+#define CB_COLOR7_ATTRIB__RB_ALIGNED_MASK                                                                     0x40000000L
+#define CB_COLOR7_ATTRIB__PIPE_ALIGNED_MASK                                                                   0x80000000L
 //CB_COLOR7_DCC_CONTROL
 #define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT                                              0x0
 #define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT                                                        0x1
 #define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT                                             0x2
 #define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT                                               0x4
-#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT                                               0x5
-#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM__SHIFT                                                         0x7
+#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT                                               0x5
+#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM__SHIFT                                                         0x7
 #define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT                                                  0x9
-#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT                                                     0xa
+#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT                                                     0xa
 #define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT                                                   0xe
 #define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK                                                0x00000001L
 #define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK                                                          0x00000002L
 #define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK                                               0x0000000CL
 #define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK                                                 0x00000010L
-#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK                                                 0x00000060L
-#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM_MASK                                                           0x00000180L
-#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK                                                    0x00000200L
-#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK                                                       0x00003C00L
+#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK                                                 0x00000060L
+#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM_MASK                                                           0x00000180L
+#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK                                                    0x00000200L
+#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK                                                       0x00003C00L
 #define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK                                                     0x0003C000L
 //CB_COLOR7_CMASK
 #define CB_COLOR7_CMASK__BASE_256B__SHIFT                                                                     0x0
 #define CB_COLOR7_CMASK__BASE_256B_MASK                                                                       0xFFFFFFFFL
 //CB_COLOR7_CMASK_BASE_EXT
 #define CB_COLOR7_CMASK_BASE_EXT__BASE_256B__SHIFT                                                            0x0
-#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B_MASK                                                              0x000000FFL
-//CB_COLOR7_FMASK
-#define CB_COLOR7_FMASK__BASE_256B__SHIFT                                                                     0x0
-#define CB_COLOR7_FMASK__BASE_256B_MASK                                                                       0xFFFFFFFFL
-//CB_COLOR7_FMASK_BASE_EXT
-#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B__SHIFT                                                            0x0
+#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B_MASK                                                              0x000000FFL
+//CB_COLOR7_FMASK
+#define CB_COLOR7_FMASK__BASE_256B__SHIFT                                                                     0x0
+#define CB_COLOR7_FMASK__BASE_256B_MASK                                                                       0xFFFFFFFFL
+//CB_COLOR7_FMASK_BASE_EXT
+#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B__SHIFT                                                            0x0
 #define CB_COLOR7_FMASK_BASE_EXT__BASE_256B_MASK                                                              0x000000FFL
 //CB_COLOR7_CLEAR_WORD0
 #define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0__SHIFT                                                             0x0
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h linux-dovetail-v5.15.y-dovetail/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
--- linux-5.15.26/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:3452 @
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT                                                         0x2
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT                                                         0x4
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT                                                         0x6
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT                                                         0x8
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT                                                         0xa
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT                                                         0x8
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT                                                         0xa
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT                                                         0xc
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT                                                         0xe
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT                                                         0x10
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT                                                         0x12
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT                                                        0x14
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT                                                        0x16
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT                                                        0x18
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT                                                        0x1a
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT                                                        0x1c
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT                                                        0x16
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT                                                        0x18
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT                                                        0x1a
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT                                                        0x1c
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT                                                        0x1e
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK                                                           0x00000003L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK                                                           0x0000000CL
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK                                                           0x00000030L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK                                                           0x000000C0L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK                                                           0x00000300L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK                                                           0x00000C00L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK                                                           0x00003000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK                                                           0x000000C0L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK                                                           0x00000300L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK                                                           0x00000C00L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK                                                           0x00003000L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK                                                           0x0000C000L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK                                                           0x00030000L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK                                                           0x000C0000L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK                                                          0x00300000L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK                                                          0x00C00000L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK                                                          0x03000000L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK                                                          0x0C000000L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK                                                          0x30000000L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK                                                          0xC0000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK                                                          0x03000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK                                                          0x0C000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK                                                          0x30000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK                                                          0xC0000000L
 //MMEA0_DRAM_WR_CLI2GRP_MAP1
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT                                                        0x0
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT                                                        0x2
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT                                                        0x4
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT                                                        0x6
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT                                                        0x8
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT                                                        0xa
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT                                                        0xc
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT                                                        0xe
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT                                                        0x8
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT                                                        0xa
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT                                                        0xc
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT                                                        0xe
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT                                                        0x10
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT                                                        0x12
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT                                                        0x14
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT                                                        0x16
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT                                                        0x18
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT                                                        0x1a
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT                                                        0x1c
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT                                                        0x1a
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT                                                        0x1c
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT                                                        0x1e
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK                                                          0x00000003L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK                                                          0x00000003L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK                                                          0x0000000CL
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK                                                          0x00000030L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK                                                          0x000000C0L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK                                                          0x00000300L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK                                                          0x00000C00L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK                                                          0x00003000L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK                                                          0x0000C000L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK                                                          0x00030000L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK                                                          0x000C0000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK                                                          0x00003000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK                                                          0x0000C000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK                                                          0x00030000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK                                                          0x000C0000L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK                                                          0x00300000L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK                                                          0x00C00000L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK                                                          0x03000000L
 #define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK                                                          0x0C000000L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK                                                          0x30000000L
-#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK                                                          0xC0000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK                                                          0x30000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK                                                          0xC0000000L
 //MMEA0_DRAM_RD_GRP2VC_MAP
-#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT                                                            0x0
-#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT                                                            0x3
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT                                                            0x0
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT                                                            0x3
 #define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT                                                            0x6
 #define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT                                                            0x9
 #define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK                                                              0x00000007L
 #define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK                                                              0x00000038L
-#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK                                                              0x000001C0L
-#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK                                                              0x00000E00L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK                                                              0x000001C0L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK                                                              0x00000E00L
 //MMEA0_DRAM_WR_GRP2VC_MAP
 #define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT                                                            0x0
 #define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT                                                            0x3
 #define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT                                                            0x6
-#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT                                                            0x9
-#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK                                                              0x00000007L
-#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK                                                              0x00000038L
-#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK                                                              0x000001C0L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT                                                            0x9
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK                                                              0x00000007L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK                                                              0x00000038L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK                                                              0x000001C0L
 #define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK                                                              0x00000E00L
 //MMEA0_DRAM_RD_LAZY
 #define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT                                                               0x0
 #define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT                                                               0x3
 #define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT                                                               0x6
-#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT                                                               0x9
-#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY_MASK                                                                 0x00000007L
-#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY_MASK                                                                 0x00000038L
-#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY_MASK                                                                 0x000001C0L
+#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT                                                               0x9
+#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY_MASK                                                                 0x00000007L
+#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY_MASK                                                                 0x00000038L
+#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY_MASK                                                                 0x000001C0L
 #define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY_MASK                                                                 0x00000E00L
 //MMEA0_DRAM_WR_LAZY
 #define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT                                                               0x0
 #define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT                                                               0x3
 #define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT                                                               0x6
 #define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT                                                               0x9
-#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY_MASK                                                                 0x00000007L
-#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY_MASK                                                                 0x00000038L
-#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY_MASK                                                                 0x000001C0L
-#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY_MASK                                                                 0x00000E00L
+#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY_MASK                                                                 0x00000007L
+#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY_MASK                                                                 0x00000038L
+#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY_MASK                                                                 0x000001C0L
+#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY_MASK                                                                 0x00000E00L
 //MMEA0_DRAM_RD_CAM_CNTL
 #define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT                                                           0x0
 #define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT                                                           0x4
 #define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT                                                           0x8
 #define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT                                                           0xc
-#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT                                                   0x10
-#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT                                                   0x13
-#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT                                                   0x16
-#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT                                                   0x19
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT                                                   0x10
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT                                                   0x13
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT                                                   0x16
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT                                                   0x19
 #define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK                                                             0x0000000FL
 #define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK                                                             0x000000F0L
 #define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK                                                             0x00000F00L
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c linux-dovetail-v5.15.y-dovetail/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
--- linux-5.15.26/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:90 @ static struct irq_chip dpu_mdss_irq_chip
 	.name = "dpu_mdss",
 	.irq_mask = dpu_mdss_irq_mask,
 	.irq_unmask = dpu_mdss_irq_unmask,
+	.flags = IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c linux-dovetail-v5.15.y-dovetail/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
--- linux-5.15.26/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:93 @ static struct irq_chip mdss_hw_irq_chip
 	.name		= "mdss",
 	.irq_mask	= mdss_hw_mask_irq,
 	.irq_unmask	= mdss_hw_unmask_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:257 @ int mdp5_mdss_init(struct drm_device *de
 	}
 
 	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
-			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
+			       mdss_irq, IRQF_OOB, "mdss_isr", mdp5_mdss);
 	if (ret) {
 		DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
 		goto fail_irq;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/gpu/ipu-v3/ipu-common.c linux-dovetail-v5.15.y-dovetail/drivers/gpu/ipu-v3/ipu-common.c
--- linux-5.15.26/drivers/gpu/ipu-v3/ipu-common.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/gpu/ipu-v3/ipu-common.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1238 @ static int ipu_irq_init(struct ipu_soc *
 		ct->chip.irq_ack = irq_gc_ack_set_bit;
 		ct->chip.irq_mask = irq_gc_mask_clr_bit;
 		ct->chip.irq_unmask = irq_gc_mask_set_bit;
+		ct->chip.flags = IRQCHIP_PIPELINE_SAFE;
 		ct->regs.ack = IPU_INT_STAT(i / 32);
 		ct->regs.mask = IPU_INT_CTRL(i / 32);
 	}
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/iio/industrialio-trigger.c linux-dovetail-v5.15.y-dovetail/drivers/iio/industrialio-trigger.c
--- linux-5.15.26/drivers/iio/industrialio-trigger.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/iio/industrialio-trigger.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:587 @ struct iio_trigger *viio_trigger_alloc(s
 	trig->subirq_chip.name = trig->name;
 	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
 	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
+	trig->subirq_chip.flags = IRQCHIP_PIPELINE_SAFE;
 	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
 		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
 		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/exynos-combiner.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/exynos-combiner.c
--- linux-5.15.26/drivers/irqchip/exynos-combiner.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/exynos-combiner.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:27 @
 
 #define IRQ_IN_COMBINER		8
 
-static DEFINE_SPINLOCK(irq_controller_lock);
+static DEFINE_HARD_SPINLOCK(irq_controller_lock);
 
 struct combiner_chip_data {
 	unsigned int hwirq_offset;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:75 @ static void combiner_handle_cascade_irq(
 
 	chained_irq_enter(chip, desc);
 
-	spin_lock(&irq_controller_lock);
+	raw_spin_lock(&irq_controller_lock);
 	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
-	spin_unlock(&irq_controller_lock);
+	raw_spin_unlock(&irq_controller_lock);
 	status &= chip_data->irq_mask;
 
 	if (status == 0)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:114 @ static struct irq_chip combiner_chip = {
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= combiner_set_affinity,
 #endif
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/irq-bcm2835.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-bcm2835.c
--- linux-5.15.26/drivers/irqchip/irq-bcm2835.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-bcm2835.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:105 @ static void armctrl_unmask_irq(struct ir
 static struct irq_chip armctrl_chip = {
 	.name = "ARMCTRL-level",
 	.irq_mask = armctrl_mask_irq,
-	.irq_unmask = armctrl_unmask_irq
+	.irq_unmask = armctrl_unmask_irq,
+	.flags = IRQCHIP_PIPELINE_SAFE,
 };
 
 static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/irq-bcm2836.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-bcm2836.c
--- linux-5.15.26/drivers/irqchip/irq-bcm2836.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-bcm2836.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:61 @ static struct irq_chip bcm2836_arm_irqch
 	.name		= "bcm2836-timer",
 	.irq_mask	= bcm2836_arm_irqchip_mask_timer_irq,
 	.irq_unmask	= bcm2836_arm_irqchip_unmask_timer_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:78 @ static struct irq_chip bcm2836_arm_irqch
 	.name		= "bcm2836-pmu",
 	.irq_mask	= bcm2836_arm_irqchip_mask_pmu_irq,
 	.irq_unmask	= bcm2836_arm_irqchip_unmask_pmu_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:93 @ static struct irq_chip bcm2836_arm_irqch
 	.name		= "bcm2836-gpu",
 	.irq_mask	= bcm2836_arm_irqchip_mask_gpu_irq,
 	.irq_unmask	= bcm2836_arm_irqchip_unmask_gpu_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void bcm2836_arm_irqchip_dummy_op(struct irq_data *d)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/irq-gic.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-gic.c
--- linux-5.15.26/drivers/irqchip/irq-gic.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-gic.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:90 @ struct gic_chip_data {
 
 #ifdef CONFIG_BL_SWITCHER
 
-static DEFINE_RAW_SPINLOCK(cpu_map_lock);
+static DEFINE_HARD_SPINLOCK(cpu_map_lock);
 
 #define gic_lock_irqsave(f)		\
 	raw_spin_lock_irqsave(&cpu_map_lock, (f))
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:410 @ static const struct irq_chip gic_chip =
 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
 				  IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_MASK_ON_SUSPEND,
+				  IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/irq-gic-v2m.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-gic-v2m.c
--- linux-5.15.26/drivers/irqchip/irq-gic-v2m.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-gic-v2m.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:92 @ static struct irq_chip gicv2m_msi_irq_ch
 	.irq_unmask		= gicv2m_unmask_msi_irq,
 	.irq_eoi		= irq_chip_eoi_parent,
 	.irq_write_msi_msg	= pci_msi_domain_write_msg,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_info gicv2m_msi_domain_info = {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:134 @ static struct irq_chip gicv2m_irq_chip =
 	.irq_eoi		= irq_chip_eoi_parent,
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:257 @ static bool is_msi_spi_valid(u32 base, u
 
 static struct irq_chip gicv2m_pmsi_irq_chip = {
 	.name			= "pMSI",
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_ops gicv2m_pmsi_ops = {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/irq-gic-v3.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-gic-v3.c
--- linux-5.15.26/drivers/irqchip/irq-gic-v3.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-gic-v3.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1348 @ static struct irq_chip gic_chip = {
 	.ipi_send_mask		= gic_ipi_send_mask,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
 				  IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_MASK_ON_SUSPEND,
+				  IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct irq_chip gic_eoimode1_chip = {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1368 @ static struct irq_chip gic_eoimode1_chip
 	.ipi_send_mask		= gic_ipi_send_mask,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
 				  IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_MASK_ON_SUSPEND,
+				  IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/irq-imx-irqsteer.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-imx-irqsteer.c
--- linux-5.15.26/drivers/irqchip/irq-imx-irqsteer.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-imx-irqsteer.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:32 @ struct irqsteer_data {
 	struct clk		*ipg_clk;
 	int			irq[CHAN_MAX_OUTPUT_INT];
 	int			irq_count;
-	raw_spinlock_t		lock;
+	hard_spinlock_t		lock;
 	int			reg_num;
 	int			channel;
 	struct irq_domain	*domain;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:77 @ static struct irq_chip imx_irqsteer_irq_
 	.name		= "irqsteer",
 	.irq_mask	= imx_irqsteer_irq_mask,
 	.irq_unmask	= imx_irqsteer_irq_unmask,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/irq-omap-intc.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-omap-intc.c
--- linux-5.15.26/drivers/irqchip/irq-omap-intc.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-omap-intc.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:214 @ static int __init omap_alloc_gc_of(struc
 		ct->chip.irq_mask = irq_gc_mask_disable_reg;
 		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
 
-		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
+		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE;
 
 		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
 		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/irq-sun4i.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-sun4i.c
--- linux-5.15.26/drivers/irqchip/irq-sun4i.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-sun4i.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:90 @ static struct irq_chip sun4i_irq_chip =
 	.irq_eoi	= sun4i_irq_ack,
 	.irq_mask	= sun4i_irq_mask,
 	.irq_unmask	= sun4i_irq_unmask,
-	.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+	.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | IRQCHIP_PIPELINE_SAFE,
 };
 
 static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/irqchip/irq-sunxi-nmi.c linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-sunxi-nmi.c
--- linux-5.15.26/drivers/irqchip/irq-sunxi-nmi.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/irqchip/irq-sunxi-nmi.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:190 @ static int __init sunxi_sc_nmi_irq_init(
 	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
 	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
 	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
-	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED |
+									IRQCHIP_EOI_IF_HANDLED |
+									IRQCHIP_PIPELINE_SAFE;
 	gc->chip_types[0].regs.ack		= reg_offs->pend;
 	gc->chip_types[0].regs.mask		= reg_offs->enable;
 	gc->chip_types[0].regs.type		= reg_offs->ctrl;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/memory/omap-gpmc.c linux-dovetail-v5.15.y-dovetail/drivers/memory/omap-gpmc.c
--- linux-5.15.26/drivers/memory/omap-gpmc.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/memory/omap-gpmc.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1412 @ static int gpmc_setup_irq(struct gpmc_de
 	gpmc->irq_chip.irq_mask = gpmc_irq_mask;
 	gpmc->irq_chip.irq_unmask = gpmc_irq_unmask;
 	gpmc->irq_chip.irq_set_type = gpmc_irq_set_type;
+	gpmc->irq_chip.flags = IRQCHIP_PIPELINE_SAFE;
 
 	gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
 						gpmc->nirqs,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1423 @ static int gpmc_setup_irq(struct gpmc_de
 		return -ENODEV;
 	}
 
-	rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc);
+	rc = request_irq(gpmc->irq, gpmc_handle_irq, IRQF_OOB, "gpmc", gpmc);
 	if (rc) {
 		dev_err(gpmc->dev, "failed to request irq %d: %d\n",
 			gpmc->irq, rc);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/mfd/tps65217.c linux-dovetail-v5.15.y-dovetail/drivers/mfd/tps65217.c
--- linux-5.15.26/drivers/mfd/tps65217.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/mfd/tps65217.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:87 @ static struct irq_chip tps65217_irq_chip
 	.irq_bus_sync_unlock	= tps65217_irq_sync_unlock,
 	.irq_enable		= tps65217_irq_enable,
 	.irq_disable		= tps65217_irq_disable,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct mfd_cell tps65217s[] = {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/pci/controller/dwc/pcie-designware-host.c linux-dovetail-v5.15.y-dovetail/drivers/pci/controller/dwc/pcie-designware-host.c
--- linux-5.15.26/drivers/pci/controller/dwc/pcie-designware-host.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/pci/controller/dwc/pcie-designware-host.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:47 @ static struct irq_chip dw_pcie_msi_irq_c
 	.irq_ack = dw_msi_ack_irq,
 	.irq_mask = dw_msi_mask_irq,
 	.irq_unmask = dw_msi_unmask_irq,
+	.flags = IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_info dw_pcie_msi_domain_info = {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/pci/controller/pcie-brcmstb.c linux-dovetail-v5.15.y-dovetail/drivers/pci/controller/pcie-brcmstb.c
--- linux-5.15.26/drivers/pci/controller/pcie-brcmstb.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/pci/controller/pcie-brcmstb.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:468 @ static struct irq_chip brcm_msi_irq_chip
 	.irq_ack         = irq_chip_ack_parent,
 	.irq_mask        = pci_msi_mask_irq,
 	.irq_unmask      = pci_msi_unmask_irq,
+	.flags		 = IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_info brcm_msi_domain_info = {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:531 @ static struct irq_chip brcm_msi_bottom_i
 	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
 	.irq_set_affinity	= brcm_msi_set_affinity,
 	.irq_ack                = brcm_msi_ack_irq,
+	.flags		 	= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int brcm_msi_alloc(struct brcm_msi *msi)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/pinctrl/bcm/pinctrl-bcm2835.c linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/bcm/pinctrl-bcm2835.c
--- linux-5.15.26/drivers/pinctrl/bcm/pinctrl-bcm2835.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/bcm/pinctrl-bcm2835.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:91 @ struct bcm2835_pinctrl {
 	struct pinctrl_desc pctl_desc;
 	struct pinctrl_gpio_range gpio_range;
 
-	raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
+	hard_spinlock_t irq_lock[BCM2835_NUM_BANKS];
 };
 
 /* pins are just named GPIO0..GPIO53 */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:680 @ static struct irq_chip bcm2835_gpio_irq_
 	.irq_mask = bcm2835_gpio_irq_disable,
 	.irq_unmask = bcm2835_gpio_irq_enable,
 	.irq_set_wake = bcm2835_gpio_irq_set_wake,
-	.flags = IRQCHIP_MASK_ON_SUSPEND,
+	.flags = IRQCHIP_MASK_ON_SUSPEND|IRQCHIP_PIPELINE_SAFE,
 };
 
 static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/pinctrl/intel/pinctrl-cherryview.c linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/intel/pinctrl-cherryview.c
--- linux-5.15.26/drivers/pinctrl/intel/pinctrl-cherryview.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/intel/pinctrl-cherryview.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:565 @ static const struct intel_pinctrl_soc_da
  * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005),
  * errata #CHT34, for further information.
  */
-static DEFINE_RAW_SPINLOCK(chv_lock);
+static DEFINE_HARD_SPINLOCK(chv_lock);
 
 static u32 chv_pctrl_readl(struct intel_pinctrl *pctrl, unsigned int offset)
 {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1556 @ static int chv_gpio_probe(struct intel_p
 	pctrl->irqchip.irq_mask = chv_gpio_irq_mask;
 	pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask;
 	pctrl->irqchip.irq_set_type = chv_gpio_irq_type;
-	pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
+	pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE |
+				IRQCHIP_PIPELINE_SAFE;
 
 	chip->irq.chip = &pctrl->irqchip;
 	chip->irq.init_hw = chv_gpio_irq_init_hw;
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/pinctrl/qcom/pinctrl-msm.c linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/qcom/pinctrl-msm.c
--- linux-5.15.26/drivers/pinctrl/qcom/pinctrl-msm.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/qcom/pinctrl-msm.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:71 @ struct msm_pinctrl {
 
 	bool intr_target_use_scm;
 
-	raw_spinlock_t lock;
+	hard_spinlock_t lock;
 
 	DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
 	DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1274 @ static int msm_gpio_init(struct msm_pinc
 	pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
 	pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND |
 				IRQCHIP_SET_TYPE_MASKED |
-				IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
+				IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND |
+				IRQCHIP_PIPELINE_SAFE;
 
 	np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
 	if (np) {
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/pinctrl/samsung/pinctrl-exynos.c linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/samsung/pinctrl-exynos.c
--- linux-5.15.26/drivers/pinctrl/samsung/pinctrl-exynos.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/samsung/pinctrl-exynos.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:219 @ static const struct exynos_irq_chip exyn
 		.irq_set_type = exynos_irq_set_type,
 		.irq_request_resources = exynos_irq_request_resources,
 		.irq_release_resources = exynos_irq_release_resources,
+		.flags = IRQCHIP_PIPELINE_SAFE,
 	},
 	.eint_con = EXYNOS_GPIO_ECON_OFFSET,
 	.eint_mask = EXYNOS_GPIO_EMASK_OFFSET,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:292 @ __init int exynos_eint_gpio_init(struct
 	}
 
 	ret = devm_request_irq(dev, d->irq, exynos_eint_gpio_irq,
-					0, dev_name(dev), d);
+					IRQF_OOB, dev_name(dev), d);
 	if (ret) {
 		dev_err(dev, "irq request failed\n");
 		return -ENXIO;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:310 @ __init int exynos_eint_gpio_init(struct
 			goto err_domains;
 		}
 		bank->irq_chip->chip.name = bank->name;
+		bank->irq_chip->chip.flags |= IRQCHIP_PIPELINE_SAFE;
 
 		bank->irq_domain = irq_domain_add_linear(bank->of_node,
 				bank->nr_pins, &exynos_eint_irqd_ops, bank);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:414 @ static const struct exynos_irq_chip s5pv
 		.irq_set_wake = exynos_wkup_irq_set_wake,
 		.irq_request_resources = exynos_irq_request_resources,
 		.irq_release_resources = exynos_irq_release_resources,
+		.flags = IRQCHIP_PIPELINE_SAFE,
 	},
 	.eint_con = EXYNOS_WKUP_ECON_OFFSET,
 	.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:435 @ static const struct exynos_irq_chip exyn
 		.irq_set_wake = exynos_wkup_irq_set_wake,
 		.irq_request_resources = exynos_irq_request_resources,
 		.irq_release_resources = exynos_irq_release_resources,
+		.flags = IRQCHIP_PIPELINE_SAFE,
 	},
 	.eint_con = EXYNOS_WKUP_ECON_OFFSET,
 	.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:455 @ static const struct exynos_irq_chip exyn
 		.irq_set_wake = exynos_wkup_irq_set_wake,
 		.irq_request_resources = exynos_irq_request_resources,
 		.irq_release_resources = exynos_irq_release_resources,
+		.flags = IRQCHIP_PIPELINE_SAFE,
 	},
 	.eint_con = EXYNOS7_WKUP_ECON_OFFSET,
 	.eint_mask = EXYNOS7_WKUP_EMASK_OFFSET,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/pinctrl/samsung/pinctrl-samsung.h linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/samsung/pinctrl-samsung.h
--- linux-5.15.26/drivers/pinctrl/samsung/pinctrl-samsung.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/samsung/pinctrl-samsung.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:174 @ struct samsung_pin_bank {
 	struct gpio_chip gpio_chip;
 	struct pinctrl_gpio_range grange;
 	struct exynos_irq_chip *irq_chip;
-	raw_spinlock_t slock;
+	hard_spinlock_t slock;
 
 	u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers*/
 };
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/pinctrl/sunxi/pinctrl-sunxi.c linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/sunxi/pinctrl-sunxi.c
--- linux-5.15.26/drivers/pinctrl/sunxi/pinctrl-sunxi.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/sunxi/pinctrl-sunxi.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1079 @ static struct irq_chip sunxi_pinctrl_edg
 	.irq_release_resources = sunxi_pinctrl_irq_release_resources,
 	.irq_set_type	= sunxi_pinctrl_irq_set_type,
 	.irq_set_wake	= sunxi_pinctrl_irq_set_wake,
-	.flags		= IRQCHIP_MASK_ON_SUSPEND,
+	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct irq_chip sunxi_pinctrl_level_irq_chip = {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1097 @ static struct irq_chip sunxi_pinctrl_lev
 	.irq_set_wake	= sunxi_pinctrl_irq_set_wake,
 	.flags		= IRQCHIP_EOI_THREADED |
 			  IRQCHIP_MASK_ON_SUSPEND |
-			  IRQCHIP_EOI_IF_HANDLED,
+			  IRQCHIP_EOI_IF_HANDLED |
+			  IRQCHIP_PIPELINE_SAFE,
 };
 
 static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/pinctrl/sunxi/pinctrl-sunxi.h linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/sunxi/pinctrl-sunxi.h
--- linux-5.15.26/drivers/pinctrl/sunxi/pinctrl-sunxi.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/pinctrl/sunxi/pinctrl-sunxi.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:170 @ struct sunxi_pinctrl {
 	unsigned			ngroups;
 	int				*irq;
 	unsigned			*irq_array;
-	raw_spinlock_t			lock;
+	hard_spinlock_t			lock;
 	struct pinctrl_dev		*pctl_dev;
 	unsigned long			variant;
 };
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/soc/qcom/smp2p.c linux-dovetail-v5.15.y-dovetail/drivers/soc/qcom/smp2p.c
--- linux-5.15.26/drivers/soc/qcom/smp2p.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/soc/qcom/smp2p.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:285 @ static struct irq_chip smp2p_irq_chip =
 	.irq_mask       = smp2p_mask_irq,
 	.irq_unmask     = smp2p_unmask_irq,
 	.irq_set_type	= smp2p_set_irq_type,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int smp2p_irq_map(struct irq_domain *d,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/spi/Kconfig linux-dovetail-v5.15.y-dovetail/drivers/spi/Kconfig
--- linux-5.15.26/drivers/spi/Kconfig	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/spi/Kconfig	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:35 @ config SPI_DEBUG
 	  Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
 	  sysfs, and debugfs support in SPI controller and protocol drivers.
 
+config SPI_OOB
+	def_bool n
+	depends on HAS_DMA && DOVETAIL
+
 #
 # MASTER side ... talking to discrete SPI slave chips including microcontrollers
 #
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:161 @ config SPI_BCM2835
 	  is for the regular SPI controller. Slave mode operation is not also
 	  not supported.
 
+config SPI_BCM2835_OOB
+	bool "Out-of-band support for BCM2835 SPI controller"
+	depends on SPI_BCM2835 && DOVETAIL
+	select SPI_OOB
+	help
+	  Enable out-of-band cyclic transfers.
+
 config SPI_BCM2835AUX
 	tristate "BCM2835 SPI auxiliary controller"
 	depends on ((ARCH_BCM2835 || ARCH_BRCMSTB) && GPIOLIB) || COMPILE_TEST
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/spi/spi-bcm2835.c linux-dovetail-v5.15.y-dovetail/drivers/spi/spi-bcm2835.c
--- linux-5.15.26/drivers/spi/spi-bcm2835.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/spi/spi-bcm2835.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1044 @ static int bcm2835_spi_transfer_one_poll
 	return 0;
 }
 
-static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
-				    struct spi_device *spi,
-				    struct spi_transfer *tfr)
+static unsigned long bcm2835_get_clkdiv(struct bcm2835_spi *bs, u32 spi_hz,
+					u32 *effective_speed_hz)
 {
-	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
-	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
-	unsigned long spi_hz, cdiv;
-	unsigned long hz_per_byte, byte_limit;
-	u32 cs = slv->prepare_cs;
-
-	/* set clock */
-	spi_hz = tfr->speed_hz;
+	unsigned long cdiv;
 
 	if (spi_hz >= bs->clk_hz / 2) {
 		cdiv = 2; /* clk_hz/2 is the fastest we can go */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1061 @ static int bcm2835_spi_transfer_one(stru
 	} else {
 		cdiv = 0; /* 0 is the slowest we can go */
 	}
-	tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
+
+	*effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
+
+	return cdiv;
+}
+
+static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
+				    struct spi_device *spi,
+				    struct spi_transfer *tfr)
+{
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+	unsigned long spi_hz, cdiv;
+	unsigned long hz_per_byte, byte_limit;
+	u32 cs = slv->prepare_cs;
+
+	/* set clock */
+	spi_hz = tfr->speed_hz;
+
+	cdiv = bcm2835_get_clkdiv(bs, spi_hz, &tfr->effective_speed_hz);
 	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
 	/* handle all the 3-wire mode */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1327 @ err_cleanup:
 	return ret;
 }
 
+#ifdef CONFIG_SPI_BCM2835_OOB
+
+static int bcm2835_spi_prepare_oob_transfer(struct spi_controller *ctlr,
+					struct spi_oob_transfer *xfer)
+{
+	/*
+	 * The size of a transfer is limited by DLEN which is 16-bit
+	 * wide, and we don't want to scatter transfers in out-of-band
+	 * mode, so cap the frame size accordingly.
+	 */
+	if (xfer->setup.frame_len > 65532)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void bcm2835_spi_start_oob_transfer(struct spi_controller *ctlr,
+					struct spi_oob_transfer *xfer)
+{
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+	u32 cs = bs->slv->prepare_cs, effective_speed_hz;
+	struct spi_device *spi = xfer->spi;
+	unsigned long cdiv;
+
+	/* See bcm2835_spi_prepare_message(). */
+	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+	cdiv = bcm2835_get_clkdiv(bs, xfer->setup.speed_hz, &effective_speed_hz);
+	xfer->effective_speed_hz = effective_speed_hz;
+	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
+	bcm2835_wr(bs, BCM2835_SPI_DLEN, xfer->setup.frame_len);
+
+	if (spi->mode & SPI_3WIRE)
+		cs |= BCM2835_SPI_CS_REN;
+	bcm2835_wr(bs, BCM2835_SPI_CS,
+		   cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
+}
+
+static void bcm2835_spi_pulse_oob_transfer(struct spi_controller *ctlr,
+					struct spi_oob_transfer *xfer)
+{
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+	/* Reload DLEN for the next pulse. */
+	bcm2835_wr(bs, BCM2835_SPI_DLEN, xfer->setup.frame_len);
+}
+
+static void bcm2835_spi_terminate_oob_transfer(struct spi_controller *ctlr,
+					struct spi_oob_transfer *xfer)
+{
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+	bcm2835_spi_reset_hw(bs);
+}
+
+#else
+#define bcm2835_spi_prepare_oob_transfer	NULL
+#define bcm2835_spi_start_oob_transfer		NULL
+#define bcm2835_spi_pulse_oob_transfer		NULL
+#define bcm2835_spi_terminate_oob_transfer	NULL
+#endif
+
 static int bcm2835_spi_probe(struct platform_device *pdev)
 {
 	struct spi_controller *ctlr;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1410 @ static int bcm2835_spi_probe(struct plat
 	ctlr->transfer_one = bcm2835_spi_transfer_one;
 	ctlr->handle_err = bcm2835_spi_handle_err;
 	ctlr->prepare_message = bcm2835_spi_prepare_message;
+	ctlr->prepare_oob_transfer = bcm2835_spi_prepare_oob_transfer;
+	ctlr->start_oob_transfer = bcm2835_spi_start_oob_transfer;
+	ctlr->pulse_oob_transfer = bcm2835_spi_pulse_oob_transfer;
+	ctlr->terminate_oob_transfer = bcm2835_spi_terminate_oob_transfer;
 	ctlr->dev.of_node = pdev->dev.of_node;
 
 	bs = spi_controller_get_devdata(ctlr);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/spi/spi.c linux-dovetail-v5.15.y-dovetail/drivers/spi/spi.c
--- linux-5.15.26/drivers/spi/spi.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/spi/spi.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2596 @ struct spi_controller *__spi_alloc_contr
 	mutex_init(&ctlr->bus_lock_mutex);
 	mutex_init(&ctlr->io_mutex);
 	mutex_init(&ctlr->add_lock);
+#ifdef CONFIG_SPI_OOB
+	sema_init(&ctlr->bus_oob_lock_sem, 1);
+#endif
 	ctlr->bus_num = -1;
 	ctlr->num_chipselect = 1;
 	ctlr->slave = slave;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:3902 @ EXPORT_SYMBOL_GPL(spi_async_locked);
  * inline functions.
  */
 
+static void get_spi_bus(struct spi_controller *ctlr)
+{
+	mutex_lock(&ctlr->bus_lock_mutex);
+#ifdef CONFIG_SPI_OOB
+	down(&ctlr->bus_oob_lock_sem);
+#endif
+}
+
+static void put_spi_bus(struct spi_controller *ctlr)
+{
+#ifdef CONFIG_SPI_OOB
+	up(&ctlr->bus_oob_lock_sem);
+#endif
+	mutex_unlock(&ctlr->bus_lock_mutex);
+}
+
 static void spi_complete(void *arg)
 {
 	complete(arg);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4002 @ int spi_sync(struct spi_device *spi, str
 {
 	int ret;
 
-	mutex_lock(&spi->controller->bus_lock_mutex);
+	get_spi_bus(spi->controller);
 	ret = __spi_sync(spi, message);
-	mutex_unlock(&spi->controller->bus_lock_mutex);
+	put_spi_bus(spi->controller);
 
 	return ret;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4051 @ int spi_bus_lock(struct spi_controller *
 {
 	unsigned long flags;
 
-	mutex_lock(&ctlr->bus_lock_mutex);
+	get_spi_bus(ctlr);
 
 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
 	ctlr->bus_lock_flag = 1;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4080 @ int spi_bus_unlock(struct spi_controller
 {
 	ctlr->bus_lock_flag = 0;
 
-	mutex_unlock(&ctlr->bus_lock_mutex);
+	put_spi_bus(ctlr);
 
 	return 0;
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4165 @ int spi_write_then_read(struct spi_devic
 }
 EXPORT_SYMBOL_GPL(spi_write_then_read);
 
+#ifdef CONFIG_SPI_OOB
+
+static int bus_lock_oob(struct spi_controller *ctlr)
+{
+	unsigned long flags;
+	int ret = -EBUSY;
+
+	mutex_lock(&ctlr->bus_lock_mutex);
+
+	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+
+	if (!ctlr->bus_lock_flag && !down_trylock(&ctlr->bus_oob_lock_sem)) {
+		ctlr->bus_lock_flag = 1;
+		ret = 0;
+	}
+
+	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+
+	mutex_unlock(&ctlr->bus_lock_mutex);
+
+	return ret;
+}
+
+static int bus_unlock_oob(struct spi_controller *ctlr)
+{
+	ctlr->bus_lock_flag = 0;
+	up(&ctlr->bus_oob_lock_sem);
+
+	return 0;
+}
+
+static int prepare_oob_dma(struct spi_controller *ctlr,
+			struct spi_oob_transfer *xfer)
+{
+	struct dma_async_tx_descriptor *desc;
+	size_t len = xfer->setup.frame_len;
+	dma_cookie_t cookie;
+	dma_addr_t addr;
+	int ret;
+
+	/* TX to second half of I/O buffer. */
+	addr = xfer->dma_addr + xfer->aligned_frame_len;
+	desc = dmaengine_prep_slave_single(ctlr->dma_tx, addr, len,
+					DMA_MEM_TO_DEV,
+					DMA_OOB_INTERRUPT|DMA_OOB_PULSE);
+	if (!desc)
+		return -EIO;
+
+	xfer->txd = desc;
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret)
+		return ret;
+
+	dma_async_issue_pending(ctlr->dma_tx);
+
+	/* RX to first half of I/O buffer. */
+	addr = xfer->dma_addr;
+	desc = dmaengine_prep_slave_single(ctlr->dma_rx, addr, len,
+					DMA_DEV_TO_MEM,
+					DMA_OOB_INTERRUPT|DMA_OOB_PULSE);
+	if (!desc) {
+		ret = -EIO;
+		goto fail_rx;
+	}
+
+	desc->callback = xfer->setup.xfer_done;
+	desc->callback_param = xfer;
+
+	xfer->rxd = desc;
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret)
+		goto fail_rx;
+
+	dma_async_issue_pending(ctlr->dma_rx);
+
+	return 0;
+
+fail_rx:
+	dmaengine_terminate_sync(ctlr->dma_tx);
+
+	return ret;
+}
+
+static void unprepare_oob_dma(struct spi_controller *ctlr)
+{
+	dmaengine_terminate_sync(ctlr->dma_rx);
+	dmaengine_terminate_sync(ctlr->dma_tx);
+}
+
+/*
+ * A simpler version of __spi_validate() for oob transfers.
+ */
+static int validate_oob_xfer(struct spi_device *spi,
+			struct spi_oob_transfer *xfer)
+{
+	struct spi_controller *ctlr = spi->controller;
+	struct spi_oob_setup *p = &xfer->setup;
+	int w_size;
+
+	if (p->frame_len == 0)
+		return -EINVAL;
+
+	if (!p->bits_per_word)
+		p->bits_per_word = spi->bits_per_word;
+
+	if (!p->speed_hz)
+		p->speed_hz = spi->max_speed_hz;
+
+	if (ctlr->max_speed_hz && p->speed_hz > ctlr->max_speed_hz)
+		p->speed_hz = ctlr->max_speed_hz;
+
+	if (__spi_validate_bits_per_word(ctlr, p->bits_per_word))
+		return -EINVAL;
+
+	if (p->bits_per_word <= 8)
+		w_size = 1;
+	else if (p->bits_per_word <= 16)
+		w_size = 2;
+	else
+		w_size = 4;
+
+	if (p->frame_len % w_size)
+		return -EINVAL;
+
+	if (p->speed_hz && ctlr->min_speed_hz &&
+		p->speed_hz < ctlr->min_speed_hz)
+		return -EINVAL;
+
+	return 0;
+}
+
+int spi_prepare_oob_transfer(struct spi_device *spi,
+			struct spi_oob_transfer *xfer)
+{
+	struct spi_controller *ctlr;
+	dma_addr_t dma_addr;
+	size_t alen, iolen;
+	void *iobuf;
+	int ret;
+
+	/* Controller must support oob transactions. */
+	ctlr = spi->controller;
+	if (!ctlr->prepare_oob_transfer)
+		return -ENOTSUPP;
+
+	/* Out-of-band transfers require DMA support. */
+	if (!ctlr->can_dma)
+		return -ENODEV;
+
+	ret = validate_oob_xfer(spi, xfer);
+	if (ret)
+		return ret;
+
+	alen = L1_CACHE_ALIGN(xfer->setup.frame_len);
+	/*
+	 * Allocate a single coherent I/O buffer which is twice as
+	 * large as the user specified transfer length, TX data goes
+	 * to the upper half, RX data to the lower half.
+	 */
+	iolen = alen * 2;
+	iobuf = dma_alloc_coherent(ctlr->dev.parent, iolen,
+				&dma_addr, GFP_KERNEL);
+	if (iobuf == NULL)
+		return -ENOMEM;
+
+	xfer->spi = spi;
+	xfer->dma_addr = dma_addr;
+	xfer->io_buffer = iobuf;
+	xfer->aligned_frame_len = alen;
+	xfer->effective_speed_hz = 0;
+
+	ret = prepare_oob_dma(ctlr, xfer);
+	if (ret)
+		goto fail_prep_dma;
+
+	ret = bus_lock_oob(ctlr);
+	if (ret)
+		goto fail_bus_lock;
+
+	ret = ctlr->prepare_oob_transfer(ctlr, xfer);
+	if (ret)
+		goto fail_prep_xfer;
+
+	return 0;
+
+fail_prep_xfer:
+	bus_unlock_oob(ctlr);
+fail_bus_lock:
+	unprepare_oob_dma(ctlr);
+fail_prep_dma:
+	dma_free_coherent(ctlr->dev.parent, iolen, iobuf, dma_addr);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_prepare_oob_transfer);
+
+void spi_start_oob_transfer(struct spi_oob_transfer *xfer)
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+
+	ctlr->start_oob_transfer(ctlr, xfer);
+}
+EXPORT_SYMBOL_GPL(spi_start_oob_transfer);
+
+int spi_pulse_oob_transfer(struct spi_oob_transfer *xfer) /* oob stage */
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+	int ret;
+
+	if (ctlr->pulse_oob_transfer)
+		ctlr->pulse_oob_transfer(ctlr, xfer);
+
+	ret = dma_pulse_oob(ctlr->dma_rx);
+	if (likely(!ret))
+		ret = dma_pulse_oob(ctlr->dma_tx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_pulse_oob_transfer);
+
+void spi_terminate_oob_transfer(struct spi_oob_transfer *xfer)
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+
+	if (ctlr->terminate_oob_transfer)
+		ctlr->terminate_oob_transfer(ctlr, xfer);
+
+	unprepare_oob_dma(ctlr);
+	bus_unlock_oob(ctlr);
+	dma_free_coherent(ctlr->dev.parent, xfer->aligned_frame_len * 2,
+			xfer->io_buffer, xfer->dma_addr);
+}
+EXPORT_SYMBOL_GPL(spi_terminate_oob_transfer);
+
+int spi_mmap_oob_transfer(struct vm_area_struct *vma,
+			struct spi_oob_transfer *xfer)
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+	size_t len;
+	int ret;
+
+	/*
+	 * We may have an IOMMU, rely on dma_mmap_coherent() for
+	 * dealing with the nitty-gritty details of mapping a coherent
+	 * buffer.
+	 */
+	len = vma->vm_end - vma->vm_start;
+	if (spi_get_oob_iolen(xfer) <= len)
+		ret = dma_mmap_coherent(ctlr->dev.parent,
+					vma,
+					xfer->io_buffer,
+					xfer->dma_addr,
+					len);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mmap_oob_transfer);
+
+#endif	/* CONFIG_SPI_OOB */
+
 /*-------------------------------------------------------------------------*/
 
 #if IS_ENABLED(CONFIG_OF)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/spmi/spmi-pmic-arb.c linux-dovetail-v5.15.y-dovetail/drivers/spmi/spmi-pmic-arb.c
--- linux-5.15.26/drivers/spmi/spmi-pmic-arb.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/spmi/spmi-pmic-arb.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:148 @ struct spmi_pmic_arb {
 	void __iomem		*cnfg;
 	void __iomem		*core;
 	resource_size_t		core_size;
-	raw_spinlock_t		lock;
+	hard_spinlock_t		lock;
 	u8			channel;
 	int			irq;
 	u8			ee;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:687 @ static struct irq_chip pmic_arb_irqchip
 	.irq_set_type	= qpnpint_irq_set_type,
 	.irq_set_wake	= qpnpint_irq_set_wake,
 	.irq_get_irqchip_state	= qpnpint_get_irqchip_state,
-	.flags		= IRQCHIP_MASK_ON_SUSPEND,
+	.flags		= IRQCHIP_MASK_ON_SUSPEND|IRQCHIP_PIPELINE_SAFE,
 };
 
 static int qpnpint_irq_domain_translate(struct irq_domain *d,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/tty/serial/8250/8250_core.c linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/8250/8250_core.c
--- linux-5.15.26/drivers/tty/serial/8250/8250_core.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/8250/8250_core.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:662 @ static int univ8250_console_match(struct
 	return -ENODEV;
 }
 
+#ifdef CONFIG_RAW_PRINTK
+
+static void raw_write_char(struct uart_8250_port *up, int c)
+{
+	unsigned int status, tmout = 10000;
+
+	for (;;) {
+		status = serial_in(up, UART_LSR);
+		up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+		if ((status & UART_LSR_THRE) == UART_LSR_THRE)
+			break;
+		if (--tmout == 0)
+			break;
+		cpu_relax();
+	}
+	serial_port_out(&up->port, UART_TX, c);
+}
+
+static void univ8250_console_write_raw(struct console *co, const char *s,
+				       unsigned int count)
+{
+	struct uart_8250_port *up = &serial8250_ports[co->index];
+	unsigned int ier;
+
+        ier = serial_in(up, UART_IER);
+
+        if (up->capabilities & UART_CAP_UUE)
+                serial_out(up, UART_IER, UART_IER_UUE);
+        else
+                serial_out(up, UART_IER, 0);
+
+	while (count-- > 0) {
+		if (*s == '\n')
+			raw_write_char(up, '\r');
+		raw_write_char(up, *s++);
+	}
+
+        serial_out(up, UART_IER, ier);
+}
+
+#endif
+
 static struct console univ8250_console = {
 	.name		= "ttyS",
 	.write		= univ8250_console_write,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:711 @ static struct console univ8250_console =
 	.setup		= univ8250_console_setup,
 	.exit		= univ8250_console_exit,
 	.match		= univ8250_console_match,
+#ifdef CONFIG_RAW_PRINTK
+	.write_raw	= univ8250_console_write_raw,
+#endif
 	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
 	.index		= -1,
 	.data		= &serial8250_reg,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/tty/serial/amba-pl011.c linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/amba-pl011.c
--- linux-5.15.26/drivers/tty/serial/amba-pl011.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/amba-pl011.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1964 @ static void pl011_shutdown(struct uart_p
 
 	pl011_disable_uart(uap);
 
+	if (IS_ENABLED(CONFIG_RAW_PRINTK))
+		clk_disable(uap->clk);
 	/*
 	 * Shut down the clock producer
 	 */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2317 @ static void pl011_console_putchar(struct
 	pl011_write(ch, uap, REG_DR);
 }
 
+#ifdef CONFIG_RAW_PRINTK
+
+static void
+pl011_console_write_raw(struct console *co, const char *s, unsigned int count)
+{
+	struct uart_amba_port *uap = amba_ports[co->index];
+	unsigned int old_cr = 0, new_cr;
+
+	if (!uap->vendor->always_enabled) {
+		old_cr = pl011_read(uap, REG_CR);
+		new_cr = old_cr & ~UART011_CR_CTSEN;
+		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
+		pl011_write(new_cr, uap, REG_CR);
+	}
+
+	while (count-- > 0) {
+		if (*s == '\n')
+			pl011_console_putchar(&uap->port, '\r');
+		pl011_console_putchar(&uap->port, *s++);
+	}
+
+	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
+		& uap->vendor->fr_busy)
+		cpu_relax();
+
+	if (!uap->vendor->always_enabled)
+		pl011_write(old_cr, uap, REG_CR);
+}
+
+#endif  /* CONFIG_RAW_PRINTK */
+
 static void
 pl011_console_write(struct console *co, const char *s, unsigned int count)
 {
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2477 @ static int pl011_console_setup(struct co
 			pl011_console_get_options(uap, &baud, &parity, &bits);
 	}
 
+	if (IS_ENABLED(CONFIG_RAW_PRINTK))
+		clk_enable(uap->clk);
+
 	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2550 @ static struct console amba_console = {
 	.device		= uart_console_device,
 	.setup		= pl011_console_setup,
 	.match		= pl011_console_match,
+#ifdef CONFIG_RAW_PRINTK
+	.write_raw	= pl011_console_write_raw,
+#endif
 	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
 	.index		= -1,
 	.data		= &amba_reg,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/tty/serial/imx.c linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/imx.c
--- linux-5.15.26/drivers/tty/serial/imx.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/imx.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1971 @ static void imx_uart_console_putchar(str
 	imx_uart_writel(sport, ch, URTX0);
 }
 
-/*
- * Interrupts are disabled on entering
- */
 static void
-imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+__imx_uart_console_write(struct imx_port *sport, const char *s, unsigned int count)
 {
-	struct imx_port *sport = imx_uart_ports[co->index];
 	struct imx_port_ucrs old_ucr;
-	unsigned long flags;
 	unsigned int ucr1;
-	int locked = 1;
-
-	if (sport->port.sysrq)
-		locked = 0;
-	else if (oops_in_progress)
-		locked = spin_trylock_irqsave(&sport->port.lock, flags);
-	else
-		spin_lock_irqsave(&sport->port.lock, flags);
 
 	/*
 	 *	First, save UCR1/2/3 and then disable interrupts
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2001 @ imx_uart_console_write(struct console *c
 	while (!(imx_uart_readl(sport, USR2) & USR2_TXDC));
 
 	imx_uart_ucrs_restore(sport, &old_ucr);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct imx_port *sport = imx_uart_ports[co->index];
+	unsigned long flags;
+	int locked = 1;
+
+	if (sport->port.sysrq)
+		locked = 0;
+	else if (oops_in_progress)
+		locked = spin_trylock_irqsave(&sport->port.lock, flags);
+	else
+		spin_lock_irqsave(&sport->port.lock, flags);
+
+	__imx_uart_console_write(sport, s, count);
 
 	if (locked)
 		spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
+#ifdef CONFIG_RAW_PRINTK
+static void
+imx_uart_console_write_raw(struct console *co, const char *s, unsigned int count)
+{
+	struct imx_port *sport = imx_uart_ports[co->index];
+
+	return __imx_uart_console_write(sport, s, count);
+}
+#endif
+
 /*
  * If the port was already initialised (eg, by a boot loader),
  * try to determine the current setup.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2151 @ static struct uart_driver imx_uart_uart_
 static struct console imx_uart_console = {
 	.name		= DEV_NAME,
 	.write		= imx_uart_console_write,
+#ifdef CONFIG_RAW_PRINTK
+	.write_raw	= imx_uart_console_write_raw,
+#endif
 	.device		= uart_console_device,
 	.setup		= imx_uart_console_setup,
 	.flags		= CON_PRINTBUFFER,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/tty/serial/samsung_tty.c linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/samsung_tty.c
--- linux-5.15.26/drivers/tty/serial/samsung_tty.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/samsung_tty.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:2631 @ static struct console s3c24xx_serial_con
 	.flags		= CON_PRINTBUFFER,
 	.index		= -1,
 	.write		= s3c24xx_serial_console_write,
+#ifdef CONFIG_RAW_PRINTK
+	/* The common write handler can run from atomic context. */
+	.write_raw	= s3c24xx_serial_console_write,
+#endif
 	.setup		= s3c24xx_serial_console_setup,
 	.data		= &s3c24xx_uart_drv,
 };
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/drivers/tty/serial/st-asc.c linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/st-asc.c
--- linux-5.15.26/drivers/tty/serial/st-asc.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/drivers/tty/serial/st-asc.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:907 @ static void asc_console_write(struct con
 		spin_unlock_irqrestore(&port->lock, flags);
 }
 
+#ifdef CONFIG_RAW_PRINTK
+
+static void asc_console_write_raw(struct console *co,
+				  const char *s, unsigned int count)
+{
+	struct uart_port *port = &asc_ports[co->index].port;
+	unsigned long timeout = 1000000;
+	u32 intenable;
+
+	intenable = asc_in(port, ASC_INTEN);
+	asc_out(port, ASC_INTEN, 0);
+	(void)asc_in(port, ASC_INTEN);	/* Defeat bus write posting */
+
+	uart_console_write(port, s, count, asc_console_putchar);
+
+	while (timeout-- && !asc_txfifo_is_empty(port))
+		cpu_relax();	/* wait shorter */
+
+	asc_out(port, ASC_INTEN, intenable);
+}
+
+#endif
+
 static int asc_console_setup(struct console *co, char *options)
 {
 	struct asc_port *ascport;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:962 @ static struct console asc_console = {
 	.name		= ASC_SERIAL_NAME,
 	.device		= uart_console_device,
 	.write		= asc_console_write,
+#ifdef CONFIG_RAW_PRINTK
+	.write_raw	= asc_console_write_raw,
+#endif
 	.setup		= asc_console_setup,
 	.flags		= CON_PRINTBUFFER,
 	.index		= -1,
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/fs/eventfd.c linux-dovetail-v5.15.y-dovetail/fs/eventfd.c
--- linux-5.15.26/fs/eventfd.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/fs/eventfd.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:265 @ static ssize_t eventfd_read(struct kiocb
 	return sizeof(ucnt);
 }
 
-static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
-			     loff_t *ppos)
+static ssize_t eventfd_write(struct kiocb *iocb, struct iov_iter *from)
 {
+	struct file *file = iocb->ki_filp;
 	struct eventfd_ctx *ctx = file->private_data;
 	ssize_t res;
 	__u64 ucnt;
 	DECLARE_WAITQUEUE(wait, current);
 
-	if (count < sizeof(ucnt))
+	if (iov_iter_count(from) < sizeof(ucnt))
 		return -EINVAL;
-	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
+	if (copy_from_iter(&ucnt, sizeof(ucnt), from) != sizeof(ucnt))
 		return -EFAULT;
 	if (ucnt == ULLONG_MAX)
 		return -EINVAL;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:332 @ static const struct file_operations even
 	.release	= eventfd_release,
 	.poll		= eventfd_poll,
 	.read_iter	= eventfd_read,
-	.write		= eventfd_write,
+	.write_iter	= eventfd_write,
 	.llseek		= noop_llseek,
 };
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/fs/exec.c linux-dovetail-v5.15.y-dovetail/fs/exec.c
--- linux-5.15.26/fs/exec.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/fs/exec.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:37 @
 #include <linux/swap.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/irq_pipeline.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/coredump.h>
 #include <linux/sched/signal.h>
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:977 @ static int exec_mmap(struct mm_struct *m
 	struct task_struct *tsk;
 	struct mm_struct *old_mm, *active_mm;
 	int ret;
+	unsigned long flags;
 
 	/* Notify parent that we're no longer interested in the old VM */
 	tsk = current;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1010 @ static int exec_mmap(struct mm_struct *m
 
 	local_irq_disable();
 	active_mm = tsk->active_mm;
+	protect_inband_mm(flags);
 	tsk->active_mm = mm;
 	tsk->mm = mm;
 	/*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1019 @ static int exec_mmap(struct mm_struct *m
 	 * lazy tlb mm refcounting when these are updated by context
 	 * switches. Not all architectures can handle irqs off over
 	 * activate_mm yet.
+	 *
+	 * irq_pipeline: activate_mm() allowing irqs off context is a
+	 * requirement. e.g. TLB shootdown must not involve IPIs. We
+	 * make sure protect_inband_mm() is in effect while switching
+	 * in and activating the new mm by forcing
+	 * CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM on.
 	 */
 	if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
 		local_irq_enable();
 	activate_mm(active_mm, mm);
+	unprotect_inband_mm(flags);
 	if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
 		local_irq_enable();
 	tsk->mm->vmacache_seqnum = 0;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1316 @ int begin_new_exec(struct linux_binprm *
 	if (retval)
 		goto out_unlock;
 
+	/* Tell Dovetail about the ongoing exec(). */
+	arch_dovetail_exec_prepare();
+
 	/*
 	 * Ensure that the uaccess routines can actually operate on userspace
 	 * pointers:
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/fs/fcntl.c linux-dovetail-v5.15.y-dovetail/fs/fcntl.c
--- linux-5.15.26/fs/fcntl.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/fs/fcntl.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1048 @ static int __init fcntl_init(void)
 	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
 	 * is defined as O_NONBLOCK on some platforms and not on others.
 	 */
-	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+	BUILD_BUG_ON(22 - 1 /* for O_RDONLY being 0 */ !=
 		HWEIGHT32(
 			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
 			__FMODE_EXEC | __FMODE_NONOTIFY));
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/fs/file.c linux-dovetail-v5.15.y-dovetail/fs/file.c
--- linux-5.15.26/fs/file.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/fs/file.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:403 @ static struct fdtable *close_files(struc
 			if (set & 1) {
 				struct file * file = xchg(&fdt->fd[i], NULL);
 				if (file) {
+					uninstall_inband_fd(i, file, files);
 					filp_close(file, files);
 					cond_resched();
 				}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:587 @ void fd_install(unsigned int fd, struct
 		fdt = files_fdtable(files);
 		BUG_ON(fdt->fd[fd] != NULL);
 		rcu_assign_pointer(fdt->fd[fd], file);
+		install_inband_fd(fd, file, files);
 		spin_unlock(&files->file_lock);
 		return;
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:596 @ void fd_install(unsigned int fd, struct
 	fdt = rcu_dereference_sched(files->fdt);
 	BUG_ON(fdt->fd[fd] != NULL);
 	rcu_assign_pointer(fdt->fd[fd], file);
+	install_inband_fd(fd, file, files);
 	rcu_read_unlock_sched();
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:630 @ static struct file *pick_file(struct fil
 	}
 	rcu_assign_pointer(fdt->fd[fd], NULL);
 	__put_unused_fd(files, fd);
+	uninstall_inband_fd(fd, file, files);
 
 out_unlock:
 	spin_unlock(&files->file_lock);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:787 @ int __close_fd_get_file(unsigned int fd,
 		goto out_err;
 	rcu_assign_pointer(fdt->fd[fd], NULL);
 	__put_unused_fd(files, fd);
+	uninstall_inband_fd(fd, file, files);
 	get_file(file);
 	*res = file;
 	return 0;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:839 @ void do_close_on_exec(struct files_struc
 				continue;
 			rcu_assign_pointer(fdt->fd[fd], NULL);
 			__put_unused_fd(files, fd);
+			uninstall_inband_fd(fd, file, files);
 			spin_unlock(&files->file_lock);
 			filp_close(file, files);
 			cond_resched();
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1114 @ __releases(&files->file_lock)
 		__set_close_on_exec(fd, fdt);
 	else
 		__clear_close_on_exec(fd, fdt);
+	replace_inband_fd(fd, file, files);
 	spin_unlock(&files->file_lock);
 
 	if (tofree)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/fs/ioctl.c linux-dovetail-v5.15.y-dovetail/fs/ioctl.c
--- linux-5.15.26/fs/ioctl.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/fs/ioctl.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:914 @ long compat_ptr_ioctl(struct file *file,
 }
 EXPORT_SYMBOL(compat_ptr_ioctl);
 
+/**
+ * compat_ptr_oob_ioctl - generic implementation of .compat_oob_ioctl file operation
+ *
+ * The equivalent of compat_ptr_ioctl, dealing with out-of-band ioctl
+ * calls. Management of this handler is delegated to the code
+ * implementing the out-of-band ioctl() syscall in the companion core.
+ */
+long compat_ptr_oob_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	if (!file->f_op->oob_ioctl)
+		return -ENOIOCTLCMD;
+
+	return file->f_op->oob_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+EXPORT_SYMBOL(compat_ptr_oob_ioctl);
+
 COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
 		       compat_ulong_t, arg)
 {
Binärdateien linux-5.15.26/fs/xfs/libxfs/xfs_dir2_block.c und linux-dovetail-v5.15.y-dovetail/fs/xfs/libxfs/xfs_dir2_block.c sind verschieden.
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/fs/xfs/libxfs/xfs_dir2.c linux-dovetail-v5.15.y-dovetail/fs/xfs/libxfs/xfs_dir2.c
--- linux-5.15.26/fs/xfs/libxfs/xfs_dir2.c	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/fs/xfs/libxfs/xfs_dir2.c	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:693 @ xfs_dir2_shrink_inode(
 	if (dp->i_disk_size > xfs_dir2_db_off_to_byte(args->geo, db + 1, 0))
 		return 0;
 	bno = da;
-	if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) {
-		/*
-		 * This can't really happen unless there's kernel corruption.
+	if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) {
+		/*
+		 * This can't really happen unless there's kernel corruption.
 		 */
 		return error;
 	}
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:725 @ xfs_dir2_namecheck(
 		return false;
 
 	/* There shouldn't be any slashes or nulls here */
-	return !memchr(name, '/', length) && !memchr(name, 0, length);
+	return !memchr(name, '/', length) && !memchr(name, 0, length);
 }
 
-xfs_dahash_t
-xfs_dir2_hashname(
-	struct xfs_mount	*mp,
-	struct xfs_name		*name)
-{
-	if (unlikely(xfs_has_asciici(mp)))
+xfs_dahash_t
+xfs_dir2_hashname(
+	struct xfs_mount	*mp,
+	struct xfs_name		*name)
+{
+	if (unlikely(xfs_has_asciici(mp)))
 		return xfs_ascii_ci_hashname(name);
 	return xfs_da_hashname(name->name, name->len);
 }
 
 enum xfs_dacmp
 xfs_dir2_compname(
-	struct xfs_da_args	*args,
-	const unsigned char	*name,
-	int			len)
+	struct xfs_da_args	*args,
+	const unsigned char	*name,
+	int			len)
 {
-	if (unlikely(xfs_has_asciici(args->dp->i_mount)))
-		return xfs_ascii_ci_compname(args, name, len);
+	if (unlikely(xfs_has_asciici(args->dp->i_mount)))
+		return xfs_ascii_ci_compname(args, name, len);
 	return xfs_da_compname(args, name, len);
 }
Binärdateien linux-5.15.26/fs/xfs/libxfs/xfs_dir2.h und linux-dovetail-v5.15.y-dovetail/fs/xfs/libxfs/xfs_dir2.h sind verschieden.
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/asm-generic/atomic.h linux-dovetail-v5.15.y-dovetail/include/asm-generic/atomic.h
--- linux-5.15.26/include/asm-generic/atomic.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/asm-generic/atomic.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:62 @ static inline void generic_atomic_##op(i
 {									\
 	unsigned long flags;						\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	v->counter = v->counter c_op i;					\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 }
 
 #define ATOMIC_OP_RETURN(op, c_op)					\
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:73 @ static inline int generic_atomic_##op##_
 	unsigned long flags;						\
 	int ret;							\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	ret = (v->counter = v->counter c_op i);				\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 									\
 	return ret;							\
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:86 @ static inline int generic_atomic_fetch_#
 	unsigned long flags;						\
 	int ret;							\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	ret = v->counter;						\
 	v->counter = v->counter c_op i;					\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 									\
 	return ret;							\
 }
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/asm-generic/cmpxchg.h linux-dovetail-v5.15.y-dovetail/include/asm-generic/cmpxchg.h
--- linux-5.15.26/include/asm-generic/cmpxchg.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/asm-generic/cmpxchg.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:33 @ unsigned long __generic_xchg(unsigned lo
 #ifdef __xchg_u8
 		return __xchg_u8(x, ptr);
 #else
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile u8 *)ptr;
 		*(volatile u8 *)ptr = x;
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u8 */
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:44 @ unsigned long __generic_xchg(unsigned lo
 #ifdef __xchg_u16
 		return __xchg_u16(x, ptr);
 #else
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile u16 *)ptr;
 		*(volatile u16 *)ptr = x;
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u16 */
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:55 @ unsigned long __generic_xchg(unsigned lo
 #ifdef __xchg_u32
 		return __xchg_u32(x, ptr);
 #else
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile u32 *)ptr;
 		*(volatile u32 *)ptr = x;
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u32 */
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:67 @ unsigned long __generic_xchg(unsigned lo
 #ifdef __xchg_u64
 		return __xchg_u64(x, ptr);
 #else
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile u64 *)ptr;
 		*(volatile u64 *)ptr = x;
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u64 */
 #endif /* CONFIG_64BIT */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/asm-generic/cmpxchg-local.h linux-dovetail-v5.15.y-dovetail/include/asm-generic/cmpxchg-local.h
--- linux-5.15.26/include/asm-generic/cmpxchg-local.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/asm-generic/cmpxchg-local.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:26 @ static inline unsigned long __generic_cm
 	if (size == 8 && sizeof(unsigned long) != 8)
 		wrong_size_cmpxchg(ptr);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	switch (size) {
 	case 1: prev = *(u8 *)ptr;
 		if (prev == old)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:47 @ static inline unsigned long __generic_cm
 	default:
 		wrong_size_cmpxchg(ptr);
 	}
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	return prev;
 }
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:60 @ static inline u64 __generic_cmpxchg64_lo
 	u64 prev;
 	unsigned long flags;
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	prev = *(u64 *)ptr;
 	if (prev == old)
 		*(u64 *)ptr = new;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	return prev;
 }
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/asm-generic/irq_pipeline.h linux-dovetail-v5.15.y-dovetail/include/asm-generic/irq_pipeline.h
--- linux-5.15.26/include/asm-generic/irq_pipeline.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/asm-generic/irq_pipeline.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef __ASM_GENERIC_IRQ_PIPELINE_H
+#define __ASM_GENERIC_IRQ_PIPELINE_H
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+unsigned long inband_irq_save(void);
+void inband_irq_restore(unsigned long flags);
+void inband_irq_enable(void);
+void inband_irq_disable(void);
+int inband_irqs_disabled(void);
+
+#define hard_cond_local_irq_enable()		hard_local_irq_enable()
+#define hard_cond_local_irq_disable()		hard_local_irq_disable()
+#define hard_cond_local_irq_save()		hard_local_irq_save()
+#define hard_cond_local_irq_restore(__flags)	hard_local_irq_restore(__flags)
+
+#define hard_local_irq_save()			native_irq_save()
+#define hard_local_irq_restore(__flags)		native_irq_restore(__flags)
+#define hard_local_irq_enable()			native_irq_enable()
+#define hard_local_irq_disable()		native_irq_disable()
+#define hard_local_save_flags()			native_save_flags()
+
+#define hard_irqs_disabled()			native_irqs_disabled()
+#define hard_irqs_disabled_flags(__flags)	native_irqs_disabled_flags(__flags)
+
+void irq_pipeline_nmi_enter(void);
+void irq_pipeline_nmi_exit(void);
+
+/* Swap then merge virtual and hardware interrupt states. */
+#define irqs_merge_flags(__flags, __stalled)				\
+	({								\
+		unsigned long __combo =					\
+			arch_irqs_virtual_to_native_flags(__stalled) |	\
+			arch_irqs_native_to_virtual_flags(__flags);	\
+		__combo;						\
+	})
+
+/* Extract swap virtual and hardware interrupt states. */
+#define irqs_split_flags(__combo, __stall_r)				\
+	({								\
+		unsigned long __virt = (__combo);			\
+		*(__stall_r) = hard_irqs_disabled_flags(__combo);	\
+		__virt &= ~arch_irqs_virtual_to_native_flags(*(__stall_r)); \
+		arch_irqs_virtual_to_native_flags(__virt);		\
+	})
+
+#define hard_local_irq_sync()			native_irq_sync()
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+#define hard_local_save_flags()			({ unsigned long __flags; \
+						raw_local_save_flags(__flags); __flags; })
+#define hard_local_irq_enable()			raw_local_irq_enable()
+#define hard_local_irq_disable()		raw_local_irq_disable()
+#define hard_local_irq_save()			({ unsigned long __flags; \
+						raw_local_irq_save(__flags); __flags; })
+#define hard_local_irq_restore(__flags)		raw_local_irq_restore(__flags)
+
+#define hard_cond_local_irq_enable()		do { } while(0)
+#define hard_cond_local_irq_disable()		do { } while(0)
+#define hard_cond_local_irq_save()		0
+#define hard_cond_local_irq_restore(__flags)	do { (void)(__flags); } while(0)
+
+#define hard_irqs_disabled()			irqs_disabled()
+#define hard_irqs_disabled_flags(__flags)	raw_irqs_disabled_flags(__flags)
+
+static inline void irq_pipeline_nmi_enter(void) { }
+static inline void irq_pipeline_nmi_exit(void) { }
+
+#define hard_local_irq_sync()			do { } while (0)
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+void check_inband_stage(void);
+#define check_hard_irqs_disabled()		\
+	WARN_ON_ONCE(!hard_irqs_disabled())
+#else
+static inline void check_inband_stage(void) { }
+static inline int check_hard_irqs_disabled(void) { return 0; }
+#endif
+
+extern bool irq_pipeline_oopsing;
+
+static __always_inline bool irqs_pipelined(void)
+{
+	return IS_ENABLED(CONFIG_IRQ_PIPELINE);
+}
+
+static __always_inline bool irq_pipeline_debug(void)
+{
+	return IS_ENABLED(CONFIG_DEBUG_IRQ_PIPELINE) &&
+		!irq_pipeline_oopsing;
+}
+
+static __always_inline bool irq_pipeline_debug_locking(void)
+{
+	return IS_ENABLED(CONFIG_DEBUG_HARD_LOCKS);
+}
+
+#endif /* __ASM_GENERIC_IRQ_PIPELINE_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/asm-generic/percpu.h linux-dovetail-v5.15.y-dovetail/include/asm-generic/percpu.h
--- linux-5.15.26/include/asm-generic/percpu.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/asm-generic/percpu.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:128 @ do {									\
 ({									\
 	typeof(pcp) ___ret;						\
 	unsigned long ___flags;						\
-	raw_local_irq_save(___flags);					\
+	___flags = hard_local_irq_save();				\
 	___ret = raw_cpu_generic_read(pcp);				\
-	raw_local_irq_restore(___flags);				\
+	hard_local_irq_restore(___flags);				\
 	___ret;								\
 })
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:147 @ do {									\
 #define this_cpu_generic_to_op(pcp, val, op)				\
 do {									\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	raw_cpu_generic_to_op(pcp, val, op);				\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 } while (0)
 
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:157 @ do {									\
 ({									\
 	typeof(pcp) __ret;						\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	__ret = raw_cpu_generic_add_return(pcp, val);			\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:167 @ do {									\
 ({									\
 	typeof(pcp) __ret;						\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	__ret = raw_cpu_generic_xchg(pcp, nval);			\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:177 @ do {									\
 ({									\
 	typeof(pcp) __ret;						\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	__ret = raw_cpu_generic_cmpxchg(pcp, oval, nval);		\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:187 @ do {									\
 ({									\
 	int __ret;							\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	__ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
 			oval1, oval2, nval1, nval2);			\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/dovetail/irq.h linux-dovetail-v5.15.y-dovetail/include/dovetail/irq.h
--- linux-5.15.26/include/dovetail/irq.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/dovetail/irq.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_IRQ_H
+#define _DOVETAIL_IRQ_H
+
+/* Placeholders for pre- and post-IRQ handling. */
+
+static inline void irq_enter_pipeline(void) { }
+
+static inline void irq_exit_pipeline(void) { }
+
+#endif /* !_DOVETAIL_IRQ_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/dovetail/mm_info.h linux-dovetail-v5.15.y-dovetail/include/dovetail/mm_info.h
--- linux-5.15.26/include/dovetail/mm_info.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/dovetail/mm_info.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_MM_INFO_H
+#define _DOVETAIL_MM_INFO_H
+
+/*
+ * Placeholder for per-mm state information defined by the co-kernel.
+ */
+
+struct oob_mm_state {
+};
+
+#endif /* !_DOVETAIL_MM_INFO_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/dovetail/netdevice.h linux-dovetail-v5.15.y-dovetail/include/dovetail/netdevice.h
--- linux-5.15.26/include/dovetail/netdevice.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/dovetail/netdevice.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_NETDEVICE_H
+#define _DOVETAIL_NETDEVICE_H
+
+/*
+ * Placeholder for per-device state information defined by the
+ * out-of-band network stack.
+ */
+
+struct oob_netdev_state {
+};
+
+#endif /* !_DOVETAIL_NETDEVICE_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/dovetail/poll.h linux-dovetail-v5.15.y-dovetail/include/dovetail/poll.h
--- linux-5.15.26/include/dovetail/poll.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/dovetail/poll.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_POLL_H
+#define _DOVETAIL_POLL_H
+
+/*
+ * Placeholder for the out-of-band poll operation descriptor.
+ */
+
+struct oob_poll_wait {
+};
+
+#endif /* !_DOVETAIL_POLL_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/dovetail/spinlock.h linux-dovetail-v5.15.y-dovetail/include/dovetail/spinlock.h
--- linux-5.15.26/include/dovetail/spinlock.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/dovetail/spinlock.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_SPINLOCK_H
+#define _DOVETAIL_SPINLOCK_H
+
+/* Placeholders for hard/hybrid spinlock modifiers. */
+
+struct raw_spinlock;
+
+static inline void hard_spin_lock_prepare(struct raw_spinlock *lock)
+{ }
+
+static inline void hard_spin_unlock_finish(struct raw_spinlock *lock)
+{ }
+
+static inline void hard_spin_trylock_prepare(struct raw_spinlock *lock)
+{ }
+
+static inline void hard_spin_trylock_fail(struct raw_spinlock *lock)
+{ }
+
+#endif /* !_DOVETAIL_SPINLOCK_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/dovetail/thread_info.h linux-dovetail-v5.15.y-dovetail/include/dovetail/thread_info.h
--- linux-5.15.26/include/dovetail/thread_info.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/dovetail/thread_info.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_THREAD_INFO_H
+#define _DOVETAIL_THREAD_INFO_H
+
+/*
+ * Placeholder for per-thread state information defined by the
+ * co-kernel.
+ */
+
+struct oob_thread_state {
+};
+
+#endif /* !_DOVETAIL_THREAD_INFO_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/dt-bindings/power/meson-gxbb-power.h linux-dovetail-v5.15.y-dovetail/include/dt-bindings/power/meson-gxbb-power.h
--- linux-5.15.26/include/dt-bindings/power/meson-gxbb-power.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/dt-bindings/power/meson-gxbb-power.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:11 @
 #define _DT_BINDINGS_MESON_GXBB_POWER_H
 
 #define PWRC_GXBB_VPU_ID		0
-#define PWRC_GXBB_ETHERNET_MEM_ID	1
+#define PWRC_GXBB_ETHERNET_MEM_ID	1
 
-#endif
+#endif
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/linux/clockchips.h linux-dovetail-v5.15.y-dovetail/include/linux/clockchips.h
--- linux-5.15.26/include/linux/clockchips.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/linux/clockchips.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:18 @
 # include <linux/cpumask.h>
 # include <linux/ktime.h>
 # include <linux/notifier.h>
+# include <linux/irqstage.h>
 
 struct clock_event_device;
 struct module;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:35 @ struct module;
  *		from DETACHED or SHUTDOWN.
  * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
  *		    stopped.
+ * RESERVED:	Device is controlled by an out-of-band core via a proxy.
  */
 enum clock_event_state {
 	CLOCK_EVT_STATE_DETACHED,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:43 @ enum clock_event_state {
 	CLOCK_EVT_STATE_PERIODIC,
 	CLOCK_EVT_STATE_ONESHOT,
 	CLOCK_EVT_STATE_ONESHOT_STOPPED,
+	CLOCK_EVT_STATE_RESERVED,
 };
 
 /*
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:73 @ enum clock_event_state {
  */
 # define CLOCK_EVT_FEAT_HRTIMER		0x000080
 
+/*
+ * Interrupt pipeline support:
+ *
+ * - Clockevent device can work with pipelined timer events (i.e. proxied).
+ * - Device currently delivers high-precision events via out-of-band interrupts.
+ * - Device acts as a proxy for timer interrupt pipelining.
+ */
+# define CLOCK_EVT_FEAT_PIPELINE	0x000100
+# define CLOCK_EVT_FEAT_OOB		0x000200
+# define CLOCK_EVT_FEAT_PROXY		0x000400
+
 /**
  * struct clock_event_device - clock event device descriptor
  * @event_handler:	Assigned by the framework to be called by the low
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:108 @ enum clock_event_state {
  * @max_delta_ticks:	maximum delta value in ticks stored for reconfiguration
  * @name:		ptr to clock event name
  * @rating:		variable to rate clock event devices
- * @irq:		IRQ number (only for non CPU local devices)
+ * @irq:		IRQ number (only for non CPU local devices, or pipelined timers)
  * @bound_on:		Bound on CPU
  * @cpumask:		cpumask to indicate for which CPUs this device works
  * @list:		list head for the management code
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:154 @ static inline bool clockevent_state_deta
 	return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
 }
 
+static inline bool clockevent_state_reserved(struct clock_event_device *dev)
+{
+	return dev->state_use_accessors == CLOCK_EVT_STATE_RESERVED;
+}
+
 static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
 {
 	return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:179 @ static inline bool clockevent_state_ones
 	return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
 }
 
+static inline bool clockevent_is_oob(struct clock_event_device *dev)
+{
+	return !!(dev->features & CLOCK_EVT_FEAT_OOB);
+}
+
 /*
  * Calculate a multiplication factor for scaled math, which is used to convert
  * nanoseconds based values to clock ticks:
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:213 @ extern int clockevents_unbind_device(str
 extern void clockevents_config_and_register(struct clock_event_device *dev,
 					    u32 freq, unsigned long min_delta,
 					    unsigned long max_delta);
+extern void clockevents_switch_state(struct clock_event_device *dev,
+				     enum clock_event_state state);
 
 extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
 
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:244 @ static inline int tick_check_broadcast_e
 static inline void tick_setup_hrtimer_broadcast(void) { }
 # endif
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+struct clock_proxy_device {
+	struct clock_event_device proxy_device;
+	struct clock_event_device *real_device;
+	void (*handle_oob_event)(struct clock_event_device *dev);
+	void (*__setup_handler)(struct clock_proxy_device *dev);
+	void (*__original_handler)(struct clock_event_device *dev);
+};
+
+void tick_notify_proxy(void);
+
+static inline
+void clockevents_handle_event(struct clock_event_device *ced)
+{
+	/*
+	 * If called from the in-band stage, or for delivering a
+	 * high-precision timer event to the out-of-band stage, call
+	 * the event handler immediately.
+	 *
+	 * Otherwise, ced is still the in-band tick device for the
+	 * current CPU, so just relay the incoming tick to the in-band
+	 * stage via tick_notify_proxy().  This situation can happen
+	 * when all CPUs receive the same out-of-band IRQ from a given
+	 * clock event device, but only a subset of the online CPUs has
+	 * enabled a proxy.
+	 */
+	if (clockevent_is_oob(ced) || running_inband())
+		ced->event_handler(ced);
+	else
+		tick_notify_proxy();
+}
+
+#else
+
+static inline
+void clockevents_handle_event(struct clock_event_device *ced)
+{
+	ced->event_handler(ced);
+}
+
+#endif	/* !CONFIG_IRQ_PIPELINE */
+
 #else /* !CONFIG_GENERIC_CLOCKEVENTS: */
 
 static inline void clockevents_suspend(void) { }
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/linux/clocksource.h linux-dovetail-v5.15.y-dovetail/include/linux/clocksource.h
--- linux-5.15.26/include/linux/clocksource.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/linux/clocksource.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:16 @
 #include <linux/timex.h>
 #include <linux/time.h>
 #include <linux/list.h>
+#include <linux/hashtable.h>
 #include <linux/cache.h>
 #include <linux/timer.h>
+#include <linux/cdev.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/clocksource_ids.h>
 #include <asm/div64.h>
 #include <asm/io.h>
+#include <uapi/linux/clocksource.h>
 
 struct clocksource;
 struct module;
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:35 @ struct module;
 #include <asm/clocksource.h>
 #endif
 
+
 #include <vdso/clocksource.h>
 
+enum clocksource_vdso_type {
+	CLOCKSOURCE_VDSO_NONE = 0,
+	CLOCKSOURCE_VDSO_ARCHITECTED,
+	CLOCKSOURCE_VDSO_MMIO,	/* <= Must be last. */
+};
+
 /**
  * struct clocksource - hardware abstraction for a free running counter
  *	Provides mostly state-free accessors to the underlying hardware.
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:123 @ struct clocksource {
 	int			rating;
 	enum clocksource_ids	id;
 	enum vdso_clock_mode	vdso_clock_mode;
+	enum clocksource_vdso_type vdso_type;
 	unsigned long		flags;
 
 	int			(*enable)(struct clocksource *cs);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:143 @ struct clocksource {
 	struct module		*owner;
 };
 
+struct clocksource_mmio {
+	void __iomem *reg;
+	struct clocksource clksrc;
+};
+
+struct clocksource_user_mmio {
+	struct clocksource_mmio mmio;
+	void __iomem *reg_upper;
+	unsigned int bits_lower;
+	unsigned int mask_lower;
+	unsigned int mask_upper;
+	enum clksrc_user_mmio_type type;
+	unsigned long phys_lower;
+	unsigned long phys_upper;
+	unsigned int id;
+	struct device *dev;
+	struct cdev cdev;
+	DECLARE_HASHTABLE(mappings, 10);
+	struct spinlock lock;
+	struct list_head link;
+};
+
+struct clocksource_mmio_regs {
+	void __iomem *reg_upper;
+	void __iomem *reg_lower;
+	unsigned int bits_upper;
+	unsigned int bits_lower;
+	unsigned long (*revmap)(void *);
+};
+
 /*
  * Clock source flags bits::
  */
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:317 @ extern u64 clocksource_mmio_readl_up(str
 extern u64 clocksource_mmio_readl_down(struct clocksource *);
 extern u64 clocksource_mmio_readw_up(struct clocksource *);
 extern u64 clocksource_mmio_readw_down(struct clocksource *);
+extern u64 clocksource_dual_mmio_readw_up(struct clocksource *);
+extern u64 clocksource_dual_mmio_readl_up(struct clocksource *);
 
 extern int clocksource_mmio_init(void __iomem *, const char *,
 	unsigned long, int, unsigned, u64 (*)(struct clocksource *));
 
+extern int clocksource_user_mmio_init(struct clocksource_user_mmio *ucs,
+				      const struct clocksource_mmio_regs *regs,
+				      unsigned long hz);
+
+extern int clocksource_user_single_mmio_init(
+	void __iomem *base, const char *name,
+	unsigned long hz, int rating, unsigned int bits,
+	u64 (*read)(struct clocksource *));
+
 extern int clocksource_i8253_init(void);
 
 #define TIMER_OF_DECLARE(name, compat, fn) \
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/linux/console.h linux-dovetail-v5.15.y-dovetail/include/linux/console.h
--- linux-5.15.26/include/linux/console.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/linux/console.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:143 @ static inline int con_debug_leave(void)
 struct console {
 	char	name[16];
 	void	(*write)(struct console *, const char *, unsigned);
+	void	(*write_raw)(struct console *, const char *, unsigned);
 	int	(*read)(struct console *, char *, unsigned);
 	struct tty_driver *(*device)(struct console *, int *);
 	void	(*unblank)(void);
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/linux/context_tracking_state.h linux-dovetail-v5.15.y-dovetail/include/linux/context_tracking_state.h
--- linux-5.15.26/include/linux/context_tracking_state.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/linux/context_tracking_state.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:31 @ DECLARE_PER_CPU(struct context_tracking,
 
 static __always_inline bool context_tracking_enabled(void)
 {
-	return static_branch_unlikely(&context_tracking_key);
+	return static_branch_unlikely(&context_tracking_key) && running_inband();
 }
 
 static __always_inline bool context_tracking_enabled_cpu(int cpu)
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/linux/dmaengine.h linux-dovetail-v5.15.y-dovetail/include/linux/dmaengine.h
--- linux-5.15.26/include/linux/dmaengine.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/linux/dmaengine.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:64 @ enum dma_transaction_type {
 	DMA_ASYNC_TX,
 	DMA_SLAVE,
 	DMA_CYCLIC,
+	DMA_OOB,
 	DMA_INTERLEAVE,
 	DMA_COMPLETION_NO_ORDER,
 	DMA_REPEAT,
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:194 @ struct dma_interleaved_template {
  *  transaction is marked with DMA_PREP_REPEAT will cause the new transaction
  *  to never be processed and stay in the issued queue forever. The flag is
  *  ignored if the previous transaction is not a repeated transaction.
+ * @DMA_OOB_INTERRUPT - if DMA_OOB is supported, handle the completion
+ *  interrupt for this transaction from the out-of-band stage (implies
+ *  DMA_PREP_INTERRUPT). This includes calling the completion callback routine
+ *  from such context if defined for the transaction.
+ * @DMA_OOB_PULSE - if DMA_OOB is supported, (slave) transactions on the
+ *  out-of-band channel should be triggered manually by a call to
+ *  dma_pulse_oob() (implies DMA_OOB_INTERRUPT).
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:213 @ enum dma_ctrl_flags {
 	DMA_PREP_CMD = (1 << 7),
 	DMA_PREP_REPEAT = (1 << 8),
 	DMA_PREP_LOAD_EOT = (1 << 9),
+	DMA_OOB_INTERRUPT = (1 << 10),
+	DMA_OOB_PULSE = (1 << 11),
 };
 
 /**
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:955 @ struct dma_device {
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
+	int (*device_pulse_oob)(struct dma_chan *chan);
 	void (*device_release)(struct dma_device *dev);
 	/* debugfs support */
 	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:992 @ static inline struct dma_async_tx_descri
 						  dir, flags, NULL);
 }
 
+static inline bool dmaengine_oob_valid(struct dma_chan *chan,
+				unsigned long flags)
+{
+	return !(dovetailing() &&
+		flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE) &&
+		!test_bit(DMA_OOB, chan->device->cap_mask.bits));
+}
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
 	enum dma_transfer_direction dir, unsigned long flags)
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1007 @ static inline struct dma_async_tx_descri
 	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
 		return NULL;
 
+	if (!dmaengine_oob_valid(chan, flags))
+		return NULL;
+
 	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 						  dir, flags, NULL);
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1037 @ static inline struct dma_async_tx_descri
 	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
 		return NULL;
 
+	if (!dmaengine_oob_valid(chan, flags))
+		return NULL;
+
 	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
 						period_len, dir, flags);
 }
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:1445 @ static inline void dma_async_issue_pendi
 }
 
 /**
+ * dma_pulse_oob - manual trigger of an out-of-band transaction
+ * @chan: target DMA channel
+ *
+ * Trigger the next out-of-band transaction immediately.
+ */
+static inline int dma_pulse_oob(struct dma_chan *chan)
+{
+	int ret = -ENOTSUPP;
+
+	if (chan->device->device_pulse_oob)
+		ret = chan->device->device_pulse_oob(chan);
+
+	return ret;
+}
+
+/**
  * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/linux/dovetail.h linux-dovetail-v5.15.y-dovetail/include/linux/dovetail.h
--- linux-5.15.26/include/linux/dovetail.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/linux/dovetail.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:4 @
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _LINUX_DOVETAIL_H
+#define _LINUX_DOVETAIL_H
+
+#ifdef CONFIG_DOVETAIL
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/thread_info.h>
+#include <linux/irqstage.h>
+#include <uapi/asm-generic/dovetail.h>
+#include <asm/dovetail.h>
+
+struct pt_regs;
+struct task_struct;
+struct file;
+struct files_struct;
+
+enum inband_event_type {
+	INBAND_TASK_SIGNAL,
+	INBAND_TASK_MIGRATION,
+	INBAND_TASK_EXIT,
+	INBAND_TASK_RETUSER,
+	INBAND_TASK_PTSTEP,
+	INBAND_TASK_PTSTOP,
+	INBAND_TASK_PTCONT,
+	INBAND_PROCESS_CLEANUP,
+};
+
+struct dovetail_migration_data {
+	struct task_struct *task;
+	int dest_cpu;
+};
+
+struct dovetail_altsched_context {
+	struct task_struct *task;
+	struct mm_struct *active_mm;
+	bool borrowed_mm;
+};
+
+#define protect_inband_mm(__flags)			\
+	do {						\
+		(__flags) = hard_cond_local_irq_save();	\
+		barrier();				\
+	} while (0)					\
+
+#define unprotect_inband_mm(__flags)			\
+	do {						\
+		barrier();				\
+		hard_cond_local_irq_restore(__flags);	\
+	} while (0)					\
+
+void inband_task_init(struct task_struct *p);
+
+int pipeline_syscall(unsigned int nr, struct pt_regs *regs);
+
+void __oob_trap_notify(unsigned int exception,
+		       struct pt_regs *regs);
+
+static __always_inline void oob_trap_notify(unsigned int exception,
+					struct pt_regs *regs)
+{
+	if (running_oob() && !test_thread_local_flags(_TLF_OOBTRAP))
+		__oob_trap_notify(exception, regs);
+}
+
+void __oob_trap_unwind(unsigned int exception,
+		struct pt_regs *regs);
+
+static __always_inline void oob_trap_unwind(unsigned int exception,
+					struct pt_regs *regs)
+{
+	if (test_thread_local_flags(_TLF_OOBTRAP))
+		__oob_trap_unwind(exception, regs);
+}
+
+void inband_event_notify(enum inband_event_type,
+			 void *data);
+
+void inband_clock_was_set(void);
+
+static inline void inband_signal_notify(struct task_struct *p)
+{
+	if (test_ti_local_flags(task_thread_info(p), _TLF_DOVETAIL))
+		inband_event_notify(INBAND_TASK_SIGNAL, p);
+}
+
+static inline void inband_migration_notify(struct task_struct *p, int cpu)
+{
+	if (test_ti_local_flags(task_thread_info(p), _TLF_DOVETAIL)) {
+		struct dovetail_migration_data d = {
+			.task = p,
+			.dest_cpu = cpu,
+		};
+		inband_event_notify(INBAND_TASK_MIGRATION, &d);
+	}
+}
+
+static inline void inband_exit_notify(void)
+{
+	inband_event_notify(INBAND_TASK_EXIT, NULL);
+}
+
+static inline void inband_cleanup_notify(struct mm_struct *mm)
+{
+	/*
+	 * Notify regardless of _TLF_DOVETAIL: current may have
+	 * resources to clean up although it might not be interested
+	 * in other kernel events.
+	 */
+	inband_event_notify(INBAND_PROCESS_CLEANUP, mm);
+}
+
+static inline void inband_ptstop_notify(void)
+{
+	if (test_thread_local_flags(_TLF_DOVETAIL))
+		inband_event_notify(INBAND_TASK_PTSTOP, current);
+}
+
+static inline void inband_ptcont_notify(void)
+{
+	if (test_thread_local_flags(_TLF_DOVETAIL))
+		inband_event_notify(INBAND_TASK_PTCONT, current);
+}
+
+static inline void inband_ptstep_notify(struct task_struct *tracee)
+{
+	if (test_ti_local_flags(task_thread_info(tracee), _TLF_DOVETAIL))
+		inband_event_notify(INBAND_TASK_PTSTEP, tracee);
+}
+
+static inline
+void prepare_inband_switch(struct task_struct *next)
+{
+	struct task_struct *prev = current;
+
+	if (test_ti_local_flags(task_thread_info(next), _TLF_DOVETAIL))
+		__this_cpu_write(irq_pipeline.rqlock_owner, prev);
+}
+
+void inband_retuser_notify(void);
+
+bool inband_switch_tail(void);
+
+void oob_trampoline(void);
+
+void arch_inband_task_init(struct task_struct *p);
+
+int dovetail_start(void);
+
+void dovetail_stop(void);
+
+void dovetail_init_altsched(struct dovetail_altsched_context *p);
+
+void dovetail_start_altsched(void);
+
+void dovetail_stop_altsched(void);
+
+__must_check int dovetail_leave_inband(void);
+
+static inline void dovetail_leave_oob(void)
+{
+	clear_thread_local_flags(_TLF_OOB|_TLF_OFFSTAGE);
+	clear_thread_flag(TIF_MAYDAY);
+}
+
+void dovetail_resume_inband(void);
+
+bool dovetail_context_switch(struct dovetail_altsched_context *out,
+			struct dovetail_altsched_context *in,
+			bool leave_inband);
+
+static inline
+struct oob_thread_state *dovetail_current_state(void)
+{
+	return &current_thread_info()->oob_state;
+}
+
+static inline
+struct oob_thread_state *dovetail_task_state(struct task_struct *p)
+{
+	return &task_thread_info(p)->oob_state;
+}
+
+static inline
+struct oob_mm_state *dovetail_mm_state(void)
+{
+	if (current->flags & PF_KTHREAD)
+		return NULL;
+
+	return &current->mm->oob_state;
+}
+
+void dovetail_call_mayday(struct pt_regs *regs);
+
+static inline void dovetail_send_mayday(struct task_struct *castaway)
+{
+	struct thread_info *ti = task_thread_info(castaway);
+
+	if (test_ti_local_flags(ti, _TLF_DOVETAIL))
+		set_ti_thread_flag(ti, TIF_MAYDAY);
+}
+
+static inline void dovetail_request_ucall(struct task_struct *task)
+{
+	struct thread_info *ti = task_thread_info(task);
+
+	if (test_ti_local_flags(ti, _TLF_DOVETAIL))
+		set_ti_thread_flag(ti, TIF_RETUSER);
+}
+
+static inline void dovetail_clear_ucall(void)
+{
+	if (test_thread_flag(TIF_RETUSER))
+		clear_thread_flag(TIF_RETUSER);
+}
+
+void install_inband_fd(unsigned int fd, struct file *file,
+		       struct files_struct *files);
+
+void uninstall_inband_fd(unsigned int fd, struct file *file,
+			 struct files_struct *files);
+
+void replace_inband_fd(unsigned int fd, struct file *file,
+		       struct files_struct *files);
+
+#else	/* !CONFIG_DOVETAIL */
+
+struct files_struct;
+
+#define protect_inband_mm(__flags)	\
+	do { (void)(__flags); } while (0)
+
+#define unprotect_inband_mm(__flags)	\
+	do { (void)(__flags); } while (0)
+
+static inline
+void inband_task_init(struct task_struct *p) { }
+
+static inline void arch_dovetail_exec_prepare(void)
+{ }
+
+/*
+ * Keep the trap helpers as macros, we might not be able to resolve
+ * trap numbers if CONFIG_DOVETAIL is off.
+ */
+#define oob_trap_notify(__exception, __regs)	do { } while (0)
+#define oob_trap_unwind(__exception, __regs)	do { } while (0)
+
+static inline
+int pipeline_syscall(unsigned int nr, struct pt_regs *regs)
+{
+	return 0;
+}
+
+static inline void inband_signal_notify(struct task_struct *p) { }
+
+static inline
+void inband_migration_notify(struct task_struct *p, int cpu) { }
+
+static inline void inband_exit_notify(void) { }
+
+static inline void inband_cleanup_notify(struct mm_struct *mm) { }
+
+static inline void inband_retuser_notify(void) { }
+
+static inline void inband_ptstop_notify(void) { }
+
+static inline void inband_ptcont_notify(void) { }
+
+static inline void inband_ptstep_notify(struct task_struct *tracee) { }
+
+static inline void oob_trampoline(void) { }
+
+static inline void prepare_inband_switch(struct task_struct *next) { }
+
+static inline bool inband_switch_tail(void)
+{
+	/* Matches converse disabling in prepare_task_switch(). */
+	hard_cond_local_irq_enable();
+	return false;
+}
+
+static inline void dovetail_request_ucall(struct task_struct *task) { }
+
+static inline void dovetail_clear_ucall(void) { }
+
+static inline void inband_clock_was_set(void) { }
+
+static inline
+void install_inband_fd(unsigned int fd, struct file *file,
+		       struct files_struct *files) { }
+
+static inline
+void uninstall_inband_fd(unsigned int fd, struct file *file,
+			 struct files_struct *files) { }
+
+static inline
+void replace_inband_fd(unsigned int fd, struct file *file,
+		       struct files_struct *files) { }
+
+#endif	/* !CONFIG_DOVETAIL */
+
+static __always_inline bool dovetailing(void)
+{
+	return IS_ENABLED(CONFIG_DOVETAIL);
+}
+
+static __always_inline bool dovetail_debug(void)
+{
+	return IS_ENABLED(CONFIG_DEBUG_DOVETAIL);
+}
+
+#endif /* _LINUX_DOVETAIL_H */
diff -uprN -X linux-5.15.26/Documentation/dontdiff linux-5.15.26/include/linux/dw_apb_timer.h linux-dovetail-v5.15.y-dovetail/include/linux/dw_apb_timer.h
--- linux-5.15.26/include/linux/dw_apb_timer.h	2022-03-02 11:48:10.000000000 +0100
+++ linux-dovetail-v5.15.y-dovetail/include/linux/dw_apb_timer.h	2022-03-10 09:47:50.000000000 +0100
@ linux-dovetail-v5.15.y-dovetail/arch/arm/common/mcpm_entry.c:33 @ struct dw_apb_clock_event_device {
 
 struct dw_apb_clocksource {
 	struct dw_apb_timer			timer;
-	struct clocksource			cs;
+	struct clocksource_user_mmio		ummio;
 };
 
 void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced);
diff -uprN -X linux-5.15.26/Documentat