From 7ea42302c3f2794cc07828ee5657c3e93614e4b9 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 6 Apr 2010 16:51:31 +0200
Subject: [PATCH 091/158] md: raid5: Make raid5_percpu handling RT aware

raid_run_ops() disables preemption with get_cpu() around the access
to the raid5_percpu variables. That causes 'scheduling while atomic'
splats on RT.

Serialize the access to the percpu data with a lock and keep the code
preemptible.
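
On RT, get_cpu_light()/put_cpu_light() only disable migration instead
of preemption, so the section stays preemptible and the new per-CPU
spinlock (a sleeping lock on RT) provides the actual serialization. As
a rough sketch, the RT tree provides the helpers along these lines (not
the verbatim definition, which lives in the RT change to
include/linux/smp.h):

	/* sketch only: pin the task to this CPU, but stay preemptible */
	#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
	#define put_cpu_light()		migrate_enable()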

Reported-by: Udo van den Heuvel <udovdh@xs4all.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
---
 drivers/md/raid5.c | 7 +++++--
 drivers/md/raid5.h | 1 +
 2 files changed, 6 insertions(+), 2 deletions(-)

Index: linux-5.15.19-rt29/drivers/md/raid5.c
===================================================================
--- linux-5.15.19-rt29.orig/drivers/md/raid5.c
+++ linux-5.15.19-rt29/drivers/md/raid5.c
@ linux-5.15.19-rt29/drivers/md/raid5.c:2220 @ static void raid_run_ops(struct stripe_h
 	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	percpu = per_cpu_ptr(conf->percpu, cpu);
+	spin_lock(&percpu->lock);
 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
 		ops_run_biofill(sh);
 		overlap_clear++;
@ linux-5.15.19-rt29/drivers/md/raid5.c:2281 @ static void raid_run_ops(struct stripe_h
 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
 				wake_up(&sh->raid_conf->wait_for_overlap);
 		}
-	put_cpu();
+	spin_unlock(&percpu->lock);
+	put_cpu_light();
 }
 
 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
@ linux-5.15.19-rt29/drivers/md/raid5.c:7107 @ static int raid456_cpu_up_prepare(unsign
 			__func__, cpu);
 		return -ENOMEM;
 	}
+	spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
 	return 0;
 }
 
Index: linux-5.15.19-rt29/drivers/md/raid5.h
===================================================================
--- linux-5.15.19-rt29.orig/drivers/md/raid5.h
+++ linux-5.15.19-rt29/drivers/md/raid5.h
@ linux-5.15.19-rt29/drivers/md/raid5.h:638 @ struct r5conf {
 	int			recovery_disabled;
 	/* per cpu variables */
 	struct raid5_percpu {
+		spinlock_t	lock;		/* Protection for -RT */
 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
 		void		*scribble;  /* space for constructing buffer
 					     * lists and performing address