From 1be3652868bff10f9bb9fa9ece3e67bcb3793018 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 7 Jul 2020 12:25:11 +0200
Subject: [PATCH 112/158] drm/i915: Don't disable interrupts and pretend a lock
 has been acquired in __timeline_mark_lock().

This is a revert of commits
   d67739268cf0e ("drm/i915/gt: Mark up the nested engine-pm timeline lock as irqsafe")
   6c69a45445af9 ("drm/i915/gt: Mark context->active_count as protected by timeline->mutex")

The existing code leads to a different behaviour depending on whether
lockdep is enabled or not. Any lock that is acquired later in this path
without disabling interrupts (even though it needs them disabled) will
not be noticed by lockdep.

The timeline->mutex is not just a lockdep annotation but an actual
mutex_t that is properly used as a lock elsewhere. In the case of
__timeline_mark_lock(), however, lockdep is only told that the mutex is
acquired while no lock has actually been taken.

It appears that its only purpose is to satisfy the
lockdep_assert_held() check in intel_context_mark_active(). The other
problem is that interrupts get disabled this way even on PREEMPT_RT,
which leads to problems later in this path, for instance during memory
allocation.
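
For reference, a condensed sketch of the affected sequence in
switch_to_kernel_context() before this patch (helper bodies summarised
in comments, elisions are mine):

   flags = __timeline_mark_lock(ce);   /* local_irq_save() + fake mutex_acquire() */
   rq = __i915_request_create(ce, GFP_NOWAIT);
   ...
   __timeline_mark_unlock(ce, flags);  /* fake mutex_release() + local_irq_restore() */

Even the GFP_NOWAIT allocation inside __i915_request_create() may take
locks which on PREEMPT_RT are sleeping locks and must not be acquired
with interrupts disabled.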

Add a CONTEXT_IS_PARKED bit to the intel_context flags and
set_bit()/clear_bit() it instead of mutex_acquire()/mutex_release(). Use
test_bit() in the two identified spots which relied on the lockdep
annotation.
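
With the flag, the same section becomes, roughly (a sketch, not the
full hunk below):

   set_bit(CONTEXT_IS_PARKED, &ce->flags);
   rq = __i915_request_create(ce, GFP_NOWAIT);
   ...
   clear_bit(CONTEXT_IS_PARKED, &ce->flags);

and the assertion in intel_context_mark_active() accepts either the
timeline mutex or the flag:

   lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
                  test_bit(CONTEXT_IS_PARKED, &ce->flags));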

Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/gpu/drm/i915/gt/intel_context.h       |  3 +-
 drivers/gpu/drm/i915/gt/intel_context_types.h |  1 +
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     | 38 +------------------
 drivers/gpu/drm/i915/i915_request.h           |  3 +-
 4 files changed, 7 insertions(+), 38 deletions(-)

Index: linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_context.h
===================================================================
--- linux-5.15.19-rt29.orig/drivers/gpu/drm/i915/gt/intel_context.h
+++ linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_context.h
@ linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_context.h:166 @ static inline void intel_context_enter(s
 
 static inline void intel_context_mark_active(struct intel_context *ce)
 {
-	lockdep_assert_held(&ce->timeline->mutex);
+	lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
+		       test_bit(CONTEXT_IS_PARKED, &ce->flags));
 	++ce->active_count;
 }
 
Index: linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_context_types.h
===================================================================
--- linux-5.15.19-rt29.orig/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_context_types.h
@ linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_context_types.h:115 @ struct intel_context {
 #define CONTEXT_FORCE_SINGLE_SUBMISSION	7
 #define CONTEXT_NOPREEMPT		8
 #define CONTEXT_LRCA_DIRTY		9
+#define CONTEXT_IS_PARKED		10
 
 	struct {
 		u64 timeout_us;
Index: linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_engine_pm.c
===================================================================
--- linux-5.15.19-rt29.orig/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@ linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_engine_pm.c:83 @ static int __engine_unpark(struct intel_
 	return 0;
 }
 
-#if IS_ENABLED(CONFIG_LOCKDEP)
-
-static unsigned long __timeline_mark_lock(struct intel_context *ce)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
-
-	return flags;
-}
-
-static void __timeline_mark_unlock(struct intel_context *ce,
-				   unsigned long flags)
-{
-	mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
-	local_irq_restore(flags);
-}
-
-#else
-
-static unsigned long __timeline_mark_lock(struct intel_context *ce)
-{
-	return 0;
-}
-
-static void __timeline_mark_unlock(struct intel_context *ce,
-				   unsigned long flags)
-{
-}
-
-#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
-
 static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct i915_request *rq = to_request(fence);
@ linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_engine_pm.c:129 @ static bool switch_to_kernel_context(str
 {
 	struct intel_context *ce = engine->kernel_context;
 	struct i915_request *rq;
-	unsigned long flags;
 	bool result = true;
 
 	/* GPU is pointing to the void, as good as in the kernel context. */
@ linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_engine_pm.c:170 @ static bool switch_to_kernel_context(str
 	 * engine->wakeref.count, we may see the request completion and retire
 	 * it causing an underflow of the engine->wakeref.
 	 */
-	flags = __timeline_mark_lock(ce);
+	set_bit(CONTEXT_IS_PARKED, &ce->flags);
 	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
 
 	rq = __i915_request_create(ce, GFP_NOWAIT);
@ linux-5.15.19-rt29/drivers/gpu/drm/i915/gt/intel_engine_pm.c:202 @ static bool switch_to_kernel_context(str
 
 	result = false;
 out_unlock:
-	__timeline_mark_unlock(ce, flags);
+	clear_bit(CONTEXT_IS_PARKED, &ce->flags);
 	return result;
 }
 
Index: linux-5.15.19-rt29/drivers/gpu/drm/i915/i915_request.h
===================================================================
--- linux-5.15.19-rt29.orig/drivers/gpu/drm/i915/i915_request.h
+++ linux-5.15.19-rt29/drivers/gpu/drm/i915/i915_request.h
@ linux-5.15.19-rt29/drivers/gpu/drm/i915/i915_request.h:612 @ i915_request_timeline(const struct i915_
 {
 	/* Valid only while the request is being constructed (or retired). */
 	return rcu_dereference_protected(rq->timeline,
-					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
+					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
+					 test_bit(CONTEXT_IS_PARKED, &rq->context->flags));
 }
 
 static inline struct i915_gem_context *