sched.h: Move (spin|rwlock)_needbreak() to spinlock.h
author  Kent Overstreet <kent.overstreet@linux.dev>
Mon, 11 Dec 2023 19:05:04 +0000 (14:05 -0500)
committer  Kent Overstreet <kent.overstreet@linux.dev>
Thu, 21 Dec 2023 00:26:30 +0000 (19:26 -0500)
This lets us kill sched.h's dependency on spinlock.h.
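
For context (not part of the change itself): spin_needbreak() is the check that
cond_resched_lock()-style helpers use to decide whether a long critical section
should be broken up because another task is spinning on the lock. A minimal
sketch of that caller pattern, assuming a made-up struct my_queue and
hypothetical my_queue_empty()/my_queue_handle_one() helpers:

  #include <linux/spinlock.h>

  struct my_queue {                       /* hypothetical structure */
          spinlock_t lock;
          /* ... queue contents ... */
  };

  static void drain_queue(struct my_queue *q)
  {
          spin_lock(&q->lock);
          while (!my_queue_empty(q)) {    /* hypothetical helpers */
                  my_queue_handle_one(q);

                  if (spin_needbreak(&q->lock)) {
                          /* Another task is waiting on the lock: drop it
                           * briefly so that task can make progress. */
                          spin_unlock(&q->lock);
                          cpu_relax();
                          spin_lock(&q->lock);
                  }
          }
          spin_unlock(&q->lock);
  }

Without CONFIG_PREEMPTION, spin_needbreak() compiles to 0 and the break path
disappears entirely.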

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
include/linux/sched.h
include/linux/spinlock.h

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5a5b7b12268266724f3ed896a2b237af9ac36503..7501a3451a20170282924f1a6bf7ffea0d6e00e8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2227,37 +2227,6 @@ static inline bool preempt_model_preemptible(void)
        return preempt_model_full() || preempt_model_rt();
 }
 
-/*
- * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
- */
-static inline int spin_needbreak(spinlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
-       return spin_is_contended(lock);
-#else
-       return 0;
-#endif
-}
-
-/*
- * Check if a rwlock is contended.
- * Returns non-zero if there is another task waiting on the rwlock.
- * Returns zero if the lock is not contended or the system / underlying
- * rwlock implementation does not support contention detection.
- * Technically does not depend on CONFIG_PREEMPTION, but a general need
- * for low latency.
- */
-static inline int rwlock_needbreak(rwlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
-       return rwlock_is_contended(lock);
-#else
-       return 0;
-#endif
-}
-
 static __always_inline bool need_resched(void)
 {
        return unlikely(tif_need_resched());
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 31d3d747a9db78b94d571db3886afff61dda5162..0c71f06454d9e100fff6a79c047164e0f403ba19 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -449,6 +449,37 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
        return raw_spin_is_contended(&lock->rlock);
 }
 
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
+ * but a general need for low latency)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+       return spin_is_contended(lock);
+#else
+       return 0;
+#endif
+}
+
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+       return rwlock_is_contended(lock);
+#else
+       return 0;
+#endif
+}
+
 #define assert_spin_locked(lock)       assert_raw_spin_locked(&(lock)->rlock)
 
 #else  /* !CONFIG_PREEMPT_RT */