cleanup: Add conditional guard support
author: Peter Zijlstra <peterz@infradead.org>
Sun, 17 Sep 2023 11:22:17 +0000 (13:22 +0200)
committer: Peter Zijlstra <peterz@infradead.org>
Wed, 15 Nov 2023 09:15:33 +0000 (10:15 +0100)
Adds:

 - DEFINE_GUARD_COND() / DEFINE_LOCK_GUARD_1_COND() to extend existing
   guards with conditional lock primitives, eg. mutex_trylock(),
   mutex_lock_interruptible().

   nb. both primitives allow NULL 'locks', which cause the lock to
       fail (obviously).

 - extends scoped_guard() to not take the body when the
   conditional guard 'fails'. eg.

     scoped_guard (mutex_intr, &task->signal_cred_guard_mutex) {
...
     }

   will only execute the body when the mutex is held.

 - provides scoped_cond_guard(name, fail, args...); which extends
   scoped_guard() to execute the 'fail' statement when the lock-acquire fails.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20231102110706.460851167%40infradead.org
include/linux/cleanup.h
include/linux/mutex.h
include/linux/rwsem.h
include/linux/spinlock.h

index 9f1a9c455b6842a2c0fb6d4334b7f975d2a636b3..c2d09bc4f9768cc3321dbb4490216fd0a2699320 100644 (file)
@@ -125,25 +125,55 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
  *     trivial wrapper around DEFINE_CLASS() above specifically
  *     for locks.
  *
+ * DEFINE_GUARD_COND(name, ext, condlock)
+ *     wrapper around EXTEND_CLASS above to add conditional lock
+ *     variants to a base class, eg. mutex_trylock() or
+ *     mutex_lock_interruptible().
+ *
  * guard(name):
- *     an anonymous instance of the (guard) class
+ *     an anonymous instance of the (guard) class, not recommended for
+ *     conditional locks.
  *
  * scoped_guard (name, args...) { }:
  *     similar to CLASS(name, scope)(args), except the variable (with the
  *     explicit name 'scope') is declard in a for-loop such that its scope is
  *     bound to the next (compound) statement.
  *
+ *     for conditional locks the loop body is skipped when the lock is not
+ *     acquired.
+ *
+ * scoped_cond_guard (name, fail, args...) { }:
+ *      similar to scoped_guard(), except it does fail when the lock
+ *      acquire fails.
+ *
  */
 
 #define DEFINE_GUARD(_name, _type, _lock, _unlock) \
-       DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
+       DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+       static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+       { return *_T; }
+
+#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+       EXTEND_CLASS(_name, _ext, \
+                    ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+                    class_##_name##_t _T) \
+       static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+       { return class_##_name##_lock_ptr(_T); }
 
 #define guard(_name) \
        CLASS(_name, __UNIQUE_ID(guard))
 
+#define __guard_ptr(_name) class_##_name##_lock_ptr
+
 #define scoped_guard(_name, args...)                                   \
        for (CLASS(_name, scope)(args),                                 \
-            *done = NULL; !done; done = (void *)1)
+            *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
+
+#define scoped_cond_guard(_name, _fail, args...) \
+       for (CLASS(_name, scope)(args), \
+            *done = NULL; !done; done = (void *)1) \
+               if (!__guard_ptr(_name)(&scope)) _fail; \
+               else
 
 /*
  * Additional helper macros for generating lock guards with types, either for
@@ -152,6 +182,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
  *
  * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
  * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
  *
  * will result in the following type:
  *
@@ -173,6 +204,11 @@ typedef struct {                                                   \
 static inline void class_##_name##_destructor(class_##_name##_t *_T)   \
 {                                                                      \
        if (_T->lock) { _unlock; }                                      \
+}                                                                      \
+                                                                       \
+static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T)    \
+{                                                                      \
+       return _T->lock;                                                \
 }
 
 
@@ -201,4 +237,14 @@ __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
 __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)               \
 __DEFINE_LOCK_GUARD_0(_name, _lock)
 
+#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock)               \
+       EXTEND_CLASS(_name, _ext,                                       \
+                    ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+                       if (_T->lock && !(_condlock)) _T->lock = NULL;  \
+                       _t; }),                                         \
+                    typeof_member(class_##_name##_t, lock) l)          \
+       static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+       { return class_##_name##_lock_ptr(_T); }
+
+
 #endif /* __LINUX_GUARDS_H */
index a33aa9eb9fc3b0154b97096e4a5bcdea2a91fef0..95d11308f995d01c5972a2c21cf99d2bf9a23955 100644 (file)
@@ -221,6 +221,7 @@ extern void mutex_unlock(struct mutex *lock);
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
+DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
+DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
 
 #endif /* __LINUX_MUTEX_H */
index 1dd530ce8b45b9a7aa3934272b229360d2eee3ef..9c29689ff505e09854c93eb449665a31145e3df2 100644 (file)
@@ -203,11 +203,11 @@ extern void up_read(struct rw_semaphore *sem);
 extern void up_write(struct rw_semaphore *sem);
 
 DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
-
-DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
-DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
+DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
 
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
 
 /*
  * downgrade write lock to read lock
index 31d3d747a9db78b94d571db3886afff61dda5162..ceb56b39c70f775a736ad2918a2d0901af240f40 100644 (file)
@@ -507,6 +507,8 @@ DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
                    raw_spin_lock(_T->lock),
                    raw_spin_unlock(_T->lock))
 
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+
 DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
                    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
                    raw_spin_unlock(_T->lock))
@@ -515,23 +517,36 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
                    raw_spin_lock_irq(_T->lock),
                    raw_spin_unlock_irq(_T->lock))
 
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+
 DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
                    raw_spin_lock_irqsave(_T->lock, _T->flags),
                    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)
 
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
+                        raw_spin_trylock_irqsave(_T->lock, _T->flags))
+
 DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
                    spin_lock(_T->lock),
                    spin_unlock(_T->lock))
 
+DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+
 DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
                    spin_lock_irq(_T->lock),
                    spin_unlock_irq(_T->lock))
 
+DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
+                        spin_trylock_irq(_T->lock))
+
 DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
                    spin_lock_irqsave(_T->lock, _T->flags),
                    spin_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)
 
+DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
+                        spin_trylock_irqsave(_T->lock, _T->flags))
+
 #undef __LINUX_INSIDE_SPINLOCK_H
 #endif /* __LINUX_SPINLOCK_H */