		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
-			wake_up(&waiter->fl_wait);
+			locks_wake_up(waiter);

		/*
		 * The setting of fl_blocker to NULL marks the "done"
static bool locks_conflict(struct file_lock *caller_fl,
			   struct file_lock *sys_fl)
{
-	if (sys_fl->fl_type == F_WRLCK)
+	if (lock_is_write(sys_fl))
		return true;
-	if (caller_fl->fl_type == F_WRLCK)
+	if (lock_is_write(caller_fl))
		return true;
	return false;
}
				 struct file_lock *sys_fl)
{
	/* F_UNLCK checks any locks on the same fd. */
-	if (caller_fl->fl_type == F_UNLCK) {
+	if (lock_is_unlock(caller_fl)) {
		if (!posix_same_owner(caller_fl, sys_fl))
			return false;
		return locks_overlap(caller_fl, sys_fl);
	}
-	if (request->fl_type == F_UNLCK) {
+	if (lock_is_unlock(request)) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
-		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
+		return lock_is_unlock(request) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
				continue;
			if (fl->fl_start > request->fl_end)
				break;
-			if (request->fl_type == F_UNLCK)
+			if (lock_is_unlock(request))
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
	error = 0;
	if (!added) {
-		if (request->fl_type == F_UNLCK) {
+		if (lock_is_unlock(request)) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
-		if (fl && (fl->fl_type == F_WRLCK))
+		if (fl && lock_is_write(fl))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}
int fcntl_setlease(unsigned int fd, struct file *filp, int arg);
int fcntl_getlease(struct file *filp);
+static inline bool lock_is_unlock(struct file_lock *fl)
+{
+	return fl->fl_type == F_UNLCK;
+}
+
+static inline bool lock_is_read(struct file_lock *fl)
+{
+	return fl->fl_type == F_RDLCK;
+}
+
+static inline bool lock_is_write(struct file_lock *fl)
+{
+	return fl->fl_type == F_WRLCK;
+}
+
+static inline void locks_wake_up(struct file_lock *fl)
+{
+	wake_up(&fl->fl_wait);
+}
+
+/* for walking lists of file_locks linked by fl_list */
+#define for_each_file_lock(_fl, _head) list_for_each_entry(_fl, _head, fl_list)
+
/* fs/locks.c */
void locks_free_lock_context(struct inode *inode);
void locks_free_lock(struct file_lock *fl);
	return F_UNLCK;
}
+static inline bool lock_is_unlock(struct file_lock *fl)
+{
+	return false;
+}
+
+static inline bool lock_is_read(struct file_lock *fl)
+{
+	return false;
+}
+
+static inline bool lock_is_write(struct file_lock *fl)
+{
+	return false;
+}
+
+static inline void locks_wake_up(struct file_lock *fl)
+{
+}
+
+#define for_each_file_lock(_fl, _head) while(false)
+
static inline void
locks_free_lock_context(struct inode *inode)
{