rxrpc: Fix locking issue
authorDavid Howells <dhowells@redhat.com>
Sat, 21 May 2022 07:45:28 +0000 (08:45 +0100)
committerDavid S. Miller <davem@davemloft.net>
Sun, 22 May 2022 20:03:01 +0000 (21:03 +0100)
There's a locking issue with the per-netns list of calls in rxrpc.  The
pieces of code that add and remove a call from the list use write_lock()
and the calls procfile uses read_lock() to access it.  However, the timer
callback function may trigger a removal by trying to queue a call for
processing and finding that it's already queued - at which point it has a
spare refcount that it has to do something with.  Unfortunately, if it puts
the call and this reduces the refcount to 0, the call will be removed from
the list.  Since the _bh variants of the locking functions
aren't used, this can deadlock.

================================
WARNING: inconsistent lock state
5.18.0-rc3-build4+ #10 Not tainted
--------------------------------
inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
ksoftirqd/2/25 [HC0[0]:SC1[1]:HE1:SE0] takes:
ffff888107ac4038 (&rxnet->call_lock){+.?.}-{2:2}, at: rxrpc_put_call+0x103/0x14b
{SOFTIRQ-ON-W} state was registered at:
...
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(&rxnet->call_lock);
  <Interrupt>
    lock(&rxnet->call_lock);

 *** DEADLOCK ***

1 lock held by ksoftirqd/2/25:
 #0: ffff8881008ffdb0 ((&call->timer)){+.-.}-{0:0}, at: call_timer_fn+0x5/0x23d

Changes
=======
ver #2)
 - Changed to using list_next_rcu() rather than rcu_dereference() directly.

Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Signed-off-by: David S. Miller <davem@davemloft.net>
fs/seq_file.c
include/linux/list.h
include/linux/seq_file.h
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/net_ns.c
net/rxrpc/proc.c

index 7ab8a58c29b618b9ead091266dd5b4c55f58c0cb..9456a2032224a2d212e49bf7b45c05d1c76a52d4 100644 (file)
@@ -931,6 +931,38 @@ struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
 }
 EXPORT_SYMBOL(seq_list_next);
 
+struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos)
+{
+       struct list_head *lh;
+
+       list_for_each_rcu(lh, head)
+               if (pos-- == 0)
+                       return lh;
+
+       return NULL;
+}
+EXPORT_SYMBOL(seq_list_start_rcu);
+
+struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos)
+{
+       if (!pos)
+               return head;
+
+       return seq_list_start_rcu(head, pos - 1);
+}
+EXPORT_SYMBOL(seq_list_start_head_rcu);
+
+struct list_head *seq_list_next_rcu(void *v, struct list_head *head,
+                                   loff_t *ppos)
+{
+       struct list_head *lh;
+
+       lh = list_next_rcu((struct list_head *)v);
+       ++*ppos;
+       return lh == head ? NULL : lh;
+}
+EXPORT_SYMBOL(seq_list_next_rcu);
+
 /**
  * seq_hlist_start - start an iteration of a hlist
  * @head: the head of the hlist
index c147eeb2d39d7c07017dbdaf827ac5cac643ea3f..57e8b559cdf6bbacf4caf1de939e27e9e24de3e5 100644 (file)
@@ -605,6 +605,16 @@ static inline void list_splice_tail_init(struct list_head *list,
 #define list_for_each(pos, head) \
        for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
 
+/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos:       the &struct list_head to use as a loop cursor.
+ * @head:      the head for your list.
+ */
+#define list_for_each_rcu(pos, head)             \
+       for (pos = rcu_dereference((head)->next); \
+            !list_is_head(pos, (head)); \
+            pos = rcu_dereference(pos->next))
+
 /**
  * list_for_each_continue - continue iteration over a list
  * @pos:       the &struct list_head to use as a loop cursor.
index 60820ab511d224f3bb655b0a43459591eae6821e..bd023dd38ae6f626417d5b44b3c5f0fcd0409817 100644 (file)
@@ -277,6 +277,10 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
 extern struct list_head *seq_list_next(void *v, struct list_head *head,
                loff_t *ppos);
 
+extern struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next_rcu(void *v, struct list_head *head, loff_t *ppos);
+
 /*
  * Helpers for iteration over hlist_head-s in seq_files
  */
index 52a23d03d694e38eced14ec8e9ee3473a3c75316..dcc0ec0bf3de3c4b6f20bfb04b21ef5c4670e783 100644 (file)
@@ -60,7 +60,7 @@ struct rxrpc_net {
        struct proc_dir_entry   *proc_net;      /* Subdir in /proc/net */
        u32                     epoch;          /* Local epoch for detecting local-end reset */
        struct list_head        calls;          /* List of calls active in this namespace */
-       rwlock_t                call_lock;      /* Lock for ->calls */
+       spinlock_t              call_lock;      /* Lock for ->calls */
        atomic_t                nr_calls;       /* Count of allocated calls */
 
        atomic_t                nr_conns;
index 8ae59edc25510a4cf6087ad23a0a2edc666590e6..99e10eea3732177d101ba7e35b90e8be03f57129 100644 (file)
@@ -140,9 +140,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
        write_unlock(&rx->call_lock);
 
        rxnet = call->rxnet;
-       write_lock(&rxnet->call_lock);
-       list_add_tail(&call->link, &rxnet->calls);
-       write_unlock(&rxnet->call_lock);
+       spin_lock_bh(&rxnet->call_lock);
+       list_add_tail_rcu(&call->link, &rxnet->calls);
+       spin_unlock_bh(&rxnet->call_lock);
 
        b->call_backlog[call_head] = call;
        smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
index 8764a4f19c03484e3c8bc7ee6e470bf5c285f89f..84d0a41096450cbe8a7aa70441025f4d52c025a6 100644 (file)
@@ -337,9 +337,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        write_unlock(&rx->call_lock);
 
        rxnet = call->rxnet;
-       write_lock(&rxnet->call_lock);
-       list_add_tail(&call->link, &rxnet->calls);
-       write_unlock(&rxnet->call_lock);
+       spin_lock_bh(&rxnet->call_lock);
+       list_add_tail_rcu(&call->link, &rxnet->calls);
+       spin_unlock_bh(&rxnet->call_lock);
 
        /* From this point on, the call is protected by its own lock. */
        release_sock(&rx->sk);
@@ -633,9 +633,9 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
                ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
                if (!list_empty(&call->link)) {
-                       write_lock(&rxnet->call_lock);
+                       spin_lock_bh(&rxnet->call_lock);
                        list_del_init(&call->link);
-                       write_unlock(&rxnet->call_lock);
+                       spin_unlock_bh(&rxnet->call_lock);
                }
 
                rxrpc_cleanup_call(call);
@@ -707,7 +707,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
        _enter("");
 
        if (!list_empty(&rxnet->calls)) {
-               write_lock(&rxnet->call_lock);
+               spin_lock_bh(&rxnet->call_lock);
 
                while (!list_empty(&rxnet->calls)) {
                        call = list_entry(rxnet->calls.next,
@@ -722,12 +722,12 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
                               rxrpc_call_states[call->state],
                               call->flags, call->events);
 
-                       write_unlock(&rxnet->call_lock);
+                       spin_unlock_bh(&rxnet->call_lock);
                        cond_resched();
-                       write_lock(&rxnet->call_lock);
+                       spin_lock_bh(&rxnet->call_lock);
                }
 
-               write_unlock(&rxnet->call_lock);
+               spin_unlock_bh(&rxnet->call_lock);
        }
 
        atomic_dec(&rxnet->nr_calls);
index 34f389975a7d004b3b58345c26d1988c89599834..bb4c25d6df64c68db0a6d16457d75263928980c0 100644 (file)
@@ -50,7 +50,7 @@ static __net_init int rxrpc_init_net(struct net *net)
        rxnet->epoch |= RXRPC_RANDOM_EPOCH;
 
        INIT_LIST_HEAD(&rxnet->calls);
-       rwlock_init(&rxnet->call_lock);
+       spin_lock_init(&rxnet->call_lock);
        atomic_set(&rxnet->nr_calls, 1);
 
        atomic_set(&rxnet->nr_conns, 1);
index 8967201fd8e54eb35fb6d0d46f287d5393fd4841..245418943e01ccdeb861ae03f2796c296ad90080 100644 (file)
@@ -26,29 +26,23 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
  */
 static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
        __acquires(rcu)
-       __acquires(rxnet->call_lock)
 {
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
        rcu_read_lock();
-       read_lock(&rxnet->call_lock);
-       return seq_list_start_head(&rxnet->calls, *_pos);
+       return seq_list_start_head_rcu(&rxnet->calls, *_pos);
 }
 
 static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
-       return seq_list_next(v, &rxnet->calls, pos);
+       return seq_list_next_rcu(v, &rxnet->calls, pos);
 }
 
 static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
-       __releases(rxnet->call_lock)
        __releases(rcu)
 {
-       struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
-
-       read_unlock(&rxnet->call_lock);
        rcu_read_unlock();
 }