ksmbd: fix race condition from parallel smb2 lock requests
author Namjae Jeon <linkinjeon@kernel.org>
Wed, 4 Oct 2023 09:31:03 +0000 (18:31 +0900)
committer Steve French <stfrench@microsoft.com>
Thu, 5 Oct 2023 01:21:48 +0000 (20:21 -0500)
There is a race condition between parallel smb2 lock requests.

                                            Time
                                             +
Thread A                                     | Thread B
smb2_lock                                    | smb2_lock
                                             |
 insert smb_lock to lock_list                |
 spin_unlock(&work->conn->llist_lock)        |
                                             |
                                             |   spin_lock(&conn->llist_lock);
                                             |   kfree(cmp_lock);
                                             |
 // UAF!                                     |
 list_add(&smb_lock->llist, &rollback_list)  +

This patch fixes the race by swapping the order of the two insertions: the
smb_lock is added to the caller's rollback list before it is added to the
connection's lock_list, so the lock is never touched again after other
threads can find it on the connection list and free it.
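Below is a minimal user-space sketch of the fixed ordering, not the ksmbd
code itself; the function and type names (take_lock_fixed, smb_lock_model)
are illustrative, and a pthread mutex stands in for conn->llist_lock while
plain singly linked lists stand in for the kernel list_heads.

#include <pthread.h>
#include <stddef.h>

struct smb_lock_model {
	struct smb_lock_model *clist;	/* link on the shared connection list */
	struct smb_lock_model *llist;	/* link on the caller's rollback list */
};

static pthread_mutex_t llist_lock = PTHREAD_MUTEX_INITIALIZER;
static struct smb_lock_model *conn_lock_list;	/* shared across threads */

/*
 * Fixed ordering: record the lock on the caller-private rollback list
 * first, then publish it on the shared connection list under llist_lock.
 * After the unlock, another thread may find and free the lock, but this
 * thread never dereferences it again, so the use-after-free window from
 * the old ordering (publish first, record on rollback_list afterwards)
 * is gone.
 */
static void take_lock_fixed(struct smb_lock_model *smb_lock,
			    struct smb_lock_model **rollback_list)
{
	smb_lock->llist = *rollback_list;	/* private, no locking needed */
	*rollback_list = smb_lock;

	pthread_mutex_lock(&llist_lock);
	smb_lock->clist = conn_lock_list;	/* now visible to other threads */
	conn_lock_list = smb_lock;
	pthread_mutex_unlock(&llist_lock);
}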

Reported-by: luosili <rootlab@huawei.com>
Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index e774c9855f7ffedec3f72d7b8015b96a72aa530c..fd6f05786ac2a61cc4863c27d4b62bed01bbf83e 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -7038,10 +7038,6 @@ skip:
 
                                ksmbd_debug(SMB,
                                            "would have to wait for getting lock\n");
-                               spin_lock(&work->conn->llist_lock);
-                               list_add_tail(&smb_lock->clist,
-                                             &work->conn->lock_list);
-                               spin_unlock(&work->conn->llist_lock);
                                list_add(&smb_lock->llist, &rollback_list);
 
                                argv = kmalloc(sizeof(void *), GFP_KERNEL);
@@ -7072,9 +7068,6 @@ skip:
 
                                if (work->state != KSMBD_WORK_ACTIVE) {
                                        list_del(&smb_lock->llist);
-                                       spin_lock(&work->conn->llist_lock);
-                                       list_del(&smb_lock->clist);
-                                       spin_unlock(&work->conn->llist_lock);
                                        locks_free_lock(flock);
 
                                        if (work->state == KSMBD_WORK_CANCELLED) {
@@ -7094,19 +7087,16 @@ skip:
                                }
 
                                list_del(&smb_lock->llist);
-                               spin_lock(&work->conn->llist_lock);
-                               list_del(&smb_lock->clist);
-                               spin_unlock(&work->conn->llist_lock);
                                release_async_work(work);
                                goto retry;
                        } else if (!rc) {
+                               list_add(&smb_lock->llist, &rollback_list);
                                spin_lock(&work->conn->llist_lock);
                                list_add_tail(&smb_lock->clist,
                                              &work->conn->lock_list);
                                list_add_tail(&smb_lock->flist,
                                              &fp->lock_list);
                                spin_unlock(&work->conn->llist_lock);
-                               list_add(&smb_lock->llist, &rollback_list);
                                ksmbd_debug(SMB, "successful in taking lock\n");
                        } else {
                                goto out;