RDMA/core: Clean up cq pool mechanism
author Jack Morgenstein <jackm@dev.mellanox.co.il>
Tue, 8 Dec 2020 07:35:43 +0000 (09:35 +0200)
committer Jason Gunthorpe <jgg@nvidia.com>
Thu, 10 Dec 2020 19:05:17 +0000 (15:05 -0400)
The CQ pool mechanism had two problems:

1. The CQ pool lists were uninitialized in the device registration error
   flow.  As a result, all the list pointers remained NULL.  This caused
   the kernel to crash (in procedure ib_cq_pool_destroy) when that error
   flow was taken (and unregister called).  The stack trace snippet:

     BUG: kernel NULL pointer dereference, address: 0000000000000000
     #PF: supervisor read access in kernel mode
     #PF: error_code(0x0000) - not-present page
     PGD 0 P4D 0
     Oops: 0000 [#1] SMP PTI
     . . .
     RIP: 0010:ib_cq_pool_destroy+0x1b/0x70 [ib_core]
     . . .
     Call Trace:
      disable_device+0x9f/0x130 [ib_core]
      __ib_unregister_device+0x35/0x90 [ib_core]
      ib_register_device+0x529/0x610 [ib_core]
      __mlx5_ib_add+0x3a/0x70 [mlx5_ib]
      mlx5_add_device+0x87/0x1c0 [mlx5_core]
      mlx5_register_interface+0x74/0xc0 [mlx5_core]
      do_one_initcall+0x4b/0x1f4
      do_init_module+0x5a/0x223
      load_module+0x1938/0x1d40

2. At device unregister, when cleaning up the cq pool, the CQs in the
   pool lists were freed, but the cq entries were left on the lists,
   so the lists continued to point at freed memory.

The fix for the first issue is to initialize the cq pool lists when the
ib_device structure is allocated for a new device (in procedure
_ib_alloc_device).
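
   Distilled from the device.c hunk below, the allocation-time
   initialization is simply (a minimal sketch; the surrounding
   allocation and error handling are omitted):

     /* In _ib_alloc_device(): set up the cq pool state while the
      * device is being allocated, so the lists are valid on every
      * exit path, including the registration error flow.
      */
     spin_lock_init(&device->cq_pools_lock);
     for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++)
             INIT_LIST_HEAD(&device->cq_pools[i]);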

The fix for the second problem is to delete cq entries from the pool lists
when cleaning up the cq pool.
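
   Concretely, the cleanup loop (see the cq.c hunk below) unlinks each
   cq before freeing it, so no list node is left pointing at freed
   memory; a minimal sketch of the loop body:

     /* Safe iteration permits unlinking the current entry;
      * list_del() removes the cq from the pool list before the
      * cq itself is freed.
      */
     list_for_each_entry_safe(cq, n, &dev->cq_pools[i], pool_entry) {
             WARN_ON(cq->cqe_used);
             list_del(&cq->pool_entry);
             cq->shared = false;
             ib_free_cq(cq);
     }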

In addition, procedure ib_cq_pool_destroy() is renamed to the more
appropriate name ib_cq_pool_cleanup().

Fixes: 4aa1615268a8 ("RDMA/core: Fix ordering of CQ pool destruction")
Link: https://lore.kernel.org/r/20201208073545.9723-2-leon@kernel.org
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/cq.c
drivers/infiniband/core/device.c

drivers/infiniband/core/core_priv.h
index baa86c86efad5d3b750b00e9514e420dd901999f..315f7a297eee35e6c1ce0c840bb63c1312dacc90 100644
@@ -402,7 +402,6 @@ void rdma_umap_priv_init(struct rdma_umap_priv *priv,
                         struct vm_area_struct *vma,
                         struct rdma_user_mmap_entry *entry);
 
-void ib_cq_pool_init(struct ib_device *dev);
-void ib_cq_pool_destroy(struct ib_device *dev);
+void ib_cq_pool_cleanup(struct ib_device *dev);
 
 #endif /* _CORE_PRIV_H */
drivers/infiniband/core/cq.c
index d4248bbe74da8ae69edcde424358c5b511e58664..433b426729d4cefa940b8683060d47fefae4c460 100644
@@ -349,16 +349,7 @@ void ib_free_cq(struct ib_cq *cq)
 }
 EXPORT_SYMBOL(ib_free_cq);
 
-void ib_cq_pool_init(struct ib_device *dev)
-{
-       unsigned int i;
-
-       spin_lock_init(&dev->cq_pools_lock);
-       for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++)
-               INIT_LIST_HEAD(&dev->cq_pools[i]);
-}
-
-void ib_cq_pool_destroy(struct ib_device *dev)
+void ib_cq_pool_cleanup(struct ib_device *dev)
 {
        struct ib_cq *cq, *n;
        unsigned int i;
@@ -367,6 +358,7 @@ void ib_cq_pool_destroy(struct ib_device *dev)
                list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
                                         pool_entry) {
                        WARN_ON(cq->cqe_used);
+                       list_del(&cq->pool_entry);
                        cq->shared = false;
                        ib_free_cq(cq);
                }
drivers/infiniband/core/device.c
index 3ab1edea6acbe9b2172d20b85da52dcc66279e8f..11485b8748a2acffc1b90a2d7a27221d9f36b868 100644
@@ -570,6 +570,7 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
 struct ib_device *_ib_alloc_device(size_t size)
 {
        struct ib_device *device;
+       unsigned int i;
 
        if (WARN_ON(size < sizeof(struct ib_device)))
                return NULL;
@@ -601,6 +602,10 @@ struct ib_device *_ib_alloc_device(size_t size)
        init_completion(&device->unreg_completion);
        INIT_WORK(&device->unregistration_work, ib_unregister_work);
 
+       spin_lock_init(&device->cq_pools_lock);
+       for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++)
+               INIT_LIST_HEAD(&device->cq_pools[i]);
+
        device->uverbs_cmd_mask =
                BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
                BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
@@ -1262,7 +1267,7 @@ static void disable_device(struct ib_device *device)
                remove_client_context(device, cid);
        }
 
-       ib_cq_pool_destroy(device);
+       ib_cq_pool_cleanup(device);
 
        /* Pairs with refcount_set in enable_device */
        ib_device_put(device);
@@ -1307,8 +1312,6 @@ static int enable_device_and_get(struct ib_device *device)
                        goto out;
        }
 
-       ib_cq_pool_init(device);
-
        down_read(&clients_rwsem);
        xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
                ret = add_client_context(device, client);