Merge tag 'v6.6' into rdma.git for-next
author    Jason Gunthorpe <jgg@nvidia.com>    Tue, 31 Oct 2023 13:54:48 +0000 (10:54 -0300)
committer Jason Gunthorpe <jgg@nvidia.com>    Tue, 31 Oct 2023 13:54:48 +0000 (10:54 -0300)
Resolve conflict by taking the spin_lock hunk from for-next:

 https://lore.kernel.org/r/20230928113851.5197a1ec@canb.auug.org.au

Required for the next patch.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
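
For reference, a minimal sketch of how the resolved region of
drivers/infiniband/hw/mlx5/mr.c reads once this merge is applied. Only the
hunk body below is verbatim from the diff; the function signature, the
struct mlx5_ib_dev parameter type, and the local declarations (root, ent,
node) sit outside the hunk and are assumed from context:

    /* Sketch of mlx5_mkey_cache_cleanup() after the resolution; the
     * signature and locals are assumed, the hunk body is verbatim. */
    void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
    {
            struct rb_root *root = &dev->cache.rb_root; /* assumed name */
            struct mlx5_cache_ent *ent;
            struct rb_node *node;

            mutex_lock(&dev->cache.rb_lock);
            for (node = rb_first(root); node; node = rb_next(node)) {
                    ent = rb_entry(node, struct mlx5_cache_ent, node);
                    /* for-next side: the per-entry spinlock replaces
                     * the old xa_lock_irq(&ent->mkeys) */
                    spin_lock_irq(&ent->mkeys_queue.lock);
                    ent->disabled = true;
                    spin_unlock_irq(&ent->mkeys_queue.lock);
                    /* the v6.6 side's per-entry
                     * cancel_delayed_work_sync(&ent->dwork) is dropped */
            }
            mutex_unlock(&dev->cache.rb_lock);

            /*
             * After all entries are disabled and will not reschedule on WQ,
             * flush it and all async commands.
             */
            flush_workqueue(dev->cache.wq);

            mlx5_mkey_cache_debugfs_cleanup(dev);
            mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
            /* remainder of the function is untouched by the merge */
    }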
drivers/infiniband/core/nldev.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/sw/siw/siw_cm.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

Simple merge
Simple merge
index b0fa2d6449736b6ea6e321df0bf89b3054965e81,8a3762d9ff58c1550f3d2b429a47949079a089df..1d6c54a53df629f9742fd16e3432caa11aee3b5e
@@@ -1001,12 -1028,17 +1002,17 @@@ void mlx5_mkey_cache_cleanup(struct mlx
        mutex_lock(&dev->cache.rb_lock);
        for (node = rb_first(root); node; node = rb_next(node)) {
                ent = rb_entry(node, struct mlx5_cache_ent, node);
 -              xa_lock_irq(&ent->mkeys);
 +              spin_lock_irq(&ent->mkeys_queue.lock);
                ent->disabled = true;
 -              xa_unlock_irq(&ent->mkeys);
 +              spin_unlock_irq(&ent->mkeys_queue.lock);
-               cancel_delayed_work_sync(&ent->dwork);
        }
+       mutex_unlock(&dev->cache.rb_lock);
+       /*
+        * After all entries are disabled and will not reschedule on WQ,
+        * flush it and all async commands.
+        */
+       flush_workqueue(dev->cache.wq);
  
        mlx5_mkey_cache_debugfs_cleanup(dev);
        mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
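
As the in-diff comment notes, the ordering in the resolution matters: every
cache entry is first marked disabled under its mkeys_queue.lock, so queued
work observes the flag and will not reschedule itself, and a single
flush_workqueue() on dev->cache.wq then drains whatever is still pending.
That is why the v6.6 side's per-entry cancel_delayed_work_sync() call can
be dropped rather than carried over alongside the new locking.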
Simple merge
Simple merge
Simple merge