SUNRPC: change sp_nrthreads to atomic_t
author: NeilBrown <neilb@suse.de>
Mon, 11 Sep 2023 14:40:09 +0000 (10:40 -0400)
committer: Chuck Lever <chuck.lever@oracle.com>
Mon, 16 Oct 2023 16:44:07 +0000 (12:44 -0400)
Using an atomic_t avoids the need to take a spinlock (which can soon be
removed).

Choosing a thread to kill requires care, as we cannot set the "die
now" bit atomically with the test on the count.  Instead we temporarily
increase the count.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
fs/nfsd/nfssvc.c
include/linux/sunrpc/svc.h
net/sunrpc/svc.c

index 0b03a2e50deeaf1c4030856305d83bb32ff8f4ae..433154b9eee0c28ac363db315a581e36da0ae4f3 100644 (file)
@@ -713,14 +713,13 @@ int nfsd_nrpools(struct net *net)
 
 int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
 {
-       int i = 0;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+       struct svc_serv *serv = nn->nfsd_serv;
+       int i;
 
-       if (nn->nfsd_serv != NULL) {
-               for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
-                       nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
-       }
-
+       if (serv)
+               for (i = 0; i < serv->sv_nrpools && i < n; i++)
+                       nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
        return 0;
 }
 
index 7ff9fe785e49464b999384fb7878d237caf395d2..9d0fcd6148ae9b3eab6a9c294f1d94ab97c2a2e6 100644 (file)
@@ -36,7 +36,7 @@ struct svc_pool {
        unsigned int            sp_id;          /* pool id; also node id on NUMA */
        spinlock_t              sp_lock;        /* protects all fields */
        struct lwq              sp_xprts;       /* pending transports */
-       unsigned int            sp_nrthreads;   /* # of threads in pool */
+       atomic_t                sp_nrthreads;   /* # of threads in pool */
        struct list_head        sp_all_threads; /* all server threads */
        struct llist_head       sp_idle_threads; /* idle server threads */
 
index 244b5b9eba4d635ff991fdfa46dc65dacee1b757..0928d3f918b0bbcf1dfd41e16062203e45ed2116 100644 (file)
@@ -681,8 +681,8 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
        serv->sv_nrthreads += 1;
        spin_unlock_bh(&serv->sv_lock);
 
+       atomic_inc(&pool->sp_nrthreads);
        spin_lock_bh(&pool->sp_lock);
-       pool->sp_nrthreads++;
        list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
        spin_unlock_bh(&pool->sp_lock);
        return rqstp;
@@ -727,23 +727,24 @@ svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
 }
 
 static struct svc_pool *
-svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
+               unsigned int *state)
 {
+       struct svc_pool *pool;
        unsigned int i;
 
+retry:
+       pool = target_pool;
+
        if (pool != NULL) {
-               spin_lock_bh(&pool->sp_lock);
-               if (pool->sp_nrthreads)
+               if (atomic_inc_not_zero(&pool->sp_nrthreads))
                        goto found_pool;
-               spin_unlock_bh(&pool->sp_lock);
                return NULL;
        } else {
                for (i = 0; i < serv->sv_nrpools; i++) {
                        pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
-                       spin_lock_bh(&pool->sp_lock);
-                       if (pool->sp_nrthreads)
+                       if (atomic_inc_not_zero(&pool->sp_nrthreads))
                                goto found_pool;
-                       spin_unlock_bh(&pool->sp_lock);
                }
                return NULL;
        }
@@ -751,8 +752,12 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *stat
 found_pool:
        set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
        set_bit(SP_NEED_VICTIM, &pool->sp_flags);
-       spin_unlock_bh(&pool->sp_lock);
-       return pool;
+       if (!atomic_dec_and_test(&pool->sp_nrthreads))
+               return pool;
+       /* Nothing left in this pool any more */
+       clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
+       clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+       goto retry;
 }
 
 static int
@@ -828,13 +833,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 int
 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
-       if (pool == NULL) {
+       if (!pool)
                nrservs -= serv->sv_nrthreads;
-       } else {
-               spin_lock_bh(&pool->sp_lock);
-               nrservs -= pool->sp_nrthreads;
-               spin_unlock_bh(&pool->sp_lock);
-       }
+       else
+               nrservs -= atomic_read(&pool->sp_nrthreads);
 
        if (nrservs > 0)
                return svc_start_kthreads(serv, pool, nrservs);
@@ -921,10 +923,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
        struct svc_pool *pool = rqstp->rq_pool;
 
        spin_lock_bh(&pool->sp_lock);
-       pool->sp_nrthreads--;
        list_del_rcu(&rqstp->rq_all);
        spin_unlock_bh(&pool->sp_lock);
 
+       atomic_dec(&pool->sp_nrthreads);
+
        spin_lock_bh(&serv->sv_lock);
        serv->sv_nrthreads -= 1;
        spin_unlock_bh(&serv->sv_lock);