        uint64_t                ls_recover_seq;
        struct dlm_recover      *ls_recover_args;
        struct rw_semaphore     ls_in_recovery; /* block local requests */
-       struct rw_semaphore     ls_recv_active; /* block dlm_recv */
+       rwlock_t                ls_recv_active; /* block dlm_recv */
        struct list_head        ls_requestqueue;/* queue remote requests */
        rwlock_t                ls_requestqueue_lock;
        struct dlm_rcom         *ls_recover_buf;
 
-       /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
-          be inactive (in this ls) before transitioning to recovery mode */
+       /* this rwlock allows dlm_ls_stop() to wait for all dlm_recv threads to
+          be inactive (in this ls) before transitioning to recovery mode */
 
-       down_read(&ls->ls_recv_active);
+       read_lock(&ls->ls_recv_active);
        if (hd->h_cmd == DLM_MSG)
                dlm_receive_message(ls, &p->message, nodeid);
        else if (hd->h_cmd == DLM_RCOM)
                dlm_receive_rcom(ls, &p->rcom, nodeid);
        else
                log_error(ls, "invalid h_cmd %d from %d lockspace %x",
                          hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
-       up_read(&ls->ls_recv_active);
+       read_unlock(&ls->ls_recv_active);
 
        dlm_put_lockspace(ls);
 }
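The comment above the receive path sums up the pattern this hunk preserves across the conversion: every incoming message is dispatched inside a read-side critical section on ls_recv_active, so dlm_ls_stop() can take the same lock on the write side and be certain that no message is half-processed once it holds it. A minimal userspace analogue of that quiesce pattern, using pthread_rwlock_t in place of the kernel rwlock_t (all names below are illustrative, not taken from the dlm sources):

#include <pthread.h>
#include <stdio.h>

/* stand-in for ls_recv_active; pthread_rwlock_t plays the role of rwlock_t */
static pthread_rwlock_t recv_active = PTHREAD_RWLOCK_INITIALIZER;

/* receive side: each message is handled inside a read-locked section,
 * so many receivers can run concurrently with each other */
static void receive_buffer(int msg)
{
	pthread_rwlock_rdlock(&recv_active);
	printf("dispatching message %d\n", msg);
	pthread_rwlock_unlock(&recv_active);
}

/* stop side: the write lock is only granted once every receiver has left
 * its read-locked section, i.e. nothing is mid-message */
static void quiesce_receivers(void)
{
	pthread_rwlock_wrlock(&recv_active);
	/* safe point: change recovery state here */
	pthread_rwlock_unlock(&recv_active);
}

int main(void)
{
	receive_buffer(1);
	quiesce_receivers();
	receive_buffer(2);
	return 0;
}

Note the semantic shift the conversion brings: rwlock_t critical sections must not sleep, and contenders spin rather than block, so everything done under read_lock()/write_lock() of ls_recv_active has to stay non-blocking.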
 
        ls->ls_recover_seq = get_random_u64();
        ls->ls_recover_args = NULL;
        init_rwsem(&ls->ls_in_recovery);
-       init_rwsem(&ls->ls_recv_active);
+       rwlock_init(&ls->ls_recv_active);
        INIT_LIST_HEAD(&ls->ls_requestqueue);
        rwlock_init(&ls->ls_requestqueue_lock);
        spin_lock_init(&ls->ls_clear_proc_locks);
 
         * message to the requestqueue without races.
         */
 
-       down_write(&ls->ls_recv_active);
+       write_lock(&ls->ls_recv_active);
 
        /*
         * Abort any recovery that's in progress (see RECOVER_STOP,
         * dlm_recovery_stopped()) and tell any other threads running in the
         * dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
         */
[...]
        /*
         * Let dlm_recv run again, now any normal messages will be saved on the
         * requestqueue for later.
         */
 
-       up_write(&ls->ls_recv_active);
+       write_unlock(&ls->ls_recv_active);
 
        /*
         * This in_recovery lock does two things:
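The two comments in dlm_ls_stop() describe what that write-side section buys: with ls_recv_active held for write the stop/recovery state can be flipped while nothing is in flight, and once the lock is dropped dlm_recv runs again but normal messages are saved on ls_requestqueue instead of being processed. A small standalone sketch of that park-and-replay behaviour (single-threaded, with the locking from the hunks above omitted; the flag, queue and function names are invented for the illustration):

#include <stdbool.h>
#include <stdio.h>

#define QMAX 16

static bool locking_stopped;           /* what dlm_ls_stop() arranges */
static int requestqueue[QMAX];         /* stand-in for ls_requestqueue */
static int qhead, qtail;

/* receive side: process immediately while running, otherwise park */
static void receive_request(int req)
{
	if (!locking_stopped) {
		printf("processing request %d\n", req);
		return;
	}
	if (qtail - qhead < QMAX)
		requestqueue[qtail++ % QMAX] = req;
}

/* once recovery completes: replay parked requests in arrival order,
 * then resume normal processing */
static void process_requestqueue(void)
{
	while (qhead < qtail)
		printf("replaying request %d\n", requestqueue[qhead++ % QMAX]);
	locking_stopped = false;
}

int main(void)
{
	receive_request(1);
	locking_stopped = true;        /* stop: recovery begins */
	receive_request(2);
	receive_request(3);
	process_requestqueue();        /* recovery done: drain the queue */
	receive_request(4);
	return 0;
}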
 
 {
        int error = -EINTR;
 
-       down_write(&ls->ls_recv_active);
+       write_lock(&ls->ls_recv_active);
 
        spin_lock(&ls->ls_recover_lock);
        if (ls->ls_recover_seq == seq) {
[...]
        }
        spin_unlock(&ls->ls_recover_lock);
 
-       up_write(&ls->ls_recv_active);
+       write_unlock(&ls->ls_recv_active);
        return error;
 }
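The final hunk only re-enables locking if ls_recover_seq, read under ls_recover_lock, still equals the seq this recovery pass started with; if the sequence has moved on in the meantime (a newer stop/recovery cycle), the function leaves error at -EINTR and changes nothing, so a stale recovery can never switch the lockspace back on. A compact sketch of that generation-counter guard (standalone, invented names, no locking):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t recover_seq = 1;       /* current stop/recovery generation */

/* each new stop/recovery cycle moves the generation forward */
static uint64_t start_cycle(void)
{
	return ++recover_seq;
}

/* completion path: commit the result only if no newer cycle has started */
static int try_enable(uint64_t seq)
{
	if (recover_seq != seq)
		return -EINTR;         /* stale recovery, leave it disabled */
	printf("recovery %llu committed\n", (unsigned long long)seq);
	return 0;
}

int main(void)
{
	uint64_t stale = start_cycle();    /* recovery A begins */
	uint64_t fresh = start_cycle();    /* a second stop starts cycle B */

	printf("A completes -> %d\n", try_enable(stale));  /* rejected */
	printf("B completes -> %d\n", try_enable(fresh));  /* accepted */
	return 0;
}

In the patch itself the same comparison sits inside the ls_recover_lock spinlock, which in turn sits inside the write-locked ls_recv_active section, so the decision is made with receivers quiesced and the sequence stable.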