{
        int i = cgroup_atype - CGROUP_LSM_START;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        if (--cgroup_lsm_atype[i].refcnt <= 0)
                cgroup_lsm_atype[i].attach_btf_id = 0;
        WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 }
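
[ Note: cgroup_lock()/cgroup_unlock() are thin wrappers around the global
  cgroup_mutex. A sketch of their definitions, as found in
  kernel/cgroup/cgroup.c at the time of this conversion:

	void cgroup_lock(void)
	{
		mutex_lock(&cgroup_mutex);
	}
	EXPORT_SYMBOL_GPL(cgroup_lock);

	void cgroup_unlock(void)
	{
		mutex_unlock(&cgroup_mutex);
	}
	EXPORT_SYMBOL_GPL(cgroup_unlock);

  The conversion below is therefore purely mechanical: lock ordering and
  the extent of every critical section are unchanged. ]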
 #else
 static enum cgroup_bpf_attach_type
 
        unsigned int atype;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
                struct hlist_head *progs = &cgrp->bpf.progs[atype];
                bpf_cgroup_storage_free(storage);
        }
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_put(p);
 {
        int ret;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        return ret;
 }
 
 
        cg_link = container_of(link, struct bpf_cgroup_link, link);
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        /* link might have been auto-released by dying cgroup, so fail */
        if (!cg_link->cgroup) {
                ret = -ENOLINK;
        }
        ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
 out_unlock:
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        return ret;
 }
 
 {
        int ret;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        return ret;
 }
 
 {
        int ret;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        ret = __cgroup_bpf_query(cgrp, attr, uattr);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        return ret;
 }
 
        if (!cg_link->cgroup)
                return;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        /* re-check cgroup under lock again */
        if (!cg_link->cgroup) {
-               mutex_unlock(&cgroup_mutex);
+               cgroup_unlock();
                return;
        }
 
        cg = cg_link->cgroup;
        cg_link->cgroup = NULL;
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        cgroup_put(cg);
 }
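
[ The hunk above keeps the existing release pattern intact: an unlocked
  fast-path check, a re-check under the lock, then the cgroup reference is
  claimed and dropped outside the critical section. Condensed sketch of
  that pattern (hypothetical helper name; logic taken from the hunk):

	static void bpf_cgroup_link_release_sketch(struct bpf_cgroup_link *cg_link)
	{
		struct cgroup *cg;

		if (!cg_link->cgroup)		/* unlocked fast path */
			return;

		cgroup_lock();
		if (!cg_link->cgroup) {		/* lost a race with auto-detach */
			cgroup_unlock();
			return;
		}
		cg = cg_link->cgroup;
		cg_link->cgroup = NULL;		/* claim the reference under the lock */
		cgroup_unlock();

		cgroup_put(cg);			/* drop it without holding cgroup_mutex */
	}
]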
                container_of(link, struct bpf_cgroup_link, link);
        u64 cg_id = 0;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        if (cg_link->cgroup)
                cg_id = cgroup_id(cg_link->cgroup);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        seq_printf(seq,
                   "cgroup_id:\t%llu\n"
                container_of(link, struct bpf_cgroup_link, link);
        u64 cg_id = 0;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        if (cg_link->cgroup)
                cg_id = cgroup_id(cg_link->cgroup);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        info->cgroup.cgroup_id = cg_id;
        info->cgroup.attach_type = cg_link->type;
 
        struct cgroup_root *root;
        int retval = 0;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        cgroup_attach_lock(true);
        for_each_root(root) {
                struct cgroup *from_cgrp;
                        break;
        }
        cgroup_attach_unlock(true);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        return retval;
 }
        if (ret)
                return ret;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        percpu_down_write(&cgroup_threadgroup_rwsem);
 
 out_err:
        cgroup_migrate_finish(&mgctx);
        percpu_up_write(&cgroup_threadgroup_rwsem);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        return ret;
 }
 
        kernfs_break_active_protection(new_parent);
        kernfs_break_active_protection(kn);
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        ret = kernfs_rename(kn, new_parent, new_name_str);
        if (!ret)
                TRACE_CGROUP_PATH(rename, cgrp);
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        kernfs_unbreak_active_protection(kn);
        kernfs_unbreak_active_protection(new_parent);
        trace_cgroup_remount(root);
 
  out_unlock:
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        return ret;
 }
 
        if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
                ret = 1;        /* restart */
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        if (!ret)
                ret = cgroup_do_get_tree(fc);
 
        cgroup_favor_dynmods(root, false);
        cgroup_exit_root_id(root);
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        cgroup_rstat_exit(cgrp);
        kernfs_destroy_root(root->kf_root);
        else
                cgrp = kn->parent->priv;
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        kernfs_unbreak_active_protection(kn);
        cgroup_put(cgrp);
        if (drain_offline)
                cgroup_lock_and_drain_offline(cgrp);
        else
-               mutex_lock(&cgroup_mutex);
+               cgroup_lock();
 
        if (!cgroup_is_dead(cgrp))
                return cgrp;
                struct super_block *sb = fc->root->d_sb;
                struct cgroup *cgrp;
 
-               mutex_lock(&cgroup_mutex);
+               cgroup_lock();
                spin_lock_irq(&css_set_lock);
 
                cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);
 
                spin_unlock_irq(&css_set_lock);
-               mutex_unlock(&cgroup_mutex);
+               cgroup_unlock();
 
                nsdentry = kernfs_node_dentry(cgrp->kn, sb);
                dput(fc->root);
 {
        int ret;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        spin_lock_irq(&css_set_lock);
 
        ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
 
        spin_unlock_irq(&css_set_lock);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        return ret;
 }
        int hierarchy_id = 1;
        int ret;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        spin_lock_irq(&css_set_lock);
 
        root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
        }
 
        spin_unlock_irq(&css_set_lock);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        return ret;
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
        int ssid;
 
 restart:
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
                for_each_subsys(ss, ssid) {
                        prepare_to_wait(&dsct->offline_waitq, &wait,
                                        TASK_UNINTERRUPTIBLE);
 
-                       mutex_unlock(&cgroup_mutex);
+                       cgroup_unlock();
                        schedule();
                        finish_wait(&dsct->offline_waitq, &wait);
 
        if (!(cfts[0].flags & __CFTYPE_ADDED))
                return -ENOENT;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        ret = cgroup_rm_cftypes_locked(cfts);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        return ret;
 }
 
        if (ret)
                return ret;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        list_add_tail(&cfts->node, &ss->cfts);
        ret = cgroup_apply_cftypes(cfts, true);
        if (ret)
                cgroup_rm_cftypes_locked(cfts);
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        return ret;
 }
 
        struct cgroup_subsys *ss = css->ss;
        struct cgroup *cgrp = css->cgroup;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        css->flags |= CSS_RELEASED;
        list_del_rcu(&css->sibling);
                                         NULL);
        }
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
        queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
        struct cgroup_subsys_state *css =
                container_of(work, struct cgroup_subsys_state, destroy_work);
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        do {
                offline_css(css);
                css = css->parent;
        } while (css && atomic_dec_and_test(&css->online_cnt));
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 }
 
 /* css kill confirmation processing requires process context, bounce */
 
        pr_debug("Initializing cgroup subsys %s\n", ss->name);
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        idr_init(&ss->css_idr);
        INIT_LIST_HEAD(&ss->cfts);
 
        BUG_ON(online_css(css));
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 }
 
 /**
 
        get_user_ns(init_cgroup_ns.user_ns);
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
 
        /*
         * Add init_css_set to the hash table so that dfl_root can link to
 
        BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
 
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
 
        for_each_subsys(ss, ssid) {
                if (ss->early_init) {
                if (ss->bind)
                        ss->bind(init_css_set.subsys[ssid]);
 
-               mutex_lock(&cgroup_mutex);
+               cgroup_lock();
                css_populate_dir(init_css_set.subsys[ssid]);
-               mutex_unlock(&cgroup_mutex);
+               cgroup_unlock();
        }
 
        /* init_css_set.subsys[] has been updated, re-hash */
        if (!buf)
                goto out;
 
-       mutex_lock(&cgroup_mutex);
+       cgroup_lock();
        spin_lock_irq(&css_set_lock);
 
        for_each_root(root) {
        retval = 0;
 out_unlock:
        spin_unlock_irq(&css_set_lock);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        kfree(buf);
 out:
        return retval;
        struct file *f;
 
        if (kargs->flags & CLONE_INTO_CGROUP)
-               mutex_lock(&cgroup_mutex);
+               cgroup_lock();
 
        cgroup_threadgroup_change_begin(current);
 
 
 err:
        cgroup_threadgroup_change_end(current);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_unlock();
        if (f)
                fput(f);
        if (dst_cgrp)
                struct cgroup *cgrp = kargs->cgrp;
                struct css_set *cset = kargs->cset;
 
-               mutex_unlock(&cgroup_mutex);
+               cgroup_unlock();
 
                if (cset) {
                        put_css_set(cset);
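
[ In cgroup_css_set_fork()/cgroup_css_set_put_fork() above, cgroup_mutex is
  taken only for CLONE_INTO_CGROUP, yet the err path unlocks unconditionally.
  That is safe because err is only reachable on the CLONE_INTO_CGROUP path;
  the plain-fork case returns early. Hypothetical condensation of the control
  flow (lookup details and error handling omitted):

	static int css_set_fork_sketch(struct kernel_clone_args *kargs)
	{
		int ret;

		if (kargs->flags & CLONE_INTO_CGROUP)
			cgroup_lock();

		cgroup_threadgroup_change_begin(current);

		if (!(kargs->flags & CLONE_INTO_CGROUP))
			return 0;	/* mutex never taken; nothing to unlock.
					 * Returns with the threadgroup lock held,
					 * as the real function does. */

		/* CLONE_INTO_CGROUP: look up the destination cgroup/css_set. */
		ret = -EBADF;		/* placeholder failure so the sketch exercises err */
		goto err;
	err:
		cgroup_threadgroup_change_end(current);
		cgroup_unlock();	/* only reached with CLONE_INTO_CGROUP set */
		return ret;
	}
]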