This patch adds the kfuncs bpf_iter_css_task_{new,next,destroy}, which
allow creation and manipulation of struct bpf_iter_css_task in
open-coded iterator style. These kfuncs are thin wrappers around
css_task_iter_{start,next,end}. BPF programs can use them through the
bpf_for_each macro to iterate over all tasks under a css.

css_task_iter_*() takes the global spinlock css_set_lock, so the BPF
side has to be careful about where it allows this iterator to be used.
Currently it is only allowed in BPF LSM programs and sleepable BPF
iterator programs.
Signed-off-by: Chuyi Zhou <zhouchuyi@bytedance.com>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20231018061746.111364-3-zhouchuyi@bytedance.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
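
For illustration only (not part of the diff below): a minimal sketch of how a
program on the allowlist might drive these kfuncs through bpf_for_each(). The
LSM hook choice, the cgroup lookup via bpf_cgroup_from_id()/bpf_cgroup_release()
and the global variables are assumptions made for the example; bpf_for_each()
and the kfunc declarations come from the selftests' bpf_misc.h and
bpf_experimental.h headers.

/*
 * Illustrative sketch: count the processes in a cgroup's css from a
 * BPF LSM hook using the new css_task open-coded iterator.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

char _license[] SEC("license") = "GPL";

/* Existing cgroup kfuncs, declared here to keep the sketch self-contained. */
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

u64 target_cg_id;	/* cgroup id, set by user space before attaching */
int nr_procs;		/* result, read back by user space */

SEC("lsm/file_mprotect")	/* BPF_PROG_TYPE_LSM is on the allowlist added below */
int BPF_PROG(count_css_procs, struct vm_area_struct *vma,
	     unsigned long reqprot, unsigned long prot, int ret)
{
	struct cgroup_subsys_state *css;
	struct task_struct *task;
	struct cgroup *cgrp;

	cgrp = bpf_cgroup_from_id(target_cg_id);
	if (!cgrp)
		return ret;

	css = &cgrp->self;
	nr_procs = 0;
	/* Expands to bpf_iter_css_task_new/next/destroy under the hood. */
	bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS)
		nr_procs++;

	bpf_cgroup_release(cgrp);
	return ret;
}

User space would set target_cg_id, attach the program, trigger the hook and
then read nr_procs back.
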
 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
 
 
 __diag_pop();
 
+struct bpf_iter_css_task {
+       __u64 __opaque[1];
+} __attribute__((aligned(8)));
+
+struct bpf_iter_css_task_kern {
+       struct css_task_iter *css_it;
+} __attribute__((aligned(8)));
+
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+                 "Global functions as their definitions will be in vmlinux BTF");
+
+__bpf_kfunc int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
+               struct cgroup_subsys_state *css, unsigned int flags)
+{
+       struct bpf_iter_css_task_kern *kit = (void *)it;
+
+       BUILD_BUG_ON(sizeof(struct bpf_iter_css_task_kern) != sizeof(struct bpf_iter_css_task));
+       BUILD_BUG_ON(__alignof__(struct bpf_iter_css_task_kern) !=
+                                       __alignof__(struct bpf_iter_css_task));
+       kit->css_it = NULL;
+       switch (flags) {
+       case CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED:
+       case CSS_TASK_ITER_PROCS:
+       case 0:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       kit->css_it = bpf_mem_alloc(&bpf_global_ma, sizeof(struct css_task_iter));
+       if (!kit->css_it)
+               return -ENOMEM;
+       css_task_iter_start(css, flags, kit->css_it);
+       return 0;
+}
+
+__bpf_kfunc struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it)
+{
+       struct bpf_iter_css_task_kern *kit = (void *)it;
+
+       if (!kit->css_it)
+               return NULL;
+       return css_task_iter_next(kit->css_it);
+}
+
+__bpf_kfunc void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it)
+{
+       struct bpf_iter_css_task_kern *kit = (void *)it;
+
+       if (!kit->css_it)
+               return;
+       css_task_iter_end(kit->css_it);
+       bpf_mem_free(&bpf_global_ma, kit->css_it);
+}
+
+__diag_pop();
+
 DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
 
 static void do_mmap_read_unlock(struct irq_work *entry)
 
        KF_bpf_percpu_obj_new_impl,
        KF_bpf_percpu_obj_drop_impl,
        KF_bpf_throw,
+       KF_bpf_iter_css_task_new,
 };
 
 BTF_SET_START(special_kfunc_set)
 BTF_ID(func, bpf_percpu_obj_new_impl)
 BTF_ID(func, bpf_percpu_obj_drop_impl)
 BTF_ID(func, bpf_throw)
+BTF_ID(func, bpf_iter_css_task_new)
 BTF_SET_END(special_kfunc_set)
 
 BTF_ID_LIST(special_kfunc_list)
 BTF_ID(func, bpf_percpu_obj_new_impl)
 BTF_ID(func, bpf_percpu_obj_drop_impl)
 BTF_ID(func, bpf_throw)
+BTF_ID(func, bpf_iter_css_task_new)
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
                                                  &meta->arg_rbtree_root.field);
 }
 
+static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
+{
+       enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
+
+       switch (prog_type) {
+       case BPF_PROG_TYPE_LSM:
+               return true;
+       case BPF_TRACE_ITER:
+               return env->prog->aux->sleepable;
+       default:
+               return false;
+       }
+}
+
 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
                            int insn_idx)
 {
                        break;
                }
                case KF_ARG_PTR_TO_ITER:
+                       if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) {
+                               if (!check_css_task_iter_allowlist(env)) {
+                                       verbose(env, "css_task_iter is only allowed in bpf_lsm and bpf iter-s\n");
+                                       return -EINVAL;
+                               }
+                       }
                        ret = process_iter_arg(env, regno, insn_idx, meta);
                        if (ret < 0)
                                return ret;
 
                __bpf_assert_op(LHS, <=, END, value, false);            \
        })
 
+struct bpf_iter_css_task;
+struct cgroup_subsys_state;
+extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
+               struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym;
+extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
+extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;
+
+
 #endif