bpf: Allow using bpf_sk_storage in FENTRY/FEXIT/RAW_TP
authorMartin KaFai Lau <kafai@fb.com>
Thu, 12 Nov 2020 21:13:13 +0000 (13:13 -0800)
committerAlexei Starovoitov <ast@kernel.org>
Fri, 13 Nov 2020 02:39:28 +0000 (18:39 -0800)
This patch enables the FENTRY/FEXIT/RAW_TP tracing programs to use
the bpf_sk_storage_(get|delete) helpers, so those tracing programs
can access the sk's bpf_local_storage; a later selftest
will show some examples.

The bpf_sk_storage helpers are currently used in bpf-tcp-cc, tc,
cg sockops...etc, which run either in softirq or
task context.

This patch adds bpf_sk_storage_get_tracing_proto and
bpf_sk_storage_delete_tracing_proto.  They check
at runtime that the helpers can only be called when serving
softirq or running in a task context.  That should enable
most common tracing use cases on sk.

At load time, the new tracing_allowed() function
ensures that a tracing prog using the bpf_sk_storage_(get|delete)
helpers is not itself tracing any bpf_sk_storage*() function.
The sk is passed as "void *" when calling into bpf_local_storage.

This patch only allows tracing a kernel function.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20201112211313.2587383-1-kafai@fb.com
include/net/bpf_sk_storage.h
kernel/trace/bpf_trace.c
net/core/bpf_sk_storage.c

index 3c516dd07cafd0215f1f3ed28cfea6e8bf42d9d4..0e85713f56df3cd020c2cb3ce31fbc65a8992383 100644 (file)
@@ -20,6 +20,8 @@ void bpf_sk_storage_free(struct sock *sk);
 
 extern const struct bpf_func_proto bpf_sk_storage_get_proto;
 extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+extern const struct bpf_func_proto bpf_sk_storage_get_tracing_proto;
+extern const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto;
 
 struct bpf_local_storage_elem;
 struct bpf_sk_storage_diag;
index e4515b0f62a8d3324bfeaa9a86d9a5865560dee7..cfce60ad1cb5c2a6b6564fce178216bf7ffb470c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/syscalls.h>
 #include <linux/error-injection.h>
 #include <linux/btf_ids.h>
+#include <net/bpf_sk_storage.h>
 
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/btf.h>
@@ -1735,6 +1736,10 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_skc_to_tcp_request_sock_proto;
        case BPF_FUNC_skc_to_udp6_sock:
                return &bpf_skc_to_udp6_sock_proto;
+       case BPF_FUNC_sk_storage_get:
+               return &bpf_sk_storage_get_tracing_proto;
+       case BPF_FUNC_sk_storage_delete:
+               return &bpf_sk_storage_delete_tracing_proto;
 #endif
        case BPF_FUNC_seq_printf:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
index fd416678f2363dfd328a363d8cf4b4eb3509b963..359908a7d3c12fb7cbd5935ac93b9a167be3e3b2 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/btf_ids.h>
 #include <linux/bpf_local_storage.h>
 #include <net/bpf_sk_storage.h>
@@ -378,6 +379,79 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
        .arg2_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
 };
 
+/* Load-time gate (installed via .allowed on the tracing protos below):
+ * a tracing prog may use the bpf_sk_storage_(get|delete) helpers only
+ * when it cannot be attached to a bpf_sk_storage*() function itself.
+ */
+static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
+{
+       const struct btf *btf_vmlinux;
+       const struct btf_type *t;
+       const char *tname;
+       u32 btf_id;
+
+       /* Reject progs attached to another bpf prog (dst_prog set):
+        * only tracing of a kernel function is allowed here.
+        */
+       if (prog->aux->dst_prog)
+               return false;
+
+       /* Ensure the tracing program is not tracing
+        * any bpf_sk_storage*() function and also
+        * use the bpf_sk_storage_(get|delete) helper.
+        */
+       switch (prog->expected_attach_type) {
+       case BPF_TRACE_RAW_TP:
+               /* bpf_sk_storage has no trace point */
+               return true;
+       case BPF_TRACE_FENTRY:
+       case BPF_TRACE_FEXIT:
+               /* Look up the attach target's name in vmlinux BTF and
+                * allow only if it does not start with "bpf_sk_storage".
+                */
+               btf_vmlinux = bpf_get_btf_vmlinux();
+               btf_id = prog->aux->attach_btf_id;
+               t = btf_type_by_id(btf_vmlinux, btf_id);
+               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+               return !!strncmp(tname, "bpf_sk_storage",
+                                strlen("bpf_sk_storage"));
+       default:
+               return false;
+       }
+
+       /* NOTE(review): unreachable — every switch case above returns. */
+       return false;
+}
+
+/* Runtime-guarded wrapper around ____bpf_sk_storage_get() for tracing
+ * progs: returns NULL unless serving softirq or running in task context
+ * (i.e. the contexts the non-tracing users of bpf_sk_storage run in).
+ */
+BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
+          void *, value, u64, flags)
+{
+       if (!in_serving_softirq() && !in_task())
+               return (unsigned long)NULL;
+
+       return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
+}
+
+/* Runtime-guarded wrapper around ____bpf_sk_storage_delete() for tracing
+ * progs: fails with -EPERM unless serving softirq or running in task
+ * context, mirroring the guard in bpf_sk_storage_get_tracing().
+ */
+BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
+          struct sock *, sk)
+{
+       if (!in_serving_softirq() && !in_task())
+               return -EPERM;
+
+       return ____bpf_sk_storage_delete(map, sk);
+}
+
+/* Helper proto exposed to tracing progs via tracing_prog_func_proto();
+ * the sk arg is a BTF pointer to sock_common and .allowed enforces the
+ * load-time attach-target check.
+ */
+const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
+       .func           = bpf_sk_storage_get_tracing,
+       .gpl_only       = false,
+       .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_PTR_TO_BTF_ID,
+       .arg2_btf_id    = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+       .arg3_type      = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+       .arg4_type      = ARG_ANYTHING,
+       .allowed        = bpf_sk_storage_tracing_allowed,
+};
+
+/* Delete counterpart of bpf_sk_storage_get_tracing_proto; same BTF sk
+ * argument and the same load-time .allowed gate.
+ */
+const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
+       .func           = bpf_sk_storage_delete_tracing,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_PTR_TO_BTF_ID,
+       .arg2_btf_id    = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+       .allowed        = bpf_sk_storage_tracing_allowed,
+};
+
 struct bpf_sk_storage_diag {
        u32 nr_maps;
        struct bpf_map *maps[];