selftests/bpf: Test a BPF CC writing sk_pacing_*
authorJörn-Thorben Hinz <jthinz@mailbox.tu-berlin.de>
Wed, 22 Jun 2022 19:12:25 +0000 (21:12 +0200)
committerAlexei Starovoitov <ast@kernel.org>
Thu, 23 Jun 2022 16:49:57 +0000 (09:49 -0700)
Test whether a TCP CC implemented in BPF is allowed to write
sk_pacing_rate and sk_pacing_status in struct sock. This is needed when
cong_control() is implemented and used.

Signed-off-by: Jörn-Thorben Hinz <jthinz@mailbox.tu-berlin.de>
Link: https://lore.kernel.org/r/20220622191227.898118-4-jthinz@mailbox.tu-berlin.de
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c [new file with mode: 0644]

index e9a9a31b2ffecfbb41af8156173869ea74cb129d..e79f3f5a9d333481c7c074a24a26f857fd874065 100644 (file)
@@ -9,6 +9,7 @@
 #include "bpf_cubic.skel.h"
 #include "bpf_tcp_nogpl.skel.h"
 #include "bpf_dctcp_release.skel.h"
+#include "tcp_ca_write_sk_pacing.skel.h"
 
 #ifndef ENOTSUPP
 #define ENOTSUPP 524
@@ -322,6 +323,22 @@ static void test_rel_setsockopt(void)
        bpf_dctcp_release__destroy(rel_skel);
 }
 
+/* Verify that a BPF TCP CC implementing cong_control() is allowed to
+ * write sk->sk_pacing_rate and sk->sk_pacing_status: loading the object
+ * exercises the verifier's write checks, attaching registers the CC.
+ */
+static void test_write_sk_pacing(void)
+{
+       struct tcp_ca_write_sk_pacing *skel;
+       struct bpf_link *link;
+
+       /* open_and_load fails if the verifier rejects the writes */
+       skel = tcp_ca_write_sk_pacing__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "open_and_load"))
+               return;
+
+       link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
+       ASSERT_OK_PTR(link, "attach_struct_ops");
+
+       /* bpf_link__destroy() tolerates a NULL/err link on attach failure */
+       bpf_link__destroy(link);
+       tcp_ca_write_sk_pacing__destroy(skel);
+}
+
 void test_bpf_tcp_ca(void)
 {
        if (test__start_subtest("dctcp"))
@@ -334,4 +351,6 @@ void test_bpf_tcp_ca(void)
                test_dctcp_fallback();
        if (test__start_subtest("rel_setsockopt"))
                test_rel_setsockopt();
+       if (test__start_subtest("write_sk_pacing"))
+               test_write_sk_pacing();
 }
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
new file mode 100644 (file)
index 0000000..4344770
--- /dev/null
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define USEC_PER_SEC 1000000UL
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+/* Mirror of the kernel's tcp_sk(): a TCP socket's struct sock is the
+ * first member of struct tcp_sock, so a plain cast is valid here.
+ */
+static inline struct tcp_sock *tcp_sk(const struct sock *sk)
+{
+       return (struct tcp_sock *)sk;
+}
+
+SEC("struct_ops/write_sk_pacing_init")
+void BPF_PROG(write_sk_pacing_init, struct sock *sk)
+{
+       /* Request pacing, as the kernel does for a CC with cong_control().
+        * With atomics support, only transition NONE -> NEEDED so an
+        * FQ-set SK_PACING_FQ status is left untouched; otherwise fall
+        * back to a plain (racy but test-sufficient) store.
+        */
+#ifdef ENABLE_ATOMICS_TESTS
+       __sync_bool_compare_and_swap(&sk->sk_pacing_status, SK_PACING_NONE,
+                                    SK_PACING_NEEDED);
+#else
+       sk->sk_pacing_status = SK_PACING_NEEDED;
+#endif
+}
+
+SEC("struct_ops/write_sk_pacing_cong_control")
+void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
+             const struct rate_sample *rs)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+       /* rate = cwnd * mss / srtt, in bytes/sec.  srtt_us is kept
+        * left-shifted by 3 (kernel convention), hence the matching
+        * << 3 on the numerator; "?: 1U << 3" avoids a divide-by-zero
+        * before the first RTT sample (treated as 1 us).
+        */
+       unsigned long rate =
+               ((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
+               (tp->srtt_us ?: 1U << 3);
+       /* The direct write to sk_pacing_rate is what this test is about */
+       sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
+}
+
+/* Mandatory tcp_congestion_ops callback; keep ssthresh unchanged */
+SEC("struct_ops/write_sk_pacing_ssthresh")
+__u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
+{
+       return tcp_sk(sk)->snd_ssthresh;
+}
+
+/* Mandatory tcp_congestion_ops callback; "undo" to the current cwnd */
+SEC("struct_ops/write_sk_pacing_undo_cwnd")
+__u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
+{
+       return tcp_sk(sk)->snd_cwnd;
+}
+
+/* struct_ops map registering the CC under name "bpf_w_sk_pacing".
+ * Providing .cong_control is what makes the verifier allow the
+ * sk_pacing_rate/sk_pacing_status writes exercised above.
+ */
+SEC(".struct_ops")
+struct tcp_congestion_ops write_sk_pacing = {
+       .init = (void *)write_sk_pacing_init,
+       .cong_control = (void *)write_sk_pacing_cong_control,
+       .ssthresh = (void *)write_sk_pacing_ssthresh,
+       .undo_cwnd = (void *)write_sk_pacing_undo_cwnd,
+       .name = "bpf_w_sk_pacing",
+};