bpf, net: sock_map memory usage
authorYafang Shao <laoar.shao@gmail.com>
Sun, 5 Mar 2023 12:46:12 +0000 (12:46 +0000)
committerAlexei Starovoitov <ast@kernel.org>
Tue, 7 Mar 2023 17:33:43 +0000 (09:33 -0800)
sockmap and sockhash do not share a common allocation scheme, so let's
introduce separate helpers to calculate their memory usage.

The result is as follows,

- before
28: sockmap  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524288B
29: sockhash  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524288B

- after
28: sockmap  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524608B
29: sockhash  name count_map  flags 0x0  <<<< no updated elements
        key 4B  value 4B  max_entries 65536  memlock 1048896B

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Link: https://lore.kernel.org/r/20230305124615.12358-16-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
net/core/sock_map.c

index a68a7290a3b2b1f618960a0ac91961418ee13449..9b854e236d2360435c74dbbd9cb07c31a58594bd 100644 (file)
@@ -797,6 +797,14 @@ static void sock_map_fini_seq_private(void *priv_data)
        bpf_map_put_with_uref(info->map);
 }
 
+static u64 sock_map_mem_usage(const struct bpf_map *map)
+{
+       u64 usage = sizeof(struct bpf_stab);
+
+       usage += (u64)map->max_entries * sizeof(struct sock *);
+       return usage;
+}
+
 static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
        .seq_ops                = &sock_map_seq_ops,
        .init_seq_private       = sock_map_init_seq_private,
@@ -816,6 +824,7 @@ const struct bpf_map_ops sock_map_ops = {
        .map_lookup_elem        = sock_map_lookup,
        .map_release_uref       = sock_map_release_progs,
        .map_check_btf          = map_check_no_btf,
+       .map_mem_usage          = sock_map_mem_usage,
        .map_btf_id             = &sock_map_btf_ids[0],
        .iter_seq_info          = &sock_map_iter_seq_info,
 };
@@ -1397,6 +1406,16 @@ static void sock_hash_fini_seq_private(void *priv_data)
        bpf_map_put_with_uref(info->map);
 }
 
+static u64 sock_hash_mem_usage(const struct bpf_map *map)
+{
+       struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
+       u64 usage = sizeof(*htab);
+
+       usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
+       usage += atomic_read(&htab->count) * (u64)htab->elem_size;
+       return usage;
+}
+
 static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
        .seq_ops                = &sock_hash_seq_ops,
        .init_seq_private       = sock_hash_init_seq_private,
@@ -1416,6 +1435,7 @@ const struct bpf_map_ops sock_hash_ops = {
        .map_lookup_elem_sys_only = sock_hash_lookup_sys,
        .map_release_uref       = sock_hash_release_progs,
        .map_check_btf          = map_check_no_btf,
+       .map_mem_usage          = sock_hash_mem_usage,
        .map_btf_id             = &sock_hash_map_btf_ids[0],
        .iter_seq_info          = &sock_hash_iter_seq_info,
 };