bpf: use bpf_map_kvcalloc in bpf_local_storage
author Yafang Shao <laoar.shao@gmail.com>
Fri, 10 Feb 2023 15:47:32 +0000 (15:47 +0000)
committer Alexei Starovoitov <ast@kernel.org>
Sat, 11 Feb 2023 02:59:56 +0000 (18:59 -0800)
Introduce a new helper bpf_map_kvcalloc() for the memory allocation in
bpf_local_storage(). With it, the allocation is charged to the memcg of
the map instead of to current's memcg, though the two are currently the
same thing since the helper is only used on the map creation path.
Charging the map's memory to the memcg obtained from the map makes the
accounting clearer.
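
A minimal usage sketch of the new helper (hypothetical caller; smap and
nbuckets are assumed names): callers pass their struct bpf_map and drop
the explicit __GFP_ACCOUNT, which the helper sets internally.

	/* Hypothetical caller sketch: allocate an array of nbuckets
	 * entries charged to the map's memcg; __GFP_ACCOUNT is added
	 * inside bpf_map_kvcalloc(), so it is not passed here.
	 */
	smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
					 sizeof(*smap->buckets),
					 GFP_USER | __GFP_NOWARN);
	if (!smap->buckets)
		return ERR_PTR(-ENOMEM);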

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Link: https://lore.kernel.org/r/20230210154734.4416-3-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
kernel/bpf/bpf_local_storage.c
kernel/bpf/syscall.c

index 35c18a98c21a735b518ee62b91d9753cf3c36f82..fe0bf482fdf892e126636f94b5260cf89f888648 100644
@@ -1886,6 +1886,8 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
                           int node);
 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+                      gfp_t flags);
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
                                    size_t align, gfp_t flags);
 #else
@@ -1902,6 +1904,12 @@ bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
        return kzalloc(size, flags);
 }
 
+static inline void *
+bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
+{
+       return kvcalloc(n, size, flags);
+}
+
 static inline void __percpu *
 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
                     gfp_t flags)
index 373c3c2c75bc0ac07c12fec4837a8e3bf1d702c9..35f4138a54dc1a198ed9e3b7ff0cda8209993d74 100644
@@ -568,8 +568,8 @@ static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_att
        nbuckets = max_t(u32, 2, nbuckets);
        smap->bucket_log = ilog2(nbuckets);
 
-       smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
-                                GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
+       smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
+                                        nbuckets, GFP_USER | __GFP_NOWARN);
        if (!smap->buckets) {
                bpf_map_area_free(smap);
                return ERR_PTR(-ENOMEM);
index bcc97613de768a912c7533504571efbd7219e410..9d94a35d8b0f93be7e67553551db8cf657c0aa6a 100644
@@ -464,6 +464,21 @@ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
        return ptr;
 }
 
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+                      gfp_t flags)
+{
+       struct mem_cgroup *memcg, *old_memcg;
+       void *ptr;
+
+       memcg = bpf_map_get_memcg(map);
+       old_memcg = set_active_memcg(memcg);
+       ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
+       set_active_memcg(old_memcg);
+       mem_cgroup_put(memcg);
+
+       return ptr;
+}
+
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
                                    size_t align, gfp_t flags)
 {