kasan: rename and document kasan_(un)poison_object_data
authorAndrey Konovalov <andreyknvl@google.com>
Tue, 19 Dec 2023 22:29:03 +0000 (23:29 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Fri, 29 Dec 2023 19:58:40 +0000 (11:58 -0800)
Rename kasan_unpoison_object_data to kasan_unpoison_new_object and add a
documentation comment.  Do the same for kasan_poison_object_data.

The new names and the comments should suggest to users that these hooks
are intended for internal use by the slab allocator.

The following patch will remove non-slab-internal uses of these hooks.

No functional changes.

[andreyknvl@google.com: update references to renamed functions in comments]
Link: https://lkml.kernel.org/r/20231221180637.105098-1-andrey.konovalov@linux.dev
Link: https://lkml.kernel.org/r/eab156ebbd635f9635ef67d1a4271f716994e628.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/kasan.h
mm/kasan/common.c
mm/kasan/shadow.c
mm/slab.c
mm/slub.c
net/core/skbuff.c

index 7392c5d89b920a1bd63f34e27b2d677b51b72cf1..d49e3d4c099efec78e3e2eb116fe260a33b5efdc 100644 (file)
@@ -129,20 +129,39 @@ static __always_inline void kasan_poison_slab(struct slab *slab)
                __kasan_poison_slab(slab);
 }
 
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
                                                        void *object)
 {
        if (kasan_enabled())
-               __kasan_unpoison_object_data(cache, object);
+               __kasan_unpoison_new_object(cache, object);
 }
 
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belong to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
                                                        void *object)
 {
        if (kasan_enabled())
-               __kasan_poison_object_data(cache, object);
+               __kasan_poison_new_object(cache, object);
 }
 
 void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
@@ -342,9 +361,9 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
        return false;
 }
 static inline void kasan_poison_slab(struct slab *slab) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
                                        void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
                                        void *object) {}
 static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
                                const void *object)
index bf16c2dfa8e7168f217e15ff5bf7cd57e7d5c3b9..f4255e807b742b58fb2ea043d7e919fbaf4485a1 100644 (file)
@@ -143,12 +143,12 @@ void __kasan_poison_slab(struct slab *slab)
                     KASAN_SLAB_REDZONE, false);
 }
 
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
 {
        kasan_unpoison(object, cache->object_size, false);
 }
 
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
 {
        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                        KASAN_SLAB_REDZONE, false);
index d687f09a7ae37f3227cb57f3ada0517b66afb777..0154d200be40368ff06451dc5da72dcdf1ef61f2 100644 (file)
@@ -130,7 +130,7 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 
        /*
         * Perform shadow offset calculation based on untagged address, as
-        * some of the callers (e.g. kasan_poison_object_data) pass tagged
+        * some of the callers (e.g. kasan_poison_new_object) pass tagged
         * addresses to this function.
         */
        addr = kasan_reset_tag(addr);
@@ -170,7 +170,7 @@ void kasan_unpoison(const void *addr, size_t size, bool init)
 
        /*
         * Perform shadow offset calculation based on untagged address, as
-        * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+        * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
         * addresses to this function.
         */
        addr = kasan_reset_tag(addr);
index 9ad3d0f2d1a5e0b32dcad0c544975cdb6431f648..773c79e153f32cfcbf638f2d3b887ee0c3e5f1fd 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2327,11 +2327,9 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab)
                 * They must also be threaded.
                 */
                if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
-                       kasan_unpoison_object_data(cachep,
-                                                  objp + obj_offset(cachep));
+                       kasan_unpoison_new_object(cachep, objp + obj_offset(cachep));
                        cachep->ctor(objp + obj_offset(cachep));
-                       kasan_poison_object_data(
-                               cachep, objp + obj_offset(cachep));
+                       kasan_poison_new_object(cachep, objp + obj_offset(cachep));
                }
 
                if (cachep->flags & SLAB_RED_ZONE) {
@@ -2472,9 +2470,9 @@ static void cache_init_objs(struct kmem_cache *cachep,
 
                /* constructor could break poison info */
                if (DEBUG == 0 && cachep->ctor) {
-                       kasan_unpoison_object_data(cachep, objp);
+                       kasan_unpoison_new_object(cachep, objp);
                        cachep->ctor(objp);
-                       kasan_poison_object_data(cachep, objp);
+                       kasan_poison_new_object(cachep, objp);
                }
 
                if (!shuffled)
index 782bd8a6bd34907438bd5756c5d6d3412be231b7..891742e5932a8f52974aa045643f827ad7a8d0a7 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1860,9 +1860,9 @@ static void *setup_object(struct kmem_cache *s, void *object)
        setup_object_debug(s, object);
        object = kasan_init_slab_obj(s, object);
        if (unlikely(s->ctor)) {
-               kasan_unpoison_object_data(s, object);
+               kasan_unpoison_new_object(s, object);
                s->ctor(object);
-               kasan_poison_object_data(s, object);
+               kasan_poison_new_object(s, object);
        }
        return object;
 }
index b157efea5dea88745f9a2ae547d39fdf7e622627..63bb6526399db76fd012f72330bcfdebaa6da9c3 100644 (file)
@@ -337,7 +337,7 @@ static struct sk_buff *napi_skb_cache_get(void)
        }
 
        skb = nc->skb_cache[--nc->skb_count];
-       kasan_unpoison_object_data(skbuff_cache, skb);
+       kasan_unpoison_new_object(skbuff_cache, skb);
 
        return skb;
 }
@@ -1309,13 +1309,13 @@ static void napi_skb_cache_put(struct sk_buff *skb)
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
        u32 i;
 
-       kasan_poison_object_data(skbuff_cache, skb);
+       kasan_poison_new_object(skbuff_cache, skb);
        nc->skb_cache[nc->skb_count++] = skb;
 
        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
                for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
-                       kasan_unpoison_object_data(skbuff_cache,
-                                                  nc->skb_cache[i]);
+                       kasan_unpoison_new_object(skbuff_cache,
+                                                 nc->skb_cache[i]);
 
                kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
                                     nc->skb_cache + NAPI_SKB_CACHE_HALF);