void __kasan_unpoison_range(const void *address, size_t size)
{
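+ /* KFENCE memory is not tracked by KASAN; leave it as is. */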
+ if (is_kfence_address(address))
+ return;
+
kasan_unpoison(address, size, false);
}
tagged_object = object;
object = kasan_reset_tag(object);
- if (is_kfence_address(object))
- return false;
-
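+ /* Callers of this function have already filtered out KFENCE addresses. */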
if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
return true;
bool __kasan_slab_free(struct kmem_cache *cache, void *object,
unsigned long ip, bool init)
{
- bool buggy_object = poison_slab_object(cache, object, ip, init);
+ bool buggy_object;
+
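+ /* KFENCE objects bypass KASAN poisoning and the quarantine. */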
+ if (is_kfence_address(object))
+ return false;
+
+ buggy_object = poison_slab_object(cache, object, ip, init);
return buggy_object ? true : kasan_quarantine_put(cache, object);
}
if (unlikely(object == NULL))
return NULL;
- if (is_kfence_address(kasan_reset_tag(object)))
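+ /* KFENCE objects are never assigned KASAN tags; no need to reset the tag. */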
+ if (is_kfence_address(object))
return (void *)object;
/* The object has already been unpoisoned by kasan_slab_alloc(). */
if (unlikely(object == ZERO_SIZE_PTR))
return (void *)object;
- if (is_kfence_address(kasan_reset_tag(object)))
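+ /* As in __kasan_kmalloc(), the pointer tag does not affect the KFENCE check. */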
+ if (is_kfence_address(object))
return (void *)object;
/*
return true;
}
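+ /* KFENCE objects are managed by KFENCE itself; KASAN does not poison them. */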
+ if (is_kfence_address(ptr))
+ return false;
+
slab = folio_slab(folio);
return !poison_slab_object(slab->slab_cache, ptr, ip, false);
}
struct slab *slab;
gfp_t flags = 0; /* Might be executing under a lock. */
- if (is_kfence_address(kasan_reset_tag(ptr)))
- return;
-
slab = virt_to_slab(ptr);
/*
return;
}
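+ /* KFENCE objects are not poisoned on mempool free; nothing to unpoison. */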
+ if (is_kfence_address(ptr))
+ return;
+
/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
unpoison_slab_object(slab->slab_cache, ptr, size, flags);
static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
- addr = kasan_reset_tag(addr);
-
- /* Skip KFENCE memory if called explicitly outside of sl*b. */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
if (WARN_ON(size & KASAN_GRANULE_MASK))
return;
- hw_set_mem_tag_range((void *)addr, size, value, init);
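+ /* Only the tag write itself needs the untagged address. */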
+ hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
}
static inline void kasan_unpoison(const void *addr, size_t size, bool init)
{
u8 tag = get_tag(addr);
- addr = kasan_reset_tag(addr);
-
- /* Skip KFENCE memory if called explicitly outside of sl*b. */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
size = round_up(size, KASAN_GRANULE_SIZE);
- hw_set_mem_tag_range((void *)addr, size, tag, init);
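+ /* Untag the address only when writing the memory tags. */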
+ hw_set_mem_tag_range(kasan_reset_tag(addr), size, tag, init);
}
static inline bool kasan_byte_accessible(const void *addr)
*/
addr = kasan_reset_tag(addr);
- /* Skip KFENCE memory if called explicitly outside of sl*b. */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
if (WARN_ON(size & KASAN_GRANULE_MASK))
*/
addr = kasan_reset_tag(addr);
- /*
- * Skip KFENCE memory if called explicitly outside of sl*b. Also note
- * that calls to ksize(), where size is not a multiple of machine-word
- * size, would otherwise poison the invalid portion of the word.
- */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;