mm/slab: move [__]ksize and slab_ksize() to mm/slub.c

To access SLUB's internal implementation details beyond cache flags in
ksize(), move __ksize(), ksize(), and slab_ksize() to mm/slub.c.

[vbabka@suse.cz: also make __ksize() static and move its kerneldoc to
 ksize()]

Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20260113061845.159790-9-harry.yoo@oracle.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Author:    Harry Yoo
Date:      2026-01-13 15:18:44 +09:00
Committer: Vlastimil Babka
Parent:    70089d0188
Commit:    fab0694646

4 changed files with 86 additions and 89 deletions

include/linux/slab.h

@@ -509,7 +509,6 @@ void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size
 void kfree(const void *objp);
 void kfree_nolock(const void *objp);
 void kfree_sensitive(const void *objp);
-size_t __ksize(const void *objp);
 DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
 DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))

mm/slab.h

@@ -661,33 +661,6 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 void kvfree_rcu_cb(struct rcu_head *head);
 
-size_t __ksize(const void *objp);
-
-static inline size_t slab_ksize(const struct kmem_cache *s)
-{
-#ifdef CONFIG_SLUB_DEBUG
-        /*
-         * Debugging requires use of the padding between object
-         * and whatever may come after it.
-         */
-        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-                return s->object_size;
-#endif
-        if (s->flags & SLAB_KASAN)
-                return s->object_size;
-        /*
-         * If we have the need to store the freelist pointer
-         * back there or track user information then we can
-         * only use the space before that information.
-         */
-        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
-                return s->inuse;
-        /*
-         * Else we can use all the padding etc for the allocation
-         */
-        return s->size;
-}
-
 static inline unsigned int large_kmalloc_order(const struct page *page)
 {
         return page[1].flags.f & 0xff;
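
The context lines above show that SLUB stashes a large kmalloc allocation's page order in the second page's flags. large_kmalloc_size(), which __ksize() calls below, does not appear in this diff; a plausible sketch, assuming it simply converts the stored order back into bytes:

/* Assumption: large_kmalloc_size() is not shown in this patch; this
 * sketch derives it from large_kmalloc_order() in the obvious way. */
static inline size_t large_kmalloc_size(const struct page *page)
{
        return PAGE_SIZE << large_kmalloc_order(page);
}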

mm/slab_common.c

@@ -1021,43 +1021,6 @@ void __init create_kmalloc_caches(void)
                                 0, SLAB_NO_MERGE, NULL);
 }
 
-/**
- * __ksize -- Report full size of underlying allocation
- * @object: pointer to the object
- *
- * This should only be used internally to query the true size of allocations.
- * It is not meant to be a way to discover the usable size of an allocation
- * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
- * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
- * and/or FORTIFY_SOURCE.
- *
- * Return: size of the actual memory used by @object in bytes
- */
-size_t __ksize(const void *object)
-{
-        const struct page *page;
-        const struct slab *slab;
-
-        if (unlikely(object == ZERO_SIZE_PTR))
-                return 0;
-
-        page = virt_to_page(object);
-        if (unlikely(PageLargeKmalloc(page)))
-                return large_kmalloc_size(page);
-
-        slab = page_slab(page);
-
-        /* Delete this after we're sure there are no users */
-        if (WARN_ON(!slab))
-                return page_size(page);
-
-#ifdef CONFIG_SLUB_DEBUG
-        skip_orig_size_check(slab->slab_cache, object);
-#endif
-
-        return slab_ksize(slab->slab_cache);
-}
-
 gfp_t kmalloc_fix_flags(gfp_t flags)
 {
         gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
@@ -1273,30 +1236,6 @@ void kfree_sensitive(const void *p)
 }
 EXPORT_SYMBOL(kfree_sensitive);
 
-size_t ksize(const void *objp)
-{
-        /*
-         * We need to first check that the pointer to the object is valid.
-         * The KASAN report printed from ksize() is more useful than one
-         * printed later, when the behaviour could be undefined due to
-         * a potential use-after-free or double-free.
-         *
-         * We use kasan_check_byte(), which is supported for the hardware
-         * tag-based KASAN mode, unlike kasan_check_read/write().
-         *
-         * If the pointed-to memory is invalid, we return 0 to avoid users of
-         * ksize() writing to and potentially corrupting the memory region.
-         *
-         * We want to perform the check before __ksize(), to avoid potentially
-         * crashing in __ksize() due to accessing invalid metadata.
-         */
-        if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
-                return 0;
-
-        return kfence_ksize(objp) ?: __ksize(objp);
-}
-EXPORT_SYMBOL(ksize);
-
 #ifdef CONFIG_BPF_SYSCALL
 #include <linux/btf.h>

mm/slub.c

@@ -7028,6 +7028,92 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+        /*
+         * Debugging requires use of the padding between object
+         * and whatever may come after it.
+         */
+        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+                return s->object_size;
+#endif
+        if (s->flags & SLAB_KASAN)
+                return s->object_size;
+        /*
+         * If we have the need to store the freelist pointer
+         * back there or track user information then we can
+         * only use the space before that information.
+         */
+        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
+                return s->inuse;
+        /*
+         * Else we can use all the padding etc for the allocation
+         */
+        return s->size;
+}
+
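
slab_ksize() reports how much of a slab slot a caller may safely use, and the ladder above shrinks that as debug features claim the padding. A minimal user-space sketch of the same decision ladder, with an invented struct and flag bits, and hypothetical sizes for a 64-byte object padded out to a 96-byte slot:

#include <stdio.h>

/* Toy stand-ins for the kernel's cache flags and struct kmem_cache;
 * only the decision ladder mirrors slab_ksize() above. */
#define TOY_RED_ZONE   (1u << 0)
#define TOY_STORE_USER (1u << 1)

struct toy_cache {
        unsigned int flags;
        unsigned int object_size;  /* requested object size */
        unsigned int inuse;        /* offset where metadata starts */
        unsigned int size;         /* full slot size including padding */
};

static unsigned int toy_slab_ksize(const struct toy_cache *s)
{
        if (s->flags & TOY_RED_ZONE)
                return s->object_size;  /* debugging owns the padding */
        if (s->flags & TOY_STORE_USER)
                return s->inuse;        /* only space before tracking info */
        return s->size;                 /* the whole slot is usable */
}

int main(void)
{
        struct toy_cache c = { .flags = 0, .object_size = 64,
                               .inuse = 64, .size = 96 };

        printf("no debug:   %u\n", toy_slab_ksize(&c));  /* 96 */
        c.flags = TOY_RED_ZONE;
        printf("red zone:   %u\n", toy_slab_ksize(&c));  /* 64 */
        c.flags = TOY_STORE_USER;
        printf("store user: %u\n", toy_slab_ksize(&c));  /* 64 */
        return 0;
}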
+static size_t __ksize(const void *object)
+{
+        const struct page *page;
+        const struct slab *slab;
+
+        if (unlikely(object == ZERO_SIZE_PTR))
+                return 0;
+
+        page = virt_to_page(object);
+        if (unlikely(PageLargeKmalloc(page)))
+                return large_kmalloc_size(page);
+
+        slab = page_slab(page);
+
+        /* Delete this after we're sure there are no users */
+        if (WARN_ON(!slab))
+                return page_size(page);
+
+#ifdef CONFIG_SLUB_DEBUG
+        skip_orig_size_check(slab->slab_cache, object);
+#endif
+
+        return slab_ksize(slab->slab_cache);
+}
+
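
The first check in __ksize() covers zero-byte allocations: kmalloc(0) returns the ZERO_SIZE_PTR cookie rather than a real object, so there is no page to interrogate. A kernel-style sketch of what a caller sees (the demo function is invented for illustration):

#include <linux/slab.h>

static void zero_size_demo(void)
{
        void *p = kmalloc(0, GFP_KERNEL);  /* returns ZERO_SIZE_PTR */

        WARN_ON(ksize(p) != 0);  /* the ZERO_OR_NULL_PTR check reports 0 */
        kfree(p);                /* freeing ZERO_SIZE_PTR is a no-op */
}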
+/**
+ * ksize -- Report full size of underlying allocation
+ * @objp: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
+size_t ksize(const void *objp)
+{
+        /*
+         * We need to first check that the pointer to the object is valid.
+         * The KASAN report printed from ksize() is more useful than one
+         * printed later, when the behaviour could be undefined due to
+         * a potential use-after-free or double-free.
+         *
+         * We use kasan_check_byte(), which is supported for the hardware
+         * tag-based KASAN mode, unlike kasan_check_read/write().
+         *
+         * If the pointed-to memory is invalid, we return 0 to avoid users of
+         * ksize() writing to and potentially corrupting the memory region.
+         *
+         * We want to perform the check before __ksize(), to avoid potentially
+         * crashing in __ksize() due to accessing invalid metadata.
+         */
+        if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
+                return 0;
+
+        return kfence_ksize(objp) ?: __ksize(objp);
+}
+EXPORT_SYMBOL(ksize);
+
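
The kerneldoc steers callers who want spare capacity toward kmalloc_size_roundup() at allocation time rather than querying ksize() after the fact. A kernel-style sketch of that recommended pattern (the wrapper and its name are invented; kmalloc_size_roundup() itself is the real API):

#include <linux/slab.h>

static void *alloc_with_slack(size_t want, size_t *usable)
{
        /* Round the request up to the bucket size before allocating, so
         * every usable byte was actually requested and KASAN, UBSAN_BOUNDS
         * and FORTIFY_SOURCE all agree the caller owns it. */
        *usable = kmalloc_size_roundup(want);
        return kmalloc(*usable, GFP_KERNEL);
}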
 static void free_large_kmalloc(struct page *page, void *object)
 {
         unsigned int order = compound_order(page);