mm/slab: use stride to access slabobj_ext

Use a configurable stride value when accessing slab object extension
metadata instead of assuming a fixed sizeof(struct slabobj_ext).

Store the stride value in the free bits of the slab->counters field. This
allows for flexibility in cases where the extension metadata is embedded
within slab objects.

Since these free bits exist only on 64-bit, any future optimizations
that need to change the stride value cannot be enabled on 32-bit architectures.

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20260113061845.159790-6-harry.yoo@oracle.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
Harry Yoo
2026-01-13 15:18:41 +09:00
committed by Vlastimil Babka
parent 52f1ca8a45
commit 7a8e71bc61
2 changed files with 35 additions and 4 deletions

View File

@@ -55,6 +55,14 @@ struct freelist_counters {
* that the slab was corrupted
*/
unsigned frozen:1;
#ifdef CONFIG_64BIT
/*
* Some optimizations use free bits in 'counters' field
* to save memory. In case ->stride field is not available,
* such optimizations are disabled.
*/
unsigned short stride;
#endif
};
};
};
@@ -529,6 +537,26 @@ static inline unsigned long slab_obj_exts(struct slab *slab)
return obj_exts & ~OBJEXTS_FLAGS_MASK;
}
#ifdef CONFIG_64BIT
static inline void slab_set_stride(struct slab *slab, unsigned short stride)
{
slab->stride = stride;
}
static inline unsigned short slab_get_stride(struct slab *slab)
{
return slab->stride;
}
#else
static inline void slab_set_stride(struct slab *slab, unsigned short stride)
{
VM_WARN_ON_ONCE(stride != sizeof(struct slabobj_ext));
}
static inline unsigned short slab_get_stride(struct slab *slab)
{
return sizeof(struct slabobj_ext);
}
#endif
/*
* slab_obj_ext - get the pointer to the slab object extension metadata
* associated with an object in a slab.
@@ -542,12 +570,9 @@ static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
unsigned long obj_exts,
unsigned int index)
{
struct slabobj_ext *obj_ext;
VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
obj_ext = (struct slabobj_ext *)obj_exts;
return &obj_ext[index];
return (struct slabobj_ext *)(obj_exts + slab_get_stride(slab) * index);
}
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
@@ -567,6 +592,10 @@ static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
return NULL;
}
static inline void slab_set_stride(struct slab *slab, unsigned int stride) { }
static inline unsigned int slab_get_stride(struct slab *slab) { return 0; }
#endif /* CONFIG_SLAB_OBJ_EXT */
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)

View File

@@ -2206,6 +2206,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
retry:
old_exts = READ_ONCE(slab->obj_exts);
handle_failed_objexts_alloc(old_exts, vec, objects);
slab_set_stride(slab, sizeof(struct slabobj_ext));
if (new_slab) {
/*
* If the slab is brand new and nobody can yet access its