Skip to content

Commit

Permalink
mm, slab: add kerneldocs for common SLAB_ flags
Browse files Browse the repository at this point in the history
We have many SLAB_ flags but many are used only internally, by kunit
tests or debugging subsystems cooperating with slab, or are set
according to slab_debug boot parameter.

Create kerneldocs for the commonly used flags that may be passed to
kmem_cache_create(). SLAB_TYPESAFE_BY_RCU already had a detailed
description, so turn it to a kerneldoc. Add some details for
SLAB_ACCOUNT, SLAB_RECLAIM_ACCOUNT and SLAB_HWCACHE_ALIGN. Reference
them from the __kmem_cache_create_args() kerneldoc.

Signed-off-by: Vlastimil Babka <[email protected]>
  • Loading branch information
tehcaster committed Oct 29, 2024
1 parent b4b797d commit b6da940
Show file tree
Hide file tree
Showing 2 changed files with 54 additions and 20 deletions.
60 changes: 41 additions & 19 deletions include/linux/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,17 @@ enum _slab_flag_bits {
#define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
/* Align objs on cache lines */
/**
* define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
*
* Sufficiently large objects are aligned on cache line boundary. For object
* size smaller than a half of cache line size, the alignment is on the half of
* cache line size. In general, if object size is smaller than 1/2^n of cache
* line size, the alignment is adjusted to 1/2^n.
*
* If explicit alignment is also requested by the respective
 * &struct kmem_cache_args field, the greater of both alignments is applied.
*/
#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
Expand All @@ -87,8 +97,8 @@ enum _slab_flag_bits {
#define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
/*
* SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
/**
* define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
* This delays freeing the SLAB page by a grace period, it does _NOT_
* delay object freeing. This means that if you do kmem_cache_free()
Expand All @@ -99,20 +109,22 @@ enum _slab_flag_bits {
* stays valid, the trick to using this is relying on an independent
* object validation pass. Something like:
*
* begin:
* rcu_read_lock();
* obj = lockless_lookup(key);
* if (obj) {
* if (!try_get_ref(obj)) // might fail for free objects
* rcu_read_unlock();
* goto begin;
* ::
*
* begin:
* rcu_read_lock();
* obj = lockless_lookup(key);
* if (obj) {
* if (!try_get_ref(obj)) // might fail for free objects
* rcu_read_unlock();
* goto begin;
*
* if (obj->key != key) { // not the object we expected
* put_ref(obj);
* rcu_read_unlock();
* goto begin;
* }
* }
* if (obj->key != key) { // not the object we expected
* put_ref(obj);
* rcu_read_unlock();
* goto begin;
* }
* }
* rcu_read_unlock();
*
* This is useful if we need to approach a kernel structure obliquely,
Expand All @@ -137,7 +149,6 @@ enum _slab_flag_bits {
*
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
/* Trace allocations and frees */
#define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
Expand Down Expand Up @@ -170,7 +181,12 @@ enum _slab_flag_bits {
#else
# define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
#endif
/* Account to memcg */
/**
* define SLAB_ACCOUNT - Account allocations to memcg.
*
 * All object allocations from this cache will be memcg accounted, regardless
 * of whether __GFP_ACCOUNT is passed to individual allocations.
*/
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
Expand All @@ -197,7 +213,13 @@ enum _slab_flag_bits {
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
/**
* define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
*
* Use this flag for caches that have an associated shrinker. As a result, slab
* pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
 * mobility, and are accounted in the SReclaimable counter in /proc/meminfo.
*/
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
Expand Down
14 changes: 13 additions & 1 deletion mm/slab_common.c
Original file line number Diff line number Diff line change
Expand Up @@ -254,11 +254,23 @@ static struct kmem_cache *create_cache(const char *name,
* @object_size: The size of objects to be created in this cache.
* @args: Additional arguments for the cache creation (see
* &struct kmem_cache_args).
* @flags: See %SLAB_* flags for an explanation of individual @flags.
 * @flags: See the descriptions of individual flags. The common ones are listed
 * in the description below.
*
* Not to be called directly, use the kmem_cache_create() wrapper with the same
* parameters.
*
* Commonly used @flags:
*
* &SLAB_ACCOUNT - Account allocations to memcg.
*
* &SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
*
* &SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
*
* &SLAB_TYPESAFE_BY_RCU - Slab page (not individual objects) freeing delayed
* by a grace period - see the full description before using.
*
 * Context: Cannot be called within an interrupt, but can be interrupted.
*
* Return: a pointer to the cache on success, NULL on failure.
Expand Down

0 comments on commit b6da940

Please sign in to comment.