rhashtable: plumb through alloc tag
This gives better memory allocation profiling results; rhashtable
allocations will be accounted to the code that initialized the rhashtable.

[[email protected]: undo _noprof additions in the documentation]
  Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Kent Overstreet <[email protected]>
Signed-off-by: Suren Baghdasaryan <[email protected]>
Tested-by: Kees Cook <[email protected]>
Cc: Alexander Viro <[email protected]>
Cc: Alex Gaynor <[email protected]>
Cc: Alice Ryhl <[email protected]>
Cc: Andreas Hindborg <[email protected]>
Cc: Benno Lossin <[email protected]>
Cc: "Björn Roy Baron" <[email protected]>
Cc: Boqun Feng <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Dennis Zhou <[email protected]>
Cc: Gary Guo <[email protected]>
Cc: Miguel Ojeda <[email protected]>
Cc: Pasha Tatashin <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Wedson Almeida Filho <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Kent Overstreet authored and akpm00 committed Apr 26, 2024
1 parent 88ae5fb · commit 9e54dd8
Showing 3 changed files with 26 additions and 10 deletions.
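For illustration (not part of the commit), a minimal sketch of how a rhashtable user benefits. The names struct my_obj, my_params, my_table and my_module_init() below are hypothetical; the rhashtable API calls are the real ones touched by this patch. With CONFIG_MEM_ALLOC_PROFILING=y, the bucket-table memory allocated for this table, including later resizes done by the library, is charged to the rhashtable_init() call site here and shows up under that line in the profiling report (/proc/allocinfo) instead of being lumped into lib/rhashtable.c.

#include <linux/module.h>
#include <linux/rhashtable.h>

struct my_obj {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params my_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_obj, key),
	.head_offset	= offsetof(struct my_obj, node),
};

static struct rhashtable my_table;

static int __init my_module_init(void)
{
	/* rhashtable_init() is now a macro wrapping rhashtable_init_noprof() */
	return rhashtable_init(&my_table, &my_params);
}
module_init(my_module_init);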
include/linux/alloc_tag.h: 3 additions & 0 deletions
@@ -152,13 +152,16 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
 	ref->ct = NULL;
 }
 
+#define alloc_tag_record(p)	((p) = current->alloc_tag)
+
 #else /* CONFIG_MEM_ALLOC_PROFILING */
 
 #define DEFINE_ALLOC_TAG(_alloc_tag)
 static inline bool mem_alloc_profiling_enabled(void) { return false; }
 static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
 				 size_t bytes) {}
 static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
+#define alloc_tag_record(p)	do {} while (0)
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
 
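A note on the shape of the new helper: the enabled variant stores current->alloc_tag (set up by the alloc_hooks() wrappers) into the given pointer, while the disabled variant is deliberately a macro rather than a static inline, so its argument is never evaluated and callers may pass a field that only exists under CONFIG_MEM_ALLOC_PROFILING, exactly as rhashtable does below. A small hypothetical sketch of that pattern (my_table and my_table_setup() are illustrative, not kernel API):

#include <linux/alloc_tag.h>

struct my_table {
#ifdef CONFIG_MEM_ALLOC_PROFILING
	struct alloc_tag	*alloc_tag;	/* only present when profiling is on */
#endif
	unsigned int		nelems;
};

static inline void my_table_setup(struct my_table *t)
{
	/*
	 * Compiles in both configurations: with profiling off,
	 * alloc_tag_record() expands to a no-op and t->alloc_tag
	 * is never referenced.
	 */
	alloc_tag_record(t->alloc_tag);
	t->nelems = 0;
}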
include/linux/rhashtable-types.h: 9 additions & 2 deletions
@@ -9,6 +9,7 @@
 #ifndef _LINUX_RHASHTABLE_TYPES_H
 #define _LINUX_RHASHTABLE_TYPES_H
 
+#include <linux/alloc_tag.h>
 #include <linux/atomic.h>
 #include <linux/compiler.h>
 #include <linux/mutex.h>
@@ -88,6 +89,9 @@ struct rhashtable {
 	struct mutex		mutex;
 	spinlock_t		lock;
 	atomic_t		nelems;
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+	struct alloc_tag	*alloc_tag;
+#endif
 };
 
 /**
@@ -127,9 +131,12 @@ struct rhashtable_iter {
 	bool end_of_table;
 };
 
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
 		    const struct rhashtable_params *params);
-int rhltable_init(struct rhltable *hlt,
+#define rhashtable_init(...)	alloc_hooks(rhashtable_init_noprof(__VA_ARGS__))
+
+int rhltable_init_noprof(struct rhltable *hlt,
 		     const struct rhashtable_params *params);
+#define rhltable_init(...)	alloc_hooks(rhltable_init_noprof(__VA_ARGS__))
 
 #endif /* _LINUX_RHASHTABLE_TYPES_H */
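At a call site these wrappers expand textually, e.g. rhashtable_init(&ht, &params) becomes alloc_hooks(rhashtable_init_noprof(&ht, &params)); alloc_hooks() (also from alloc_tag.h) is the profiling machinery's standard wrapper, which roughly installs a per-call-site tag as current->alloc_tag around the wrapped call, so that alloc_tag_record() inside the init function can capture it. A hypothetical rhltable user, showing that the list-table variant gets the same treatment (my_entry and the other names are illustrative):

#include <linux/rhashtable.h>

struct my_entry {
	u32			id;
	struct rhlist_head	list;	/* list head used with rhltable */
};

static const struct rhashtable_params my_rhl_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_entry, id),
	.head_offset	= offsetof(struct my_entry, list),
};

static struct rhltable my_rhl_table;

static int my_subsystem_setup(void)
{
	/* expands to alloc_hooks(rhltable_init_noprof(...)) per the header above */
	return rhltable_init(&my_rhl_table, &my_rhl_params);
}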
lib/rhashtable.c: 14 additions & 8 deletions
@@ -130,7 +130,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 	if (ntbl)
 		return ntbl;
 
-	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+	ntbl = alloc_hooks_tag(ht->alloc_tag,
+			kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
 
 	if (ntbl && leaf) {
 		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
@@ -157,7 +158,8 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
 
 	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
 
-	tbl = kzalloc(size, gfp);
+	tbl = alloc_hooks_tag(ht->alloc_tag,
+			kmalloc_noprof(size, gfp|__GFP_ZERO));
 	if (!tbl)
 		return NULL;
 
@@ -181,7 +183,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	int i;
 	static struct lock_class_key __key;
 
-	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
+	tbl = alloc_hooks_tag(ht->alloc_tag,
+			kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
+					     gfp|__GFP_ZERO, NUMA_NO_NODE));
 
 	size = nbuckets;
 
@@ -1016,7 +1020,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
  *	.obj_hashfn = my_hash_fn,
  * };
  */
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
 		    const struct rhashtable_params *params)
 {
 	struct bucket_table *tbl;
@@ -1031,6 +1035,8 @@ int rhashtable_init(struct rhashtable *ht,
 	spin_lock_init(&ht->lock);
 	memcpy(&ht->p, params, sizeof(*params));
 
+	alloc_tag_record(ht->alloc_tag);
+
 	if (params->min_size)
 		ht->p.min_size = roundup_pow_of_two(params->min_size);
 
@@ -1076,7 +1082,7 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_init);
+EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
 
 /**
  * rhltable_init - initialize a new hash list table
@@ -1087,15 +1093,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
  *
  * See documentation for rhashtable_init.
  */
-int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
 {
 	int err;
 
-	err = rhashtable_init(&hlt->ht, params);
+	err = rhashtable_init_noprof(&hlt->ht, params);
 	hlt->ht.rhlist = true;
 	return err;
 }
-EXPORT_SYMBOL_GPL(rhltable_init);
+EXPORT_SYMBOL_GPL(rhltable_init_noprof);
 
 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
 				void (*free_fn)(void *ptr, void *arg),

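Taken together, lib/rhashtable.c follows a store-then-charge pattern: rhashtable_init_noprof() records the caller's tag once, and every internal bucket-table allocation, including resizes performed later from the library's worker, is redirected to that tag via alloc_hooks_tag() over the _noprof allocators. Note also that rhltable_init_noprof() now calls rhashtable_init_noprof() directly rather than the rhashtable_init() macro, so the recorded tag remains the original caller's rather than a line inside lib/rhashtable.c. A condensed, hypothetical version of the same pattern (my_buf and its helpers are illustrative; alloc_tag_record(), alloc_hooks(), alloc_hooks_tag() and kmalloc_noprof() are the real hooks used by the patch):

#include <linux/alloc_tag.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_buf {
#ifdef CONFIG_MEM_ALLOC_PROFILING
	struct alloc_tag	*alloc_tag;	/* call site that created this object */
#endif
	void			*data;
};

static int my_buf_init_noprof(struct my_buf *b)
{
	alloc_tag_record(b->alloc_tag);		/* remember the caller's tag */
	b->data = NULL;
	return 0;
}
#define my_buf_init(...)	alloc_hooks(my_buf_init_noprof(__VA_ARGS__))

/* May run much later (e.g. from a worker); still charged to the init call site. */
static int my_buf_grow(struct my_buf *b, size_t size, gfp_t gfp)
{
	/* same pattern as the patch: the tag argument is discarded when profiling is off */
	void *p = alloc_hooks_tag(b->alloc_tag,
				  kmalloc_noprof(size, gfp | __GFP_ZERO));

	if (!p)
		return -ENOMEM;
	kfree(b->data);
	b->data = p;
	return 0;
}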