mm: memcg: group cgroup v1 memcg related declarations
Group all cgroup v1-related declarations at the end of memcontrol.h and
mm/memcontrol-v1.h with the intention of putting them all together under
a config option later on. This should also make things easier to follow
and maintain.
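
For context, a minimal sketch of the follow-up this grouping enables. The
commit does not name the config option; CONFIG_MEMCG_V1 below is assumed
purely for illustration:

/*
 * Hypothetical follow-up (not part of this commit): once the v1
 * declarations sit in one block, the whole block can be compiled
 * out behind a single Kconfig symbol. CONFIG_MEMCG_V1 is an
 * assumed name for that symbol.
 */
#ifdef CONFIG_MEMCG_V1
bool mem_cgroup_oom_synchronize(bool wait);
#else /* CONFIG_MEMCG_V1 */
static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}
#endif /* CONFIG_MEMCG_V1 */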

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Roman Gushchin <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Shakeel Butt <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
rgushchin authored and akpm00 committed Jul 5, 2024
1 parent 34926e1 commit 6f1173d
Showing 2 changed files with 122 additions and 109 deletions.
144 changes: 76 additions & 68 deletions include/linux/memcontrol.h
@@ -950,39 +950,13 @@ static inline void mem_cgroup_exit_user_fault(void)
current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);

void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
rcu_read_lock();

if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
return true;

rcu_read_unlock();
return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
rcu_read_unlock();
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx, int val)
@@ -1109,10 +1083,6 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,

void split_page_memcg(struct page *head, int old_order, int new_order);

unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT 0
@@ -1423,26 +1393,6 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
/* to match folio_memcg_rcu() */
rcu_read_lock();
return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
rcu_read_unlock();
}

static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}
@@ -1455,16 +1405,6 @@ static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
struct task_struct *victim, struct mem_cgroup *oom_domain)
{
@@ -1558,14 +1498,6 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
static inline void split_page_memcg(struct page *head, int old_order, int new_order)
{
}

static inline
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned)
{
return 0;
}
#endif /* CONFIG_MEMCG */

/*
@@ -1916,4 +1848,80 @@ static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
}
#endif


/* Cgroup v1-related declarations */

#ifdef CONFIG_MEMCG
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);

bool mem_cgroup_oom_synchronize(bool wait);

static inline bool task_in_memcg_oom(struct task_struct *p)
{
return p->memcg_in_oom;
}

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
rcu_read_lock();

if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
return true;

rcu_read_unlock();
return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
rcu_read_unlock();
}

#else /* CONFIG_MEMCG */
static inline
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned)
{
return 0;
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
/* to match folio_memcg_rcu() */
rcu_read_lock();
return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
rcu_read_unlock();
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
return false;
}

#endif /* CONFIG_MEMCG */

#endif /* _LINUX_MEMCONTROL_H */
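
A usage note on the trylock pair grouped above: a minimal caller sketch.
The function name and surrounding logic are illustrative; only
folio_memcg(), mem_cgroup_trylock_pages() and mem_cgroup_unlock_pages()
come from this header:

/* Illustrative sketch, not part of this commit. */
static void inspect_folio_memcg(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	/*
	 * Takes the RCU read lock; fails (and drops the lock) if
	 * accounts are being moved, so nothing to unlock on failure.
	 */
	if (!mem_cgroup_trylock_pages(memcg))
		return;

	/* ... folio_memcg() is stable for this memcg's pages here ... */

	mem_cgroup_unlock_pages();	/* releases the RCU read lock */
}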
87 changes: 46 additions & 41 deletions mm/memcontrol-v1.h
@@ -5,15 +5,9 @@

#include <linux/cgroup-defs.h>

void memcg1_remove_from_trees(struct mem_cgroup *memcg);

static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
{
WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
}
/* Cgroup v1 and v2 common declarations */

void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
void memcg1_check_events(struct mem_cgroup *memcg, int nid);
int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned int nr_pages);

@@ -29,11 +23,26 @@ static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n);
void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);

bool memcg1_wait_acct_move(struct mem_cgroup *memcg);
struct cgroup_taskset;
int memcg1_can_attach(struct cgroup_taskset *tset);
void memcg1_cancel_attach(struct cgroup_taskset *tset);
void memcg1_move_task(void);
/*
* Iteration constructs for visiting all cgroups (under a tree). If
* loops are exited prematurely (break), mem_cgroup_iter_break() must
* be used for reference counting.
*/
#define for_each_mem_cgroup_tree(iter, root) \
for (iter = mem_cgroup_iter(root, NULL, NULL); \
iter != NULL; \
iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter) \
for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
iter != NULL; \
iter = mem_cgroup_iter(NULL, iter, NULL))

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
}

/*
* Per memcg event counter is incremented at every pagein/pageout. With THP,
@@ -47,27 +56,34 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
enum mem_cgroup_events_target target);
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);

void drain_all_stock(struct mem_cgroup *root_memcg);

unsigned long memcg_events(struct mem_cgroup *memcg, int event);
unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
int memory_stat_show(struct seq_file *m, void *v);

/* Cgroup v1-specific declarations */

void memcg1_remove_from_trees(struct mem_cgroup *memcg);

static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
{
return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
}

/*
* Iteration constructs for visiting all cgroups (under a tree). If
* loops are exited prematurely (break), mem_cgroup_iter_break() must
* be used for reference counting.
*/
#define for_each_mem_cgroup_tree(iter, root) \
for (iter = mem_cgroup_iter(root, NULL, NULL); \
iter != NULL; \
iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter) \
for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
iter != NULL; \
iter = mem_cgroup_iter(NULL, iter, NULL))
bool memcg1_wait_acct_move(struct mem_cgroup *memcg);

struct cgroup_taskset;
int memcg1_can_attach(struct cgroup_taskset *tset);
void memcg1_cancel_attach(struct cgroup_taskset *tset);
void memcg1_move_task(void);
void memcg1_css_offline(struct mem_cgroup *memcg);

/* for encoding cft->private value on file */
@@ -78,22 +94,11 @@ enum res_type {
_TCP,
};

bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
enum mem_cgroup_events_target target);
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);

bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
void memcg1_oom_recover(struct mem_cgroup *memcg);

void drain_all_stock(struct mem_cgroup *root_memcg);

unsigned long memcg_events(struct mem_cgroup *memcg, int event);
unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
int memory_stat_show(struct seq_file *m, void *v);
void memcg1_check_events(struct mem_cgroup *memcg, int nid);

void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);

