zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
This helps determine the coldest zspages as candidates for writeback.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Nhat Pham <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Reviewed-by: Sergey Senozhatsky <[email protected]>
Cc: Dan Streetman <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Nitin Gupta <[email protected]>
Cc: Seth Jennings <[email protected]>
Cc: Vitaly Wool <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
nhatsmrt authored and akpm00 committed Dec 12, 2022
1 parent c0547d0 commit 64f768c
Showing 1 changed file with 50 additions and 0 deletions.
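
For readers new to the kernel's intrusive lists: the patch embeds a `struct list_head` in both `zs_pool` and `zspage`, so the pool can keep zspages ordered from most to least recently written and reclaim can pick the coldest candidate from the tail. The following is a minimal userspace sketch of that idea, not the kernel code: the list helpers are simplified re-implementations of `<linux/list.h>`, and the two structs are stripped-down stubs.

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel's <linux/list.h>; not the real helpers. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative stubs only; the real zs_pool/zspage carry many more fields. */
struct zspage { int id; struct list_head lru; };
struct zs_pool { struct list_head lru; };

int main(void)
{
	struct zs_pool pool;
	struct zspage a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	INIT_LIST_HEAD(&pool.lru);

	/* Each store links the zspage at the head: a, then b, then c. */
	list_add(&a.lru, &pool.lru);
	list_add(&b.lru, &pool.lru);
	list_add(&c.lru, &pool.lru);

	/* The coldest zspage (the writeback candidate) sits at the tail. */
	struct zspage *coldest = container_of(pool.lru.prev, struct zspage, lru);
	printf("coldest zspage: %d\n", coldest->id);	/* prints 1 */
	return 0;
}
```

The diff below adds exactly these two list heads and the handful of list operations to mm/zsmalloc.c.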
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -239,6 +239,11 @@ struct zs_pool {
 	/* Compact classes */
 	struct shrinker shrinker;
 
+#ifdef CONFIG_ZPOOL
+	/* List tracking the zspages in LRU order by most recently added object */
+	struct list_head lru;
+#endif
+
 #ifdef CONFIG_ZSMALLOC_STAT
 	struct dentry *stat_dentry;
 #endif
@@ -260,6 +265,12 @@ struct zspage {
 	unsigned int freeobj;
 	struct page *first_page;
 	struct list_head list; /* fullness list */
+
+#ifdef CONFIG_ZPOOL
+	/* links the zspage to the lru list in the pool */
+	struct list_head lru;
+#endif
+
 	struct zs_pool *pool;
#ifdef CONFIG_COMPACTION
 	rwlock_t lock;
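
Because the node lives inside the zspage itself, the same zspage now sits on two independent lists at once: its per-class fullness list (`list`) and the new pool-wide LRU (`lru`), and `container_of()` recovers the zspage from either node. A small userspace sketch of that dual membership, again with simplified stand-ins for the kernel helpers and stub structures:

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for <linux/list.h>; not the real kernel helpers. */
struct list_head { struct list_head *prev, *next; };
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next; new->prev = head;
	head->next->prev = new; head->next = new;
}

/* Stub zspage: one node for the fullness list, one for the pool LRU. */
struct zspage {
	int inuse;
	struct list_head list;	/* fullness list */
	struct list_head lru;	/* pool-wide LRU */
};

int main(void)
{
	struct list_head fullness, lru;
	struct zspage z = { .inuse = 3 };

	INIT_LIST_HEAD(&fullness);
	INIT_LIST_HEAD(&lru);

	list_add(&z.list, &fullness);	/* membership #1 */
	list_add(&z.lru, &lru);		/* membership #2 */

	/* Either node leads back to the same zspage via container_of(). */
	struct zspage *a = container_of(fullness.next, struct zspage, list);
	struct zspage *b = container_of(lru.next, struct zspage, lru);
	printf("same zspage: %d\n", a == b);	/* prints 1 */
	return 0;
}
```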
@@ -953,6 +964,9 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class,
 	}
 
 	remove_zspage(class, zspage, ZS_EMPTY);
+#ifdef CONFIG_ZPOOL
+	list_del(&zspage->lru);
+#endif
 	__free_zspage(pool, class, zspage);
 }

@@ -998,6 +1012,10 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		off %= PAGE_SIZE;
 	}
 
+#ifdef CONFIG_ZPOOL
+	INIT_LIST_HEAD(&zspage->lru);
+#endif
+
 	set_freeobj(zspage, 0);
 }
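
`INIT_LIST_HEAD()` leaves the node pointing at itself, which is what lets `list_empty()` later answer whether a zspage is already on the pool's LRU. A tiny userspace check of that property, using simplified stand-ins for the kernel helpers:

```c
#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel helpers. */
struct list_head { struct list_head *prev, *next; };
static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head lru;

	INIT_LIST_HEAD(&lru);		/* node points at itself */
	assert(list_empty(&lru));	/* "not on any list" is detectable */
	printf("freshly initialised node reads as empty: %d\n", list_empty(&lru));
	return 0;
}
```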

@@ -1270,6 +1288,31 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	obj_to_location(obj, &page, &obj_idx);
 	zspage = get_zspage(page);
 
+#ifdef CONFIG_ZPOOL
+	/*
+	 * Move the zspage to front of pool's LRU.
+	 *
+	 * Note that this is swap-specific, so by definition there are no ongoing
+	 * accesses to the memory while the page is swapped out that would make
+	 * it "hot". A new entry is hot, then ages to the tail until it gets either
+	 * written back or swaps back in.
+	 *
+	 * Furthermore, map is also called during writeback. We must not put an
+	 * isolated page on the LRU mid-reclaim.
+	 *
+	 * As a result, only update the LRU when the page is mapped for write
+	 * when it's first instantiated.
+	 *
+	 * This is a deviation from the other backends, which perform this update
+	 * in the allocation function (zbud_alloc, z3fold_alloc).
+	 */
+	if (mm == ZS_MM_WO) {
+		if (!list_empty(&zspage->lru))
+			list_del(&zspage->lru);
+		list_add(&zspage->lru, &pool->lru);
+	}
+#endif
+
 	/*
 	 * migration cannot move any zpages in this zspage. Here, pool->lock
 	 * is too heavy since callers would take some time until they calls
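
The comment above reduces to a small policy: only a write mapping (`ZS_MM_WO`, i.e. a fresh store of a swapped-out page) touches the ordering, and an entry already on the list is unlinked before being re-added at the head. Below is a rough userspace sketch of just that rotation; `lru_note_map()` is a hypothetical helper name, the list functions are simplified stand-ins for `<linux/list.h>`, and the structs are illustrative stubs:

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for <linux/list.h>; not the real kernel helpers. */
struct list_head { struct list_head *prev, *next; };
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next; new->prev = head;
	head->next->prev = new; head->next = new;
}
static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);	/* unlike the kernel list_del(), re-init instead of poisoning */
}

/* Names borrowed from the real enum in include/linux/zsmalloc.h. */
enum zs_mapmode { ZS_MM_RW, ZS_MM_RO, ZS_MM_WO };

struct zspage  { int id; struct list_head lru; };	/* illustrative stubs */
struct zs_pool { struct list_head lru; };

/* Hypothetical helper mirroring the #ifdef CONFIG_ZPOOL block in zs_map_object(). */
static void lru_note_map(struct zs_pool *pool, struct zspage *zspage,
			 enum zs_mapmode mm)
{
	if (mm != ZS_MM_WO)
		return;				/* only a fresh store reorders */
	if (!list_empty(&zspage->lru))
		list_del(&zspage->lru);		/* unlink before re-adding */
	list_add(&zspage->lru, &pool->lru);	/* most recently written at the head */
}

int main(void)
{
	struct zs_pool pool;
	struct zspage a = { .id = 1 }, b = { .id = 2 };

	INIT_LIST_HEAD(&pool.lru);
	INIT_LIST_HEAD(&a.lru);
	INIT_LIST_HEAD(&b.lru);

	lru_note_map(&pool, &a, ZS_MM_WO);	/* a stored: hottest */
	lru_note_map(&pool, &b, ZS_MM_WO);	/* b stored: now hottest */
	lru_note_map(&pool, &a, ZS_MM_RO);	/* a mapped for read: no change */
	lru_note_map(&pool, &a, ZS_MM_WO);	/* a stored again: back to the head */

	struct zspage *head = container_of(pool.lru.next, struct zspage, lru);
	printf("hottest zspage: %d\n", head->id);	/* prints 1 */
	return 0;
}
```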
@@ -1988,6 +2031,9 @@ static void async_free_zspage(struct work_struct *work)
 		VM_BUG_ON(fullness != ZS_EMPTY);
 		class = pool->size_class[class_idx];
 		spin_lock(&pool->lock);
+#ifdef CONFIG_ZPOOL
+		list_del(&zspage->lru);
+#endif
 		__free_zspage(pool, class, zspage);
 		spin_unlock(&pool->lock);
 	}
@@ -2299,6 +2345,10 @@ struct zs_pool *zs_create_pool(const char *name)
 	 */
 	zs_register_shrinker(pool);
 
+#ifdef CONFIG_ZPOOL
+	INIT_LIST_HEAD(&pool->lru);
+#endif
+
 	return pool;
 
 err:
