mm/zsmalloc: remove get_zspage_mapping()
author: Chengming Zhou <zhouchengming@bytedance.com>
Tue, 20 Feb 2024 06:53:02 +0000 (06:53 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Sat, 24 Feb 2024 01:48:32 +0000 (17:48 -0800)
Actually, we seldom use the class_idx returned from get_zspage_mapping();
only the zspage->fullness is useful. Just use zspage->fullness directly and
remove this helper.

Note that zspage->fullness is not stable outside pool->lock, so remove the
redundant "VM_BUG_ON(fullness != ZS_INUSE_RATIO_0)" in async_free_zspage():
we already have the same VM_BUG_ON() in __free_zspage(), where accessing
zspage->fullness is safe because pool->lock is held.

Link: https://lkml.kernel.org/r/20240220-b4-zsmalloc-cleanup-v1-3-5c5ee4ccdd87@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/zsmalloc.c

index c39fac9361d7006ff569a79ea782e5ea26bd5e42..63ec385cd670f3e9c55ee24a6bc0a31f13a447de 100644 (file)
@@ -470,16 +470,6 @@ static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
        zspage->freeobj = obj;
 }
 
-static void get_zspage_mapping(struct zspage *zspage,
-                              unsigned int *class_idx,
-                              int *fullness)
-{
-       BUG_ON(zspage->magic != ZSPAGE_MAGIC);
-
-       *fullness = zspage->fullness;
-       *class_idx = zspage->class;
-}
-
 static struct size_class *zspage_class(struct zs_pool *pool,
                                       struct zspage *zspage)
 {
@@ -708,12 +698,10 @@ static void remove_zspage(struct size_class *class, struct zspage *zspage)
  */
 static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 {
-       int class_idx;
-       int currfg, newfg;
+       int newfg;
 
-       get_zspage_mapping(zspage, &class_idx, &currfg);
        newfg = get_fullness_group(class, zspage);
-       if (newfg == currfg)
+       if (newfg == zspage->fullness)
                goto out;
 
        remove_zspage(class, zspage);
@@ -835,15 +823,11 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
                                struct zspage *zspage)
 {
        struct page *page, *next;
-       int fg;
-       unsigned int class_idx;
-
-       get_zspage_mapping(zspage, &class_idx, &fg);
 
        assert_spin_locked(&pool->lock);
 
        VM_BUG_ON(get_zspage_inuse(zspage));
-       VM_BUG_ON(fg != ZS_INUSE_RATIO_0);
+       VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
 
        next = page = get_first_page(zspage);
        do {
@@ -1857,8 +1841,6 @@ static void async_free_zspage(struct work_struct *work)
 {
        int i;
        struct size_class *class;
-       unsigned int class_idx;
-       int fullness;
        struct zspage *zspage, *tmp;
        LIST_HEAD(free_pages);
        struct zs_pool *pool = container_of(work, struct zs_pool,
@@ -1879,10 +1861,8 @@ static void async_free_zspage(struct work_struct *work)
                list_del(&zspage->list);
                lock_zspage(zspage);
 
-               get_zspage_mapping(zspage, &class_idx, &fullness);
-               VM_BUG_ON(fullness != ZS_INUSE_RATIO_0);
-               class = pool->size_class[class_idx];
                spin_lock(&pool->lock);
+               class = zspage_class(pool, zspage);
                __free_zspage(pool, class, zspage);
                spin_unlock(&pool->lock);
        }