erofs: get rid of ->lru usage
authorGao Xiang <hsiangkao@linux.alibaba.com>
Fri, 22 Oct 2021 09:01:20 +0000 (17:01 +0800)
committerGao Xiang <hsiangkao@linux.alibaba.com>
Mon, 25 Oct 2021 00:22:59 +0000 (08:22 +0800)
Currently, ->lru is a way to arrange non-LRU pages and has some
in-kernel users. In order to minimize noticeable issues of page
reclaim and cache thrashing under high memory pressure, limited
temporary pages were all chained with ->lru and can be reused
during the request. However, it seems that ->lru could be removed
when folio is landing.

Let's use page->private to chain temporary pages for now instead
and transform EROFS formally after the topic of the folio / file
page design is finalized.

Link: https://lore.kernel.org/r/20211022090120.14675-1-hsiangkao@linux.alibaba.com
Cc: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
fs/erofs/compress.h
fs/erofs/decompressor.c
fs/erofs/decompressor_lzma.c
fs/erofs/internal.h
fs/erofs/pcpubuf.c
fs/erofs/utils.c
fs/erofs/zdata.c

index 8ea6a9b149626b860c94f2792b02555550ea60cf..5794065049190500764004360e9476f3d829aaaf 100644 (file)
@@ -22,7 +22,7 @@ struct z_erofs_decompress_req {
 
 struct z_erofs_decompressor {
        int (*decompress)(struct z_erofs_decompress_req *rq,
-                         struct list_head *pagepool);
+                         struct page **pagepool);
        char *name;
 };
 
@@ -64,7 +64,7 @@ static inline bool z_erofs_is_shortlived_page(struct page *page)
        return true;
 }
 
-static inline bool z_erofs_put_shortlivedpage(struct list_head *pagepool,
+static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
                                              struct page *page)
 {
        if (!z_erofs_is_shortlived_page(page))
@@ -75,8 +75,7 @@ static inline bool z_erofs_put_shortlivedpage(struct list_head *pagepool,
                put_page(page);
        } else {
                /* follow the pcluster rule above. */
-               set_page_private(page, 0);
-               list_add(&page->lru, pagepool);
+               erofs_pagepool_add(pagepool, page);
        }
        return true;
 }
@@ -89,9 +88,9 @@ static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
 }
 
 int z_erofs_decompress(struct z_erofs_decompress_req *rq,
-                      struct list_head *pagepool);
+                      struct page **pagepool);
 
 /* prototypes for specific algorithms */
 int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
-                           struct list_head *pagepool);
+                           struct page **pagepool);
 #endif
index 8a624d73c1856755b2a31b9303fdb2e1ee1d7c0e..a0786b95cdf99c7a350fecaf4b2d72b869c750c2 100644 (file)
@@ -57,7 +57,7 @@ int z_erofs_load_lz4_config(struct super_block *sb,
  * all physical pages are consecutive, which can be seen for moderate CR.
  */
 static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
-                                       struct list_head *pagepool)
+                                       struct page **pagepool)
 {
        const unsigned int nr =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -254,7 +254,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
 }
 
 static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
-                                 struct list_head *pagepool)
+                                 struct page **pagepool)
 {
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -296,7 +296,7 @@ dstmap_out:
 }
 
 static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
-                                    struct list_head *pagepool)
+                                    struct page **pagepool)
 {
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -352,7 +352,7 @@ static struct z_erofs_decompressor decompressors[] = {
 };
 
 int z_erofs_decompress(struct z_erofs_decompress_req *rq,
-                      struct list_head *pagepool)
+                      struct page **pagepool)
 {
        return decompressors[rq->alg].decompress(rq, pagepool);
 }
index bd7d9809ecf7c846bdc245ca68551dd4fd78a162..50045510a1f4161876945c31ab72a8970563d788 100644 (file)
@@ -150,7 +150,7 @@ again:
 }
 
 int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
-                           struct list_head *pagepool)
+                           struct page **pagepool)
 {
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
index a6a53d22dfd6d94168d306188c889ede5c5a6299..3265688af7f9f575a40c27355f397b84bfa6aa69 100644 (file)
@@ -499,7 +499,14 @@ void erofs_pcpubuf_init(void);
 void erofs_pcpubuf_exit(void);
 
 /* utils.c / zdata.c */
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
+struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
+static inline void erofs_pagepool_add(struct page **pagepool,
+               struct page *page)
+{
+       set_page_private(page, (unsigned long)*pagepool);
+       *pagepool = page;
+}
+void erofs_release_pages(struct page **pagepool);
 
 #ifdef CONFIG_EROFS_FS_ZIP
 int erofs_workgroup_put(struct erofs_workgroup *grp);
index 6c885575128ac1a4d9512832771815f4c0ccb847..a2efd833d1b6c32791c3ed380d5792361160fd05 100644 (file)
@@ -49,7 +49,7 @@ int erofs_pcpubuf_growsize(unsigned int nrpages)
 {
        static DEFINE_MUTEX(pcb_resize_mutex);
        static unsigned int pcb_nrpages;
-       LIST_HEAD(pagepool);
+       struct page *pagepool = NULL;
        int delta, cpu, ret, i;
 
        mutex_lock(&pcb_resize_mutex);
@@ -102,13 +102,13 @@ int erofs_pcpubuf_growsize(unsigned int nrpages)
                        vunmap(old_ptr);
 free_pagearray:
                while (i)
-                       list_add(&oldpages[--i]->lru, &pagepool);
+                       erofs_pagepool_add(&pagepool, oldpages[--i]);
                kfree(oldpages);
                if (ret)
                        break;
        }
        pcb_nrpages = nrpages;
-       put_pages_list(&pagepool);
+       erofs_release_pages(&pagepool);
 out:
        mutex_unlock(&pcb_resize_mutex);
        return ret;
index bd86067a63f7fc5e2e2310294241e50cf8af8ab2..84da2c28001298848bbf3eca237f5ff4349422c5 100644 (file)
@@ -6,20 +6,29 @@
 #include "internal.h"
 #include <linux/pagevec.h>
 
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
+struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
 {
-       struct page *page;
+       struct page *page = *pagepool;
 
-       if (!list_empty(pool)) {
-               page = lru_to_page(pool);
+       if (page) {
                DBG_BUGON(page_ref_count(page) != 1);
-               list_del(&page->lru);
+               *pagepool = (struct page *)page_private(page);
        } else {
                page = alloc_page(gfp);
        }
        return page;
 }
 
+void erofs_release_pages(struct page **pagepool)
+{
+       while (*pagepool) {
+               struct page *page = *pagepool;
+
+               *pagepool = (struct page *)page_private(page);
+               put_page(page);
+       }
+}
+
 #ifdef CONFIG_EROFS_FS_ZIP
 /* global shrink count (for all mounted EROFS instances) */
 static atomic_long_t erofs_global_shrink_cnt;
index d55e6215cd444c0289e9f2a578ec368579dd1193..bcb1b91b234fb3ed8363753860d3f7e3a5cb01f2 100644 (file)
@@ -236,7 +236,7 @@ static DEFINE_MUTEX(z_pagemap_global_lock);
 static void preload_compressed_pages(struct z_erofs_collector *clt,
                                     struct address_space *mc,
                                     enum z_erofs_cache_alloctype type,
-                                    struct list_head *pagepool)
+                                    struct page **pagepool)
 {
        struct z_erofs_pcluster *pcl = clt->pcl;
        bool standalone = true;
@@ -287,12 +287,10 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
                if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
                        continue;
 
-               if (page) {
+               if (page)
                        put_page(page);
-               } else if (newpage) {
-                       set_page_private(newpage, 0);
-                       list_add(&newpage->lru, pagepool);
-               }
+               else if (newpage)
+                       erofs_pagepool_add(pagepool, newpage);
        }
 
        /*
@@ -643,7 +641,7 @@ static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
 }
 
 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
-                               struct page *page, struct list_head *pagepool)
+                               struct page *page, struct page **pagepool)
 {
        struct inode *const inode = fe->inode;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@@ -836,7 +834,7 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
 
 static int z_erofs_decompress_pcluster(struct super_block *sb,
                                       struct z_erofs_pcluster *pcl,
-                                      struct list_head *pagepool)
+                                      struct page **pagepool)
 {
        struct erofs_sb_info *const sbi = EROFS_SB(sb);
        struct z_erofs_pagevec_ctor ctor;
@@ -1036,7 +1034,7 @@ out:
 }
 
 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
-                                    struct list_head *pagepool)
+                                    struct page **pagepool)
 {
        z_erofs_next_pcluster_t owned = io->head;
 
@@ -1060,18 +1058,18 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
 {
        struct z_erofs_decompressqueue *bgq =
                container_of(work, struct z_erofs_decompressqueue, u.work);
-       LIST_HEAD(pagepool);
+       struct page *pagepool = NULL;
 
        DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
        z_erofs_decompress_queue(bgq, &pagepool);
 
-       put_pages_list(&pagepool);
+       erofs_release_pages(&pagepool);
        kvfree(bgq);
 }
 
 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
                                               unsigned int nr,
-                                              struct list_head *pagepool,
+                                              struct page **pagepool,
                                               struct address_space *mc,
                                               gfp_t gfp)
 {
@@ -1173,7 +1171,7 @@ repeat:
 out_allocpage:
        page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
        if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
-               list_add(&page->lru, pagepool);
+               erofs_pagepool_add(pagepool, page);
                cond_resched();
                goto repeat;
        }
@@ -1257,7 +1255,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 
 static void z_erofs_submit_queue(struct super_block *sb,
                                 struct z_erofs_decompress_frontend *f,
-                                struct list_head *pagepool,
+                                struct page **pagepool,
                                 struct z_erofs_decompressqueue *fgq,
                                 bool *force_fg)
 {
@@ -1365,7 +1363,7 @@ submit_bio_retry:
 
 static void z_erofs_runqueue(struct super_block *sb,
                             struct z_erofs_decompress_frontend *f,
-                            struct list_head *pagepool, bool force_fg)
+                            struct page **pagepool, bool force_fg)
 {
        struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
@@ -1394,7 +1392,7 @@ static void z_erofs_runqueue(struct super_block *sb,
 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
                                      struct readahead_control *rac,
                                      erofs_off_t end,
-                                     struct list_head *pagepool,
+                                     struct page **pagepool,
                                      bool backmost)
 {
        struct inode *inode = f->inode;
@@ -1457,8 +1455,8 @@ static int z_erofs_readpage(struct file *file, struct page *page)
 {
        struct inode *const inode = page->mapping->host;
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+       struct page *pagepool = NULL;
        int err;
-       LIST_HEAD(pagepool);
 
        trace_erofs_readpage(page, false);
        f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
@@ -1479,8 +1477,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
        if (f.map.mpage)
                put_page(f.map.mpage);
 
-       /* clean up the remaining free pages */
-       put_pages_list(&pagepool);
+       erofs_release_pages(&pagepool);
        return err;
 }
 
@@ -1489,9 +1486,8 @@ static void z_erofs_readahead(struct readahead_control *rac)
        struct inode *const inode = rac->mapping->host;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
-       struct page *page, *head = NULL;
+       struct page *pagepool = NULL, *head = NULL, *page;
        unsigned int nr_pages;
-       LIST_HEAD(pagepool);
 
        f.readahead = true;
        f.headoffset = readahead_pos(rac);
@@ -1528,9 +1524,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
                         nr_pages <= sbi->opt.max_sync_decompress_pages);
        if (f.map.mpage)
                put_page(f.map.mpage);
-
-       /* clean up the remaining free pages */
-       put_pages_list(&pagepool);
+       erofs_release_pages(&pagepool);
 }
 
 const struct address_space_operations z_erofs_aops = {