erofs: get rid of __stagingpage_alloc helper
authorGao Xiang <gaoxiang25@huawei.com>
Thu, 21 Nov 2019 13:59:54 +0000 (21:59 +0800)
committerGao Xiang <gaoxiang25@huawei.com>
Sun, 24 Nov 2019 02:57:48 +0000 (10:57 +0800)
The open-coded version is now much cleaner thanks to iterative development, so get rid of the __stagingpage_alloc helper.

Link: https://lore.kernel.org/r/20191124025217.12345-1-hsiangkao@aol.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
fs/erofs/decompressor.c
fs/erofs/internal.h
fs/erofs/utils.c
fs/erofs/zdata.c

index 19f89f9fb10c00b395ab82108a01c50d7b35908a..2890a67a1ded61d4eca977c64a3b19eb8319b801 100644 (file)
@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
                        victim = availables[--top];
                        get_page(victim);
                } else {
-                       victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
+                       victim = erofs_allocpage(pagepool, GFP_KERNEL);
                        if (!victim)
                                return -ENOMEM;
                        victim->mapping = Z_EROFS_MAPPING_STAGING;
index 544a453f3076ca8c288fdf98f78ab16260fc7cde..0c1175a08e54c8c81e37294fe07ddf5fc20b7437 100644 (file)
@@ -382,7 +382,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
 extern const struct file_operations erofs_dir_fops;
 
 /* utils.c / zdata.c */
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
 
 #if (EROFS_PCPUBUF_NR_PAGES > 0)
 void *erofs_get_pcpubuf(unsigned int pagenr);
index f66043ee16b9c66590bd860fa42c8aa37a0901a5..1e8e1450d5b07ad3a1956b6836ef07435da12b55 100644 (file)
@@ -7,7 +7,7 @@
 #include "internal.h"
 #include <linux/pagevec.h>
 
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
 {
        struct page *page;
 
@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
                DBG_BUGON(page_ref_count(page) != 1);
                list_del(&page->lru);
        } else {
-               page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
+               page = alloc_page(gfp);
        }
        return page;
 }
index 93f8bc1a64f61b0cfcd618a9bee14172d4a566e4..1c582a3a40a38aee184d8cc8a3c4dc1da1056c2c 100644 (file)
@@ -546,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt)
        return true;
 }
 
-static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
-                                              gfp_t gfp)
-{
-       struct page *page = erofs_allocpage(pagepool, gfp, true);
-
-       page->mapping = Z_EROFS_MAPPING_STAGING;
-       return page;
-}
-
 static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
                                       unsigned int cachestrategy,
                                       erofs_off_t la)
@@ -661,8 +652,9 @@ retry:
        /* should allocate an additional staging page for pagevec */
        if (err == -EAGAIN) {
                struct page *const newpage =
-                       __stagingpage_alloc(pagepool, GFP_NOFS);
+                       erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
 
+               newpage->mapping = Z_EROFS_MAPPING_STAGING;
                err = z_erofs_attach_page(clt, newpage,
                                          Z_EROFS_PAGE_TYPE_EXCLUSIVE);
                if (!err)
@@ -1079,19 +1071,24 @@ repeat:
        unlock_page(page);
        put_page(page);
 out_allocpage:
-       page = __stagingpage_alloc(pagepool, gfp);
-       if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
-               list_add(&page->lru, pagepool);
-               cpu_relax();
-               goto repeat;
-       }
-       if (!tocache)
-               goto out;
-       if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+       page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
+       if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+               /* non-LRU / non-movable temporary page is needed */
                page->mapping = Z_EROFS_MAPPING_STAGING;
-               goto out;
+               tocache = false;
        }
 
+       if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
+               if (tocache) {
+                       /* since it added to managed cache successfully */
+                       unlock_page(page);
+                       put_page(page);
+               } else {
+                       list_add(&page->lru, pagepool);
+               }
+               cond_resched();
+               goto repeat;
+       }
        set_page_private(page, (unsigned long)pcl);
        SetPagePrivate(page);
 out:   /* the only exit (for tracing and debugging) */