erofs: use sync decompression for atomic contexts only
authorHuang Jianan <huangjianan@oppo.com>
Wed, 17 Mar 2021 03:54:48 +0000 (11:54 +0800)
committerGao Xiang <hsiangkao@redhat.com>
Mon, 29 Mar 2021 02:18:01 +0000 (10:18 +0800)
Sync decompression was introduced to get rid of additional kworker
scheduling overhead. But there is no such overhead in non-atomic
contexts. Therefore, it is better to turn off sync decompression
there, so the current thread does not wait in z_erofs_runqueue.

Link: https://lore.kernel.org/r/20210317035448.13921-3-huangjianan@oppo.com
Reviewed-by: Gao Xiang <hsiangkao@redhat.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Huang Jianan <huangjianan@oppo.com>
Signed-off-by: Guo Weichao <guoweichao@oppo.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
fs/erofs/internal.h
fs/erofs/super.c
fs/erofs/zdata.c

index 351dae524a0c9542d4afae678d9e4a54c331423f..30e63b73a675afef0b75aea1ab3d7743a3c82b6f 100644 (file)
@@ -50,6 +50,8 @@ struct erofs_fs_context {
 #ifdef CONFIG_EROFS_FS_ZIP
        /* current strategy of how to use managed cache */
        unsigned char cache_strategy;
+       /* strategy of sync decompression (false - auto, true - force on) */
+       bool readahead_sync_decompress;
 
        /* threshold for decompression synchronously */
        unsigned int max_sync_decompress_pages;
index d5a6b9b888a562bae6b30e80162be2bea7f7fc55..0445d09b633103de511dd0c3fd1ead1cb5364be4 100644 (file)
@@ -200,6 +200,7 @@ static void erofs_default_options(struct erofs_fs_context *ctx)
 #ifdef CONFIG_EROFS_FS_ZIP
        ctx->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
        ctx->max_sync_decompress_pages = 3;
+       ctx->readahead_sync_decompress = false;
 #endif
 #ifdef CONFIG_EROFS_FS_XATTR
        set_opt(ctx, XATTR_USER);
index 9530b611f94b54ba82dba39e517491fe0a12dec5..7ab8a4e3dfcb824b75e4777546238bcf73046952 100644 (file)
@@ -710,6 +710,8 @@ static void z_erofs_decompressqueue_work(struct work_struct *work);
 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
                                       bool sync, int bios)
 {
+       struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+
        /* wake up the caller thread for sync decompression */
        if (sync) {
                unsigned long flags;
@@ -723,9 +725,10 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 
        if (atomic_add_return(bios, &io->pending_bios))
                return;
-       /* Use workqueue decompression for atomic contexts only */
+       /* Use workqueue and sync decompression for atomic contexts only */
        if (in_atomic() || irqs_disabled()) {
                queue_work(z_erofs_workqueue, &io->u.work);
+               sbi->ctx.readahead_sync_decompress = true;
                return;
        }
        z_erofs_decompressqueue_work(&io->u.work);
@@ -1340,7 +1343,8 @@ static void z_erofs_readahead(struct readahead_control *rac)
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 
        unsigned int nr_pages = readahead_count(rac);
-       bool sync = (nr_pages <= sbi->ctx.max_sync_decompress_pages);
+       bool sync = (sbi->ctx.readahead_sync_decompress &&
+                       nr_pages <= sbi->ctx.max_sync_decompress_pages);
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
        struct page *page, *head = NULL;
        LIST_HEAD(pagepool);