/*
  * Copyright (C) 2017-2018 HUAWEI, Inc.
  *             https://www.huawei.com/
+ * Copyright (C) 2021, Alibaba Cloud
  */
 #include "internal.h"
 #include <linux/prefetch.h>
        nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
        lastblk = nblocks - tailendpacking;
 
-       if (offset >= inode->i_size) {
-               /* leave out-of-bound access unmapped */
-               map->m_flags = 0;
-               map->m_plen = 0;
-               goto out;
-       }
-
        /* there is no hole in flatmode */
        map->m_flags = EROFS_MAP_MAPPED;
 
                goto err_out;
        }
 
-out:
        map->m_llen = map->m_plen;
-
 err_out:
        trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
        return err;
 }
 
+static int erofs_map_blocks(struct inode *inode,
+                           struct erofs_map_blocks *map, int flags)
+{
+       struct super_block *sb = inode->i_sb;
+       struct erofs_inode *vi = EROFS_I(inode);
+       struct erofs_inode_chunk_index *idx;
+       struct page *page;
+       u64 chunknr;
+       unsigned int unit;
+       erofs_off_t pos;
+       int err = 0;
+
+       if (map->m_la >= inode->i_size) {
+               /* leave out-of-bound access unmapped */
+               map->m_flags = 0;
+               map->m_plen = 0;
+               goto out;
+       }
+
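+       /* non chunk-based inodes keep using the flat/tail-packing mapping path */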
+       if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
+               return erofs_map_blocks_flatmode(inode, map, flags);
+
+       if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
+               unit = sizeof(*idx);                    /* chunk index */
+       else
+               unit = EROFS_BLOCK_MAP_ENTRY_SIZE;      /* block map */
+
+       chunknr = map->m_la >> vi->chunkbits;
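+       /*
+        * the chunk map (raw block addresses or chunk indexes) is stored
+        * right after the inode base and xattrs, aligned to the entry size
+        */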
+       pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
+                   vi->xattr_isize, unit) + unit * chunknr;
+
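+       /* read the metadata block holding this chunk's map entry (returned locked) */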
+       page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos));
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
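+       /*
+        * round the logical offset down to the chunk boundary; the extent
+        * covers at most one chunk and is block-aligned truncated at EOF
+        */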
+       map->m_la = chunknr << vi->chunkbits;
+       map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
+                           roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));
+
+       /* handle block map */
+       if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
+               __le32 *blkaddr = page_address(page) + erofs_blkoff(pos);
+
+               if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
+                       map->m_flags = 0;
+               } else {
+                       map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
+                       map->m_flags = EROFS_MAP_MAPPED;
+               }
+               goto out_unlock;
+       }
+       /* parse chunk indexes */
+       idx = page_address(page) + erofs_blkoff(pos);
+       switch (le32_to_cpu(idx->blkaddr)) {
+       case EROFS_NULL_ADDR:
+               map->m_flags = 0;
+               break;
+       default:
+               /* only one device is supported for now */
+               if (idx->device_id) {
+                       erofs_err(sb, "invalid device id %u @ %llu for nid %llu",
+                                 le16_to_cpu(idx->device_id),
+                                 chunknr, vi->nid);
+                       err = -EFSCORRUPTED;
+                       goto out_unlock;
+               }
+               map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
+               map->m_flags = EROFS_MAP_MAPPED;
+               break;
+       }
+out_unlock:
+       unlock_page(page);
+       put_page(page);
+out:
+       map->m_llen = map->m_plen;
+       return err;
+}
+
 static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
 {
        map.m_la = offset;
        map.m_llen = length;
 
-       ret = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
+       ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
        if (ret < 0)
                return ret;
 
 
 /*
  * Copyright (C) 2017-2018 HUAWEI, Inc.
  *             https://www.huawei.com/
+ * Copyright (C) 2021, Alibaba Cloud
  */
 #include "xattr.h"
 
                /* total blocks for compressed files */
                if (erofs_inode_is_data_compressed(vi->datalayout))
                        nblks = le32_to_cpu(die->i_u.compressed_blocks);
-
+               else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
+                       /* fill chunked inode summary info */
+                       vi->chunkformat = le16_to_cpu(die->i_u.c.format);
                kfree(copied);
                break;
        case EROFS_INODE_LAYOUT_COMPACT:
                inode->i_size = le32_to_cpu(dic->i_size);
                if (erofs_inode_is_data_compressed(vi->datalayout))
                        nblks = le32_to_cpu(dic->i_u.compressed_blocks);
+               else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
+                       vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
                break;
        default:
                erofs_err(inode->i_sb,
                goto err_out;
        }
 
+       if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
+               if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
+                       erofs_err(inode->i_sb,
+                                 "unsupported chunk format %x of nid %llu",
+                                 vi->chunkformat, vi->nid);
+                       err = -EOPNOTSUPP;
+                       goto err_out;
+               }
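+               /* chunk size = block size << blkbits encoded in chunkformat */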
+               vi->chunkbits = LOG_BLOCK_SIZE +
+                       (vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
+       }
        inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
        inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
        inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;