                f2fs_update_iostat(F2FS_I_SB(inode), inode,
                                        APP_MAPPED_READ_IO, F2FS_BLKSIZE);
 
-       trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
+       trace_f2fs_filemap_fault(inode, vmf->pgoff, vmf->vma->vm_flags, ret);
 
        return ret;
 }
        struct dnode_of_data dn;
        bool need_alloc = true;
        int err = 0;
+       vm_fault_t ret;
 
        if (unlikely(IS_IMMUTABLE(inode)))
                return VM_FAULT_SIGBUS;
 
-       if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
-               return VM_FAULT_SIGBUS;
+       if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+               err = -EIO;
+               goto out;
+       }
 
        if (unlikely(f2fs_cp_error(sbi))) {
                err = -EIO;
-               goto err;
+               goto out;
        }
 
        if (!f2fs_is_checkpoint_ready(sbi)) {
                err = -ENOSPC;
-               goto err;
+               goto out;
        }
 
        err = f2fs_convert_inline_inode(inode);
        if (err)
-               goto err;
+               goto out;
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
        if (f2fs_compressed_file(inode)) {
 
                if (ret < 0) {
                        err = ret;
-                       goto err;
+                       goto out;
                } else if (ret) {
                        need_alloc = false;
                }
        f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
        f2fs_update_time(sbi, REQ_TIME);
 
-       trace_f2fs_vm_page_mkwrite(page, DATA);
 out_sem:
        filemap_invalidate_unlock_shared(inode->i_mapping);
 
        sb_end_pagefault(inode->i_sb);
-err:
-       return vmf_fs_error(err);
+out:
+       ret = vmf_fs_error(err);
+
+       trace_f2fs_vm_page_mkwrite(inode, page->index, vmf->vma->vm_flags, ret);
+       return ret;
 }
 
 static const struct vm_operations_struct f2fs_file_vm_ops = {
 
        TP_ARGS(page, type)
 );
 
-DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
-
-       TP_PROTO(struct page *page, int type),
-
-       TP_ARGS(page, type)
-);
-
 TRACE_EVENT(f2fs_replace_atomic_write_block,
 
        TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index,
                __entry->recovery)
 );
 
-TRACE_EVENT(f2fs_filemap_fault,
+DECLARE_EVENT_CLASS(f2fs_mmap,
 
-       TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+       TP_PROTO(struct inode *inode, pgoff_t index,
+                       vm_flags_t flags, vm_fault_t ret),
 
-       TP_ARGS(inode, index, ret),
+       TP_ARGS(inode, index, flags, ret),
 
        TP_STRUCT__entry(
                __field(dev_t,  dev)
                __field(ino_t,  ino)
                __field(pgoff_t, index)
-               __field(unsigned long, ret)
+               __field(vm_flags_t, flags)
+               __field(vm_fault_t, ret)
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
                __entry->index  = index;
+               __entry->flags  = flags;
                __entry->ret    = ret;
        ),
 
-       TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx",
+       TP_printk("dev = (%d,%d), ino = %lu, index = %lu, flags: %s, ret: %s",
                show_dev_ino(__entry),
                (unsigned long)__entry->index,
-               __entry->ret)
+               __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
+               __print_flags(__entry->ret, "|", VM_FAULT_RESULT_TRACE))
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_filemap_fault,
+
+       TP_PROTO(struct inode *inode, pgoff_t index,
+                       vm_flags_t flags, vm_fault_t ret),
+
+       TP_ARGS(inode, index, flags, ret)
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_vm_page_mkwrite,
+
+       TP_PROTO(struct inode *inode, pgoff_t index,
+                       vm_flags_t flags, vm_fault_t ret),
+
+       TP_ARGS(inode, index, flags, ret)
 );
 
 TRACE_EVENT(f2fs_writepages,