unsigned long   logic_idx;
 };
 
+/* Kinds of userspace mmap regions keyed by pgoff (DB = 0, TPTR = 1). */
+enum hns_roce_mmap_type {
+       HNS_ROCE_MMAP_TYPE_DB = 1,
+       HNS_ROCE_MMAP_TYPE_TPTR,
+};
+
+/*
+ * Driver-private mmap entry: wraps the rdma core entry and records the
+ * backing address and region type consumed by hns_roce_mmap().
+ */
+struct hns_user_mmap_entry {
+       struct rdma_user_mmap_entry rdma_entry;
+       enum hns_roce_mmap_type mmap_type;
+       u64 address;
+};
+
 struct hns_roce_ucontext {
        struct ib_ucontext      ibucontext;
        struct hns_roce_uar     uar;
        struct list_head        page_list;
        struct mutex            page_mutex;
+       /* pgoff-keyed mmap entries for the doorbell and TPTR pages */
+       struct hns_user_mmap_entry *db_mmap_entry;
+       struct hns_user_mmap_entry *tptr_mmap_entry;
 };
 
 struct hns_roce_pd {
        return container_of(ibsrq, struct hns_roce_srq, ibsrq);
 }
 
+/* Convert a core rdma mmap entry to the enclosing hns wrapper. */
+static inline struct hns_user_mmap_entry *
+to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+       return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
+}
+
 static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
 {
        writeq(*(u64 *)val, dest);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
 int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
                               struct ib_cq *ib_cq);
+struct hns_user_mmap_entry *
+hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+                               size_t length,
+                               enum hns_roce_mmap_type mmap_type);
 #endif /* _HNS_ROCE_DEVICE_H */
 
        return 0;
 }
 
+/*
+ * Allocate an hns_user_mmap_entry and register it with the rdma core at
+ * a fixed pgoff (0 for the doorbell page, 1 for the TPTR page), matching
+ * the offsets the old open-coded mmap handler used.
+ *
+ * @address: backing address recorded for later pfn computation in mmap.
+ * Returns the new entry on success, or NULL on allocation or insertion
+ * failure; the caller releases it with rdma_user_mmap_entry_remove().
+ */
+struct hns_user_mmap_entry *
+hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+                               size_t length,
+                               enum hns_roce_mmap_type mmap_type)
+{
+       struct hns_user_mmap_entry *entry;
+       int ret;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return NULL;
+
+       entry->address = address;
+       entry->mmap_type = mmap_type;
+
+       /* Pin DB at pgoff 0 and TPTR at pgoff 1 (the legacy layout). */
+       ret = rdma_user_mmap_entry_insert_exact(
+               ucontext, &entry->rdma_entry, length,
+               mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1);
+       if (ret) {
+               kfree(entry);
+               return NULL;
+       }
+
+       return entry;
+}
+
+/* Remove the ucontext's doorbell and TPTR mmap entries, if created. */
+static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
+{
+       if (context->db_mmap_entry)
+               rdma_user_mmap_entry_remove(
+                       &context->db_mmap_entry->rdma_entry);
+
+       if (context->tptr_mmap_entry)
+               rdma_user_mmap_entry_remove(
+                       &context->tptr_mmap_entry->rdma_entry);
+}
+
+/*
+ * Create the ucontext's mmap entries: always the UAR doorbell page, plus
+ * the TPTR region when the device provides one.
+ * Returns 0 on success or -ENOMEM on failure (doorbell entry rolled back
+ * via hns_roce_dealloc_uar_entry() if the TPTR insertion fails).
+ */
+static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
+{
+       struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+       struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
+       u64 address;
+       int ret;
+
+       address = context->uar.pfn << PAGE_SHIFT;
+       context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
+               uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
+       if (!context->db_mmap_entry)
+               return -ENOMEM;
+
+       /* The TPTR region is optional; done if the device has none. */
+       if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+               return 0;
+
+       /*
+        * FIXME: using io_remap_pfn_range on the dma address returned
+        * by dma_alloc_coherent is totally wrong.
+        */
+       context->tptr_mmap_entry =
+               hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
+                                               hr_dev->tptr_size,
+                                               HNS_ROCE_MMAP_TYPE_TPTR);
+       if (!context->tptr_mmap_entry) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       return 0;
+
+err:
+       hns_roce_dealloc_uar_entry(context);
+       return ret;
+}
+
 static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
                                   struct ib_udata *udata)
 {
        if (ret)
                goto error_fail_uar_alloc;
 
+       ret = hns_roce_alloc_uar_entry(uctx);
+       if (ret)
+               goto error_fail_uar_entry;
+
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
            hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
                INIT_LIST_HEAD(&context->page_list);
        return 0;
 
 error_fail_copy_to_udata:
+       hns_roce_dealloc_uar_entry(context);
+
+error_fail_uar_entry:
        ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 
 error_fail_uar_alloc:
        struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
        struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
 
+       hns_roce_dealloc_uar_entry(context);
+
        ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 }
 
-static int hns_roce_mmap(struct ib_ucontext *context,
-                        struct vm_area_struct *vma)
+/*
+ * mmap handler: look up the entry registered for vma->vm_pgoff and map
+ * its backing pages into userspace via rdma_user_mmap_io().
+ */
+static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
-
-       switch (vma->vm_pgoff) {
-       case 0:
-               return rdma_user_mmap_io(context, vma,
-                                        to_hr_ucontext(context)->uar.pfn,
-                                        PAGE_SIZE,
-                                        pgprot_noncached(vma->vm_page_prot),
-                                        NULL);
-
-       /* vm_pgoff: 1 -- TPTR */
-       case 1:
-               if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
-                       return -EINVAL;
-               /*
-                * FIXME: using io_remap_pfn_range on the dma address returned
-                * by dma_alloc_coherent is totally wrong.
-                */
-               return rdma_user_mmap_io(context, vma,
-                                        hr_dev->tptr_dma_addr >> PAGE_SHIFT,
-                                        hr_dev->tptr_size,
-                                        vma->vm_page_prot,
-                                        NULL);
+       struct rdma_user_mmap_entry *rdma_entry;
+       struct hns_user_mmap_entry *entry;
+       phys_addr_t pfn;
+       pgprot_t prot;
+       int ret;
 
-       default:
+       rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
+       if (!rdma_entry)
                return -EINVAL;
-       }
+
+       entry = to_hns_mmap(rdma_entry);
+       pfn = entry->address >> PAGE_SHIFT;
+       prot = vma->vm_page_prot;
+
+       /* Match the old behavior: only the TPTR mapping stays cached;
+        * the doorbell mapping is made non-cached, as pgoff 0 was before.
+        */
+       if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
+               prot = pgprot_noncached(prot);
+
+       ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
+                               prot, rdma_entry);
+
+       /* Drop the reference taken by rdma_user_mmap_entry_get_pgoff(). */
+       rdma_user_mmap_entry_put(rdma_entry);
+
+       return ret;
+}
+
+/*
+ * .mmap_free callback: the rdma core invokes this when the entry's last
+ * reference is dropped; release the driver wrapper.
+ */
+static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+       struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);
+
+       kfree(entry);
 }
 
 static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
        .get_link_layer = hns_roce_get_link_layer,
        .get_port_immutable = hns_roce_port_immutable,
        .mmap = hns_roce_mmap,
+       .mmap_free = hns_roce_free_mmap,
        .modify_device = hns_roce_modify_device,
        .modify_qp = hns_roce_modify_qp,
        .query_ah = hns_roce_query_ah,