        return paddr >> HV_HYP_PAGE_SHIFT;
 }
 
+/*
+ * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
+ *
+ * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
+ *
+ * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
+ * (because of the alignment requirement); however, the hypervisor uses
+ * only the first HV_HYP_PAGE_SIZE as the header, leaving a
+ * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
+ * ringbuffer, the total size of a RING gpadl that Hyper-V uses is the
+ * total size that the guest uses minus twice the gap size.
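+ *
+ * For example (an assumed configuration, not one required by this code):
+ * with a 64 KB guest PAGE_SIZE and the 4 KB HV_HYP_PAGE_SIZE, each ring
+ * leaves a 60 KB gap, so a 256 KB guest ring buffer region corresponds
+ * to a 256 KB - 2 * 60 KB = 136 KB RING gpadl for Hyper-V.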
+ */
+static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
+{
+       switch (type) {
+       case HV_GPADL_BUFFER:
+               return size;
+       case HV_GPADL_RING:
+               /* The size of a ringbuffer must be page-aligned */
+               BUG_ON(size % PAGE_SIZE);
+               /*
+                * Two things to notice here:
+                * 1) We're processing two ring buffers as a unit
+                * 2) We're skipping any space beyond HV_HYP_PAGE_SIZE in
+                * the first guest-sized page of each of the two ring buffers.
+                * So we effectively subtract out two guest-sized pages, and
+                * add back two Hyper-V-sized pages.
+                */
+               return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+       }
+       BUG();
+       return 0;
+}
+
+/*
+ * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in units of
+ *                                 HV_HYP_PAGE) in a ring gpadl based on the
+ *                                 offset in the guest
+ *
+ * @offset: the offset (in bytes) where the send ringbuffer starts in the
+ *          virtual address space of the guest
+ */
+static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
+{
+       /*
+        * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
+        * header (because of the alignment requirement); however, the
+        * hypervisor uses only the first HV_HYP_PAGE_SIZE as the header,
+        * leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
+        *
+        * To calculate the effective send offset in the gpadl, we need to
+        * subtract this gap.
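+        *
+        * For example (assuming a 64 KB guest PAGE_SIZE; HV_HYP_PAGE_SIZE
+        * is 4 KB, so the gap is 60 KB): a send ring starting at guest
+        * offset 128 KB starts at Hyper-V page (128 KB - 60 KB) / 4 KB = 17
+        * in the gpadl.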
+        */
+       return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
+}
+
+/*
+ * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i'th Hyper-V page
+ *                  in the gpadl
+ *
+ * @type: the type of the gpadl
+ * @kbuffer: the pointer to the gpadl in the guest
+ * @size: the total size (in bytes) of the gpadl
+ * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
+ *               virtual address space of the guest
+ * @i: the index of the Hyper-V page in the gpadl
+ */
+static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
+                                u32 size, u32 send_offset, int i)
+{
+       int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
+       unsigned long delta = 0UL;
+
+       switch (type) {
+       case HV_GPADL_BUFFER:
+               break;
+       case HV_GPADL_RING:
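+               /*
+                * Hyper-V page 0 lies in the header page of the first
+                * ring; pages 1 .. send_idx skip the unused tail of that
+                * header page (one gap) and cover the first ring's data
+                * plus the second ring's header; pages after send_idx
+                * skip both rings' gaps.
+                */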
+               if (i == 0)
+                       delta = 0;
+               else if (i <= send_idx)
+                       delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
+               else
+                       delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
+}
+
 /*
  * vmbus_setevent - Trigger an event notification on the specified
  * channel.
 /*
  * create_gpadl_header - Creates a gpadl for the specified buffer
  */
-static int create_gpadl_header(void *kbuffer, u32 size,
+static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
+                              u32 size, u32 send_offset,
                               struct vmbus_channel_msginfo **msginfo)
 {
        int i;
 
        int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
 
-       pagecount = size >> HV_HYP_PAGE_SHIFT;
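+       /* The number of HV_HYP_PAGE-sized pages Hyper-V sees in the gpadl */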
+       pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
 
        /* do we need a gpadl body msg */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
-               gpadl_header->range[0].byte_count = size;
+               gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
                for (i = 0; i < pfncount; i++)
-                       gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
-                               kbuffer + HV_HYP_PAGE_SIZE * i);
+                       gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+                               type, kbuffer, size, send_offset, i);
                *msginfo = msgheader;
 
                pfnsum = pfncount;
                         * so the hypervisor guarantees that this is ok.
                         */
                        for (i = 0; i < pfncurr; i++)
-                               gpadl_body->pfn[i] = virt_to_hvpfn(
-                                       kbuffer + HV_HYP_PAGE_SIZE * (pfnsum + i));
+                               gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+                                       kbuffer, size, send_offset, pfnsum + i);
 
                        /* add to msg header */
                        list_add_tail(&msgbody->msglistentry,
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
-               gpadl_header->range[0].byte_count = size;
+               gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
                for (i = 0; i < pagecount; i++)
-                       gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
-                               kbuffer + HV_HYP_PAGE_SIZE * i);
+                       gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+                               type, kbuffer, size, send_offset, i);
 
                *msginfo = msgheader;
        }
 }
 
 /*
- * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
  *
  * @channel: a channel
+ * @type: the type of the corresponding GPADL, only meaningful for the guest.
  * @kbuffer: from kmalloc or vmalloc
  * @size: page-size multiple
+ * @send_offset: the offset (in bytes) where the send ring buffer starts;
+ *              must be 0 for a BUFFER type gpadl
  * @gpadl_handle: the handle of the established GPADL, filled in on success
  */
-int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
-                              u32 size, u32 *gpadl_handle)
+static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+                                  enum hv_gpadl_type type, void *kbuffer,
+                                  u32 size, u32 send_offset,
+                                  u32 *gpadl_handle)
 {
        struct vmbus_channel_gpadl_header *gpadlmsg;
        struct vmbus_channel_gpadl_body *gpadl_body;
        next_gpadl_handle =
                (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
 
-       ret = create_gpadl_header(kbuffer, size, &msginfo);
+       ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
        if (ret)
                return ret;
 
        kfree(msginfo);
        return ret;
 }
+
+/*
+ * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ *
+ * @channel: a channel
+ * @kbuffer: from kmalloc or vmalloc
+ * @size: page-size multiple
+ * @gpadl_handle: the handle of the established GPADL, filled in on success
+ */
+int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+                         u32 size, u32 *gpadl_handle)
+{
+       return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
+                                      0U, gpadl_handle);
+}
 EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
 
 static int __vmbus_open(struct vmbus_channel *newchannel,
        /* Establish the gpadl for the ring buffer */
        newchannel->ringbuffer_gpadlhandle = 0;
 
-       err = vmbus_establish_gpadl(newchannel,
-                                   page_address(newchannel->ringbuffer_page),
-                                   (send_pages + recv_pages) << PAGE_SHIFT,
-                                   &newchannel->ringbuffer_gpadlhandle);
+       err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
+                                     page_address(newchannel->ringbuffer_page),
+                                     (send_pages + recv_pages) << PAGE_SHIFT,
+                                     newchannel->ringbuffer_send_offset << PAGE_SHIFT,
+                                     &newchannel->ringbuffer_gpadlhandle);
        if (err)
                goto error_clean_ring;
 
        open_msg->openid = newchannel->offermsg.child_relid;
        open_msg->child_relid = newchannel->offermsg.child_relid;
        open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
-       open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
+       /*
+        * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
+        * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
+        * here we convert it into units of HV_HYP_PAGE.
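+        *
+        * (For example, assuming 16 KB guest pages and a 4-page send ring:
+        * send_pages << PAGE_SHIFT = 64 KB, the gap is 12 KB, so the
+        * downstream ring starts at Hyper-V page (64 KB - 12 KB) / 4 KB =
+        * 13 of the gpadl.)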
+        */
+       open_msg->downstream_ringbuffer_pageoffset =
+               hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
        open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
 
        if (userdatalen)
 }
 EXPORT_SYMBOL_GPL(vmbus_open);
 
-
 /*
  * vmbus_teardown_gpadl -Teardown the specified GPADL handle
  */