}
                sh->dev[i].page = page;
                sh->dev[i].orig_page = page;
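+               /* byte offset of this dev's data within ->page */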
+               sh->dev[i].offset = 0;
        }
 
        return 0;
                                sh->dev[i].vec.bv_page = sh->dev[i].page;
                        bi->bi_vcnt = 1;
                        bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
-                       bi->bi_io_vec[0].bv_offset = 0;
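+                       /* start the I/O at this dev's data offset within the page */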
+                       bi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
                        bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
                        bi->bi_write_hint = sh->dev[i].write_hint;
                        if (!rrdev)
                        sh->dev[i].rvec.bv_page = sh->dev[i].page;
                        rbi->bi_vcnt = 1;
                        rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
-                       rbi->bi_io_vec[0].bv_offset = 0;
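+                       /* the replacement-device bio honours the same offset */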
+                       rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
                        rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
                        rbi->bi_write_hint = sh->dev[i].write_hint;
                        sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
        return (void *) (to_addr_page(percpu, i) + sh->disks + 2);
 }
 
+/*
+ * Return a pointer to the array of per-device page offsets, stored in
+ * the scribble buffer immediately after the addr_conv region.
+ */
+static unsigned int *
+to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu)
+{
+       return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2);
+}
+
 static struct dma_async_tx_descriptor *
 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
 {
        int disks = sh->disks;
        struct page **xor_srcs = to_addr_page(percpu, 0);
+       unsigned int *off_srcs = to_addr_offs(sh, percpu);
        int target = sh->ops.target;
        struct r5dev *tgt = &sh->dev[target];
        struct page *xor_dest = tgt->page;
+       unsigned int off_dest = tgt->offset;
        int count = 0;
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
                __func__, (unsigned long long)sh->sector, target);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 
-       for (i = disks; i--; )
-               if (i != target)
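+       /* gather the xor sources together with their page offsets */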
+       for (i = disks; i--; ) {
+               if (i != target) {
+                       off_srcs[count] = sh->dev[i].offset;
                        xor_srcs[count++] = sh->dev[i].page;
+               }
+       }
 
        atomic_inc(&sh->count);
 
        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
                          ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
        if (unlikely(count == 1))
-               tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0,
+               tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0],
                                RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
        else
                tx = async_xor(xor_dest, xor_srcs, 0, count,
 {
        int disks = sh->disks;
        struct page **xor_srcs;
+       unsigned int *off_srcs;
        struct async_submit_ctl submit;
        int count, pd_idx = sh->pd_idx, i;
        struct page *xor_dest;
+       unsigned int off_dest;
        int prexor = 0;
        unsigned long flags;
        int j = 0;
 again:
        count = 0;
        xor_srcs = to_addr_page(percpu, j);
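+       /* a single offsets array is reused across the batch (j) iterations */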
+       off_srcs = to_addr_offs(sh, percpu);
        /* check if prexor is active which means only process blocks
         * that are part of a read-modify-write (written)
         */
        if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
                prexor = 1;
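+               /* for rmw, the parity page/offset is both xor source and destination */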
+               off_dest = off_srcs[count] = sh->dev[pd_idx].offset;
                xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if (head_sh->dev[i].written ||
-                           test_bit(R5_InJournal, &head_sh->dev[i].flags))
+                           test_bit(R5_InJournal, &head_sh->dev[i].flags)) {
+                               off_srcs[count] = dev->offset;
                                xor_srcs[count++] = dev->page;
+                       }
                }
        } else {
                xor_dest = sh->dev[pd_idx].page;
+               off_dest = sh->dev[pd_idx].offset;
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
-                       if (i != pd_idx)
+                       if (i != pd_idx) {
+                               off_srcs[count] = dev->offset;
                                xor_srcs[count++] = dev->page;
+                       }
                }
        }
 
        }
 
        if (unlikely(count == 1))
-               tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0,
+               tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0],
                                RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
        else
                tx = async_xor(xor_dest, xor_srcs, 0, count,
                          int num, int cnt)
 {
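+       /*
+        * Layout of one scribble chunk, as carved up by to_addr_page(),
+        * to_addr_conv() and to_addr_offs(): num + 2 page pointers,
+        * num + 2 addr_conv_t entries, then num + 2 page offsets.
+        */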
        size_t obj_size =
-               sizeof(struct page *) * (num+2) +
-               sizeof(addr_conv_t) * (num+2);
+               sizeof(struct page *) * (num + 2) +
+               sizeof(addr_conv_t) * (num + 2) +
+               sizeof(unsigned int) * (num + 2);
        void *scribble;
 
        /*
                for(i=0; i<conf->pool_size; i++) {
                        nsh->dev[i].page = osh->dev[i].page;
                        nsh->dev[i].orig_page = osh->dev[i].page;
+                       nsh->dev[i].offset = osh->dev[i].offset;
                }
                nsh->hash_lock_index = hash;
                free_stripe(conf->slab_cache, osh);
                                struct page *p = alloc_page(GFP_NOIO);
                                nsh->dev[i].page = p;
                                nsh->dev[i].orig_page = p;
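+                               /* freshly allocated page: data starts at offset 0 */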
+                               nsh->dev[i].offset = 0;
                                if (!p)
                                        err = -ENOMEM;
                        }
                        /* place all the copies on one channel */
                        init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
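+                       /* copy honours both stripes' intra-page offsets */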
                        tx = async_memcpy(sh2->dev[dd_idx].page,
-                                         sh->dev[i].page, 0, 0, RAID5_STRIPE_SIZE(conf),
+                                         sh->dev[i].page, sh2->dev[dd_idx].offset,
+                                         sh->dev[i].offset, RAID5_STRIPE_SIZE(conf),
                                          &submit);
 
                        set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);