freq->sg_table.sgl = freq->first_sgl;
        ret = sg_alloc_table_chained(&freq->sg_table,
-                       blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
+                       blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
+                       SG_CHUNK_SIZE);
        if (ret)
                return -ENOMEM;
 
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, dir);
        if (unlikely(freq->sg_cnt <= 0)) {
-               sg_free_table_chained(&freq->sg_table, true);
+               sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
                freq->sg_cnt = 0;
                return -EFAULT;
        }
 
        nvme_cleanup_cmd(rq);
 
-       sg_free_table_chained(&freq->sg_table, true);
+       sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
 
        freq->sg_cnt = 0;
 }
 
                                    WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
        nvme_cleanup_cmd(rq);
-       sg_free_table_chained(&req->sg_table, true);
+       sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
 
        req->sg_table.sgl = req->first_sgl;
        ret = sg_alloc_table_chained(&req->sg_table,
-                       blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
+                       blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
+                       SG_CHUNK_SIZE);
        if (ret)
                return -ENOMEM;
 
                        req->nents, rq_data_dir(rq) ==
                        WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 out_free_table:
-       sg_free_table_chained(&req->sg_table, true);
+       sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
        return ret;
 }
 
 
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
        nvme_cleanup_cmd(req);
-       sg_free_table_chained(&iod->sg_table, true);
+       sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
        nvme_complete_rq(req);
 }
 
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
-                               iod->sg_table.sgl))
+                               iod->sg_table.sgl, SG_CHUNK_SIZE))
                        return BLK_STS_RESOURCE;
 
                iod->req.sg = iod->sg_table.sgl;
 
 static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
 {
        if (cmd->sdb.table.nents)
-               sg_free_table_chained(&cmd->sdb.table, true);
+               sg_free_table_chained(&cmd->sdb.table, SG_CHUNK_SIZE);
        if (scsi_prot_sg_count(cmd))
-               sg_free_table_chained(&cmd->prot_sdb->table, true);
+               sg_free_table_chained(&cmd->prot_sdb->table, SG_CHUNK_SIZE);
 }
 
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
         * If sg table allocation fails, requeue request later.
         */
        if (unlikely(sg_alloc_table_chained(&sdb->table,
-                       blk_rq_nr_phys_segments(req), sdb->table.sgl)))
+                       blk_rq_nr_phys_segments(req), sdb->table.sgl,
+                       SG_CHUNK_SIZE)))
                return BLK_STS_RESOURCE;
 
        /* 
                ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
                if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
-                               prot_sdb->table.sgl)) {
+                               prot_sdb->table.sgl,
+                               SG_CHUNK_SIZE)) {
                        ret = BLK_STS_RESOURCE;
                        goto out_free_sgtables;
                }
 
 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
 
-void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
+void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
+                    sg_free_fn *);
 void sg_free_table(struct sg_table *);
 int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
-                    struct scatterlist *, gfp_t, sg_alloc_fn *);
+                    struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
 int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
                                unsigned int n_pages, unsigned int offset,
 #endif
 
 #ifdef CONFIG_SG_POOL
-void sg_free_table_chained(struct sg_table *table, bool first_chunk);
+void sg_free_table_chained(struct sg_table *table,
+                          unsigned nents_first_chunk);
 int sg_alloc_table_chained(struct sg_table *table, int nents,
-                          struct scatterlist *first_chunk);
+                          struct scatterlist *first_chunk,
+                          unsigned nents_first_chunk);
 #endif
 
 /*
 
  * __sg_free_table - Free a previously mapped sg table
  * @table:     The sg table header to use
  * @max_ents:  The maximum number of entries per single scatterlist
- * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ *     scatterlist chunk, 0 means no such preallocated first chunk
  * @free_fn:   Free function
  *
  *  Description:
  *
  **/
 void __sg_free_table(struct sg_table *table, unsigned int max_ents,
-                    bool skip_first_chunk, sg_free_fn *free_fn)
+                    unsigned int nents_first_chunk, sg_free_fn *free_fn)
 {
        struct scatterlist *sgl, *next;
+       unsigned curr_max_ents = nents_first_chunk ?: max_ents;
 
        if (unlikely(!table->sgl))
                return;
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */
-               if (alloc_size > max_ents) {
-                       next = sg_chain_ptr(&sgl[max_ents - 1]);
-                       alloc_size = max_ents;
+               if (alloc_size > curr_max_ents) {
+                       next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
+                       alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                }
 
                table->orig_nents -= sg_size;
-               if (skip_first_chunk)
-                       skip_first_chunk = false;
+               if (nents_first_chunk)
+                       nents_first_chunk = 0;
                else
                        free_fn(sgl, alloc_size);
                sgl = next;
+               curr_max_ents = max_ents;
        }
 
        table->sgl = NULL;
  * @table:     The sg table header to use
  * @nents:     Number of entries in sg list
  * @max_ents:  The maximum number of entries the allocator returns per call
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ *     scatterlist chunk, 0 means no such preallocated chunk provided by user
  * @gfp_mask:  GFP allocation mask
  * @alloc_fn:  Allocator to use
  *
  **/
 int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, struct scatterlist *first_chunk,
-                    gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
+                    unsigned int nents_first_chunk, gfp_t gfp_mask,
+                    sg_alloc_fn *alloc_fn)
 {
        struct scatterlist *sg, *prv;
        unsigned int left;
+       unsigned curr_max_ents = nents_first_chunk ?: max_ents;
+       unsigned prv_max_ents;
 
        memset(table, 0, sizeof(*table));
 
        do {
                unsigned int sg_size, alloc_size = left;
 
-               if (alloc_size > max_ents) {
-                       alloc_size = max_ents;
+               if (alloc_size > curr_max_ents) {
+                       alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;
                 * If this is not the first mapping, chain previous part.
                 */
                if (prv)
-                       sg_chain(prv, max_ents, sg);
+                       sg_chain(prv, prv_max_ents, sg);
                else
                        table->sgl = sg;
 
                        sg_mark_end(&sg[sg_size - 1]);
 
                prv = sg;
+               prv_max_ents = curr_max_ents;
+               curr_max_ents = max_ents;
        } while (left);
 
        return 0;
        int ret;
 
        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
-                              NULL, gfp_mask, sg_kmalloc);
+                              NULL, 0, gfp_mask, sg_kmalloc);
        if (unlikely(ret))
-               __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
+               __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
 
        return ret;
 }
 
 /**
  * sg_free_table_chained - Free a previously mapped sg table
  * @table:     The sg table header to use
- * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained?
+ * @nents_first_chunk: size of the first_chunk SGL passed to
+ *             sg_alloc_table_chained
  *
  *  Description:
  *    Free an sg table previously allocated and setup with
  *    sg_alloc_table_chained().
  *
+ *    @nents_first_chunk must be the same value that was passed to
+ *    sg_alloc_table_chained().
+ *
  **/
-void sg_free_table_chained(struct sg_table *table, bool first_chunk)
+void sg_free_table_chained(struct sg_table *table,
+               unsigned nents_first_chunk)
 {
-       if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
+       if (table->orig_nents <= nents_first_chunk)
                return;
-       __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
+
+       if (nents_first_chunk == 1)
+               nents_first_chunk = 0;
+
+       __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
 }
 EXPORT_SYMBOL_GPL(sg_free_table_chained);
 
  * @table:     The sg table header to use
  * @nents:     Number of entries in sg list
  * @first_chunk: first SGL
+ * @nents_first_chunk: number of entries in @first_chunk
  *
  *  Description:
  *    Allocate and chain SGLs in an sg table. If @nents@ is larger than
- *    SG_CHUNK_SIZE a chained sg table will be setup.
+ *    @nents_first_chunk a chained sg table will be setup.
  *
  **/
 int sg_alloc_table_chained(struct sg_table *table, int nents,
-               struct scatterlist *first_chunk)
+               struct scatterlist *first_chunk, unsigned nents_first_chunk)
 {
        int ret;
 
        BUG_ON(!nents);
 
-       if (first_chunk) {
-               if (nents <= SG_CHUNK_SIZE) {
+       if (first_chunk && nents_first_chunk) {
+               if (nents <= nents_first_chunk) {
                        table->nents = table->orig_nents = nents;
                        sg_init_table(table->sgl, nents);
                        return 0;
                }
        }
 
+       /* A single-entry first chunk cannot hold a chain link, so ignore it */
+       if (nents_first_chunk == 1) {
+               first_chunk = NULL;
+               nents_first_chunk = 0;
+       }
+
        ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
-                              first_chunk, GFP_ATOMIC, sg_pool_alloc);
+                              first_chunk, nents_first_chunk,
+                              GFP_ATOMIC, sg_pool_alloc);
        if (unlikely(ret))
-               sg_free_table_chained(table, (bool)first_chunk);
+               sg_free_table_chained(table, nents_first_chunk);
        return ret;
 }
 EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
 
 
        ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
        if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
-                                  ctxt->rw_sg_table.sgl)) {
+                                  ctxt->rw_sg_table.sgl,
+                                  SG_CHUNK_SIZE)) {
                kfree(ctxt);
                ctxt = NULL;
        }
 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_rw_ctxt *ctxt)
 {
-       sg_free_table_chained(&ctxt->rw_sg_table, true);
+       sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
 
        spin_lock(&rdma->sc_rw_ctxt_lock);
        list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);