sdev->allow_restart = 1;
 
-       /*
-        * SBP-2 does not require any alignment, but we set it anyway
-        * for compatibility with earlier versions of this driver.
-        */
-       blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
-
        if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
                sdev->inquiry_len = 36;
 
 
        .sg_tablesize                   = MPT_SCSI_SG_DEPTH,
        .max_sectors                    = 8192,
        .cmd_per_lun                    = 7,
+       .dma_alignment                  = 511,
        .shost_groups                   = mptscsih_host_attr_groups,
 };
 
 
        .sg_tablesize                   = MPT_SCSI_SG_DEPTH,
        .max_sectors                    = 8192,
        .cmd_per_lun                    = 7,
+       .dma_alignment                  = 511,
        .shost_groups                   = mptscsih_host_attr_groups,
        .no_write_same                  = 1,
 };
 
                "tagged %d, simple %d\n",
                ioc->name,sdev->tagged_supported, sdev->simple_tags));
 
-       blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
-
        return 0;
 }
 
 
        .sg_tablesize                   = MPT_SCSI_SG_DEPTH,
        .max_sectors                    = 8192,
        .cmd_per_lun                    = 7,
+       .dma_alignment                  = 511,
        .shost_groups                   = mptscsih_host_attr_groups,
 };
 
 
        else
                shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 
+       /* 32-bit (4-byte dword) is a common minimum for HBAs. */
+       if (sht->dma_alignment)
+               shost->dma_alignment = sht->dma_alignment;
+       else
+               shost->dma_alignment = 3;
+
        /*
         * assume a 4GB boundary, if not set
         */
 
        shost->max_id = 0;
        shost->max_channel = 0;
        shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+       shost->dma_alignment = 0;
 
        rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
        if (rc < 0)
        if (conn->datadgst_en)
                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
                                   sdev->request_queue);
-       blk_queue_dma_alignment(sdev->request_queue, 0);
        return 0;
 }
 
 
        scsi_qla_host_t *vha = shost_priv(sdev->host);
        struct req_que *req = vha->req;
 
-       if (IS_T10_PI_CAPABLE(vha->hw))
-               blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
-
        scsi_change_queue_depth(sdev, req->max_q_depth);
        return 0;
 }
                    QLA_SG_ALL : 128;
        }
 
+       if (IS_T10_PI_CAPABLE(base_vha->hw))
+               host->dma_alignment = 0x7;
+
        ret = scsi_add_host(host, &pdev->dev);
        if (ret)
                goto probe_failed;
 
        lim->seg_boundary_mask = shost->dma_boundary;
        lim->max_segment_size = shost->max_segment_size;
        lim->virt_boundary_mask = shost->virt_boundary_mask;
-
-       /*
-        * Set a reasonable default alignment:  The larger of 32-byte (dword),
-        * which is a common minimum for HBAs, and the minimum DMA alignment,
-        * which is set by the platform.
-        *
-        * Devices that require a bigger alignment can increase it later.
-        */
-       lim->dma_alignment = max(4, dma_get_cache_alignment()) - 1;
+       lim->dma_alignment = max_t(unsigned int,
+               shost->dma_alignment, dma_get_cache_alignment() - 1);
 
        if (shost->no_highmem)
                lim->bounce = BLK_BOUNCE_HIGH;
 
 
 static int slave_configure(struct scsi_device *sdev)
 {
-       /*
-        * Scatter-gather buffers (all but the last) must have a length
-        * divisible by the bulk maxpacket size.  Otherwise a data packet
-        * would end up being short, causing a premature end to the data
-        * transfer.  Since high-speed bulk pipes have a maxpacket size
-        * of 512, we'll use that as the scsi device queue's DMA alignment
-        * mask.  Guaranteeing proper alignment of the first buffer will
-        * have the desired effect because, except at the beginning and
-        * the end, scatter-gather buffers follow page boundaries.
-        */
-       blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
-
        /* Set the SCSI level to at least 2.  We'll leave it at 3 if that's
         * what is originally reported.  We need this to avoid confusing
         * the SCSI layer with devices that report 0 or 1, but need 10-byte
        /* limit the total size of a transfer to 120 KB */
        .max_sectors =                  240,
 
+       /*
+        * Scatter-gather buffers (all but the last) must have a length
+        * divisible by the bulk maxpacket size.  Otherwise a data packet
+        * would end up being short, causing a premature end to the data
+        * transfer.  Since high-speed bulk pipes have a maxpacket size
+        * of 512, we'll use that as the scsi device queue's DMA alignment
+        * mask.  Guaranteeing proper alignment of the first buffer will
+        * have the desired effect because, except at the beginning and
+        * the end, scatter-gather buffers follow page boundaries.
+        */
+       .dma_alignment =                511,
+
        /* emulated HBA */
        .emulated =                     1,
 
 
        return 0;
 }
 
-static int mts_slave_configure (struct scsi_device *s)
-{
-       blk_queue_dma_alignment(s->request_queue, (512 - 1));
-       return 0;
-}
-
 static int mts_scsi_abort(struct scsi_cmnd *srb)
 {
        struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
        .can_queue =            1,
        .this_id =              -1,
        .emulated =             1,
+       .dma_alignment =        511,
        .slave_alloc =          mts_slave_alloc,
-       .slave_configure =      mts_slave_configure,
        .max_sectors=           256, /* 128 K */
 };
 
 
         */
        sdev->inquiry_len = 36;
 
-       /*
-        * Some host controllers may have alignment requirements.
-        * We'll play it safe by requiring 512-byte alignment always.
-        */
-       blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
-
        /* Tell the SCSI layer if we know there is more than one LUN */
        if (us->protocol == USB_PR_BULK && us->max_lun > 0)
                sdev->sdev_bflags |= BLIST_FORCELUN;
        /* lots of sg segments can be handled */
        .sg_tablesize =                 SG_MAX_SEGMENTS,
 
+       /*
+        * Some host controllers may have alignment requirements.
+        * We'll play it safe by requiring 512-byte alignment always.
+        */
+       .dma_alignment =                511,
 
        /*
         * Limit the total size of a transfer to 120 KB.
 
 
        sdev->hostdata = devinfo;
 
-       /*
-        * The protocol has no requirements on alignment in the strict sense.
-        * Controllers may or may not have alignment restrictions.
-        * As this is not exported, we use an extremely conservative guess.
-        */
-       blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
-
        if (devinfo->flags & US_FL_MAX_SECTORS_64)
                blk_queue_max_hw_sectors(sdev->request_queue, 64);
        else if (devinfo->flags & US_FL_MAX_SECTORS_240)
        .eh_device_reset_handler = uas_eh_device_reset_handler,
        .this_id = -1,
        .skip_settle_delay = 1,
+       /*
+        * The protocol has no requirements on alignment in the strict sense.
+        * Controllers may or may not have alignment restrictions.
+        * As this is not exported, we use an extremely conservative guess.
+        */
+       .dma_alignment = 511,
        .dma_boundary = PAGE_SIZE - 1,
        .cmd_size = sizeof(struct uas_cmd_info),
 };
 
         */
        unsigned int max_segment_size;
 
+       unsigned int dma_alignment;
+
        /*
         * DMA scatter gather segment boundary limit. A segment crossing this
         * boundary will be split in two.
        unsigned int max_sectors;
        unsigned int opt_sectors;
        unsigned int max_segment_size;
+       unsigned int dma_alignment;
        unsigned long dma_boundary;
        unsigned long virt_boundary_mask;
        /*