                device->bus, device->target, device->lun, status);
 }
 
-static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
-       struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
-{
-       struct pqi_scsi_dev *device;
-
-       list_for_each_entry(device, &ctrl_info->scsi_device_list,
-               scsi_device_list_entry) {
-               if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
-                       continue;
-               if (pqi_is_logical_device(device))
-                       continue;
-               if (device->aio_handle == aio_handle)
-                       return device;
-       }
-
-       return NULL;
-}
-
-static void pqi_update_logical_drive_queue_depth(
-       struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
-{
-       unsigned int i;
-       struct raid_map *raid_map;
-       struct raid_map_disk_data *disk_data;
-       struct pqi_scsi_dev *phys_disk;
-       unsigned int num_phys_disks;
-       unsigned int num_raid_map_entries;
-       unsigned int queue_depth;
-
-       logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
-
-       raid_map = logical_drive->raid_map;
-       if (!raid_map)
-               return;
-
-       disk_data = raid_map->disk_data;
-       num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
-               (get_unaligned_le16(&raid_map->data_disks_per_row) +
-               get_unaligned_le16(&raid_map->metadata_disks_per_row));
-       num_raid_map_entries = num_phys_disks *
-               get_unaligned_le16(&raid_map->row_cnt);
-
-       queue_depth = 0;
-       for (i = 0; i < num_raid_map_entries; i++) {
-               phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
-                       disk_data[i].aio_handle);
-
-               if (!phys_disk) {
-                       dev_warn(&ctrl_info->pci_dev->dev,
-                               "failed to find physical disk for logical drive %016llx\n",
-                               get_unaligned_be64(logical_drive->scsi3addr));
-                       logical_drive->offload_enabled = false;
-                       logical_drive->offload_enabled_pending = false;
-                       kfree(raid_map);
-                       logical_drive->raid_map = NULL;
-                       return;
-               }
-
-               queue_depth += phys_disk->queue_depth;
-       }
-
-       logical_drive->queue_depth = queue_depth;
-}
-
-static void pqi_update_all_logical_drive_queue_depths(
-       struct pqi_ctrl_info *ctrl_info)
-{
-       struct pqi_scsi_dev *device;
-
-       list_for_each_entry(device, &ctrl_info->scsi_device_list,
-               scsi_device_list_entry) {
-               if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
-                       continue;
-               if (!pqi_is_logical_device(device))
-                       continue;
-               if (device->is_external_raid_device)
-                       continue;
-               pqi_update_logical_drive_queue_depth(ctrl_info, device);
-       }
-}
-
 static void pqi_rescan_worker(struct work_struct *work)
 {
        struct pqi_ctrl_info *ctrl_info;
        char *action, struct pqi_scsi_dev *device)
 {
        dev_info(&ctrl_info->pci_dev->dev,
-               "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
+               "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c qd=%d\n",
                action,
                ctrl_info->scsi_host->host_no,
                device->bus,
                        pqi_raid_level_to_string(device->raid_level) : "",
                device->offload_configured ? '+' : '-',
                device->offload_enabled_pending ? '+' : '-',
-               device->expose_device ? '+' : '-',
                device->queue_depth);
 }
 
        existing_device->is_physical_device = new_device->is_physical_device;
        existing_device->is_external_raid_device =
                new_device->is_external_raid_device;
-       existing_device->expose_device = new_device->expose_device;
-       existing_device->no_uld_attach = new_device->no_uld_attach;
        existing_device->aio_enabled = new_device->aio_enabled;
        memcpy(existing_device->vendor, new_device->vendor,
                sizeof(existing_device->vendor));
                device->keep_device = true;
        }
 
-       pqi_update_all_logical_drive_queue_depths(ctrl_info);
-
        list_for_each_entry(device, &ctrl_info->scsi_device_list,
                scsi_device_list_entry)
                device->offload_enabled =
 
        /* Expose any new devices. */
        list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
-               if (device->expose_device && !device->sdev) {
+               if (!device->sdev) {
                        rc = pqi_add_device(ctrl_info, device);
                        if (rc) {
                                dev_warn(&ctrl_info->pci_dev->dev,
        return is_supported;
 }
 
-static inline bool pqi_skip_device(u8 *scsi3addr,
-       struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+static inline bool pqi_skip_device(u8 *scsi3addr)
 {
-       u8 device_flags;
-
-       if (!MASKED_DEVICE(scsi3addr))
-               return false;
-
-       /* The device is masked. */
-
-       device_flags = phys_lun_ext_entry->device_flags;
-
-       if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
-               /*
-                * It's a non-disk device.  We ignore all devices of this type
-                * when they're masked.
-                */
+       /* Ignore all masked devices. */
+       if (MASKED_DEVICE(scsi3addr))
                return true;
-       }
 
        return false;
 }
 
-static inline bool pqi_ok_to_expose_device(struct pqi_scsi_dev *device)
-{
-       /* Expose all devices except for physical devices that are masked. */
-       if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
-               return false;
-
-       return true;
-}
-
 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 {
        int i;
                        scsi3addr = log_lun_ext_entry->lunid;
                }
 
-               if (is_physical_device &&
-                       pqi_skip_device(scsi3addr, phys_lun_ext_entry))
+               if (is_physical_device && pqi_skip_device(scsi3addr))
                        continue;
 
                if (device)
 
                pqi_assign_bus_target_lun(device);
 
-               device->expose_device = pqi_ok_to_expose_device(device);
-
                if (device->is_physical_device) {
                        device->wwid = phys_lun_ext_entry->wwid;
                        if ((phys_lun_ext_entry->device_flags &
                                rc == SCSI_MLQUEUE_HOST_BUSY ||
                                rc == SAM_STAT_CHECK_CONDITION ||
                                rc == SAM_STAT_RESERVATION_CONFLICT)
-                               raid_bypassed = true;
+                                       raid_bypassed = true;
                }
                if (!raid_bypassed)
                        rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
                        sdev_id(sdev), sdev->lun);
        }
 
-       if (device && device->expose_device) {
+       if (device) {
                sdev->hostdata = device;
                device->sdev = sdev;
                if (device->queue_depth) {
        return 0;
 }
 
-static int pqi_slave_configure(struct scsi_device *sdev)
-{
-       struct pqi_scsi_dev *device;
-
-       device = sdev->hostdata;
-       if (!device->expose_device)
-               sdev->no_uld_attach = true;
-
-       return 0;
-}
-
 static int pqi_map_queues(struct Scsi_Host *shost)
 {
        struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
        .eh_device_reset_handler = pqi_eh_device_reset_handler,
        .ioctl = pqi_ioctl,
        .slave_alloc = pqi_slave_alloc,
-       .slave_configure = pqi_slave_configure,
        .map_queues = pqi_map_queues,
        .sdev_attrs = pqi_sdev_attrs,
        .shost_attrs = pqi_shost_attrs,