PQI_RESCAN_WORK_INTERVAL);
 }
 
+static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
+{
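+       /* Cancel rescan work and wait for any running instance to finish. */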
+       cancel_delayed_work_sync(&ctrl_info->rescan_work);
+}
+
 static int pqi_map_single(struct pci_dev *pci_dev,
        struct pqi_sg_descriptor *sg_descriptor, void *buffer,
        size_t buffer_length, int data_direction)
        buffer->driver_version_tag[1] = 'V';
        put_unaligned_le16(sizeof(buffer->driver_version),
                &buffer->driver_version_length);
-       strncpy(buffer->driver_version, DRIVER_VERSION,
+       strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
                sizeof(buffer->driver_version) - 1);
        buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
        buffer->end_tag[0] = 'Z';
 static inline void pqi_schedule_update_time_worker(
        struct pqi_ctrl_info *ctrl_info)
 {
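+       /* Only one update-time work item needs to be queued at a time. */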
+       if (ctrl_info->update_time_worker_scheduled)
+               return;
+
        schedule_delayed_work(&ctrl_info->update_time_work, 0);
+       ctrl_info->update_time_worker_scheduled = true;
+}
+
+static inline void pqi_cancel_update_time_worker(
+       struct pqi_ctrl_info *ctrl_info)
+{
+       if (!ctrl_info->update_time_worker_scheduled)
+               return;
+
+       cancel_delayed_work_sync(&ctrl_info->update_time_work);
+       ctrl_info->update_time_worker_scheduled = false;
 }
 
 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
        return !mutex_is_locked(&ctrl_info->scan_mutex);
 }
 
+static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
+{
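+       /*
+        * The scan path holds scan_mutex for the duration of a scan, so
+        * briefly taking and releasing the mutex waits out any scan that
+        * is already in progress.
+        */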
+       mutex_lock(&ctrl_info->scan_mutex);
+       mutex_unlock(&ctrl_info->scan_mutex);
+}
+
+static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
+{
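+       /* Same idiom: lun_reset_mutex is held for the span of a LUN reset. */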
+       mutex_lock(&ctrl_info->lun_reset_mutex);
+       mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
+
 static inline void pqi_set_encryption_info(
        struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
        u64 first_block)
        int num_interrupts;
        struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
 
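+       /* Bail out if the timer has already been stopped. */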
+       if (!ctrl_info->heartbeat_timer_started)
+               return;
+
        num_interrupts = atomic_read(&ctrl_info->num_interrupts);
 
        if (num_interrupts == ctrl_info->previous_num_interrupts) {
                jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
        ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
        ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
-       add_timer(&ctrl_info->heartbeat_timer);
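+       /* Publish the started flag before arming the timer. */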
        ctrl_info->heartbeat_timer_started = true;
+       add_timer(&ctrl_info->heartbeat_timer);
 }
 
 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
 {
-       if (ctrl_info->heartbeat_timer_started)
+       if (ctrl_info->heartbeat_timer_started) {
+               ctrl_info->heartbeat_timer_started = false;
                del_timer_sync(&ctrl_info->heartbeat_timer);
+       }
 }
 
 static inline int pqi_event_type_to_event_index(unsigned int event_type)
        return num_events;
 }
 
+#define PQI_LEGACY_INTX_MASK   0x1
+
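+/*
+ * Writing the mask bit to the mask-clear register unmasks (enables) legacy
+ * INTx; writing it to the mask-set register masks (disables) it.
+ */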
+static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
+       bool enable_intx)
+{
+       u32 intx_mask;
+       struct pqi_device_registers __iomem *pqi_registers;
+       volatile void __iomem *register_addr;
+
+       pqi_registers = ctrl_info->pqi_registers;
+
+       if (enable_intx)
+               register_addr = &pqi_registers->legacy_intx_mask_clear;
+       else
+               register_addr = &pqi_registers->legacy_intx_mask_set;
+
+       intx_mask = readl(register_addr);
+       intx_mask |= PQI_LEGACY_INTX_MASK;
+       writel(intx_mask, register_addr);
+}
+
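+/*
+ * Switch the controller between MSI-X, legacy INTx, and no-interrupt modes.
+ * Each arm of the nested switch performs only the steps required for that
+ * particular transition.
+ */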
+static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
+       enum pqi_irq_mode new_mode)
+{
+       switch (ctrl_info->irq_mode) {
+       case IRQ_MODE_MSIX:
+               switch (new_mode) {
+               case IRQ_MODE_MSIX:
+                       break;
+               case IRQ_MODE_INTX:
+                       pqi_configure_legacy_intx(ctrl_info, true);
+                       sis_disable_msix(ctrl_info);
+                       sis_enable_intx(ctrl_info);
+                       break;
+               case IRQ_MODE_NONE:
+                       sis_disable_msix(ctrl_info);
+                       break;
+               }
+               break;
+       case IRQ_MODE_INTX:
+               switch (new_mode) {
+               case IRQ_MODE_MSIX:
+                       pqi_configure_legacy_intx(ctrl_info, false);
+                       sis_disable_intx(ctrl_info);
+                       sis_enable_msix(ctrl_info);
+                       break;
+               case IRQ_MODE_INTX:
+                       break;
+               case IRQ_MODE_NONE:
+                       pqi_configure_legacy_intx(ctrl_info, false);
+                       sis_disable_intx(ctrl_info);
+                       break;
+               }
+               break;
+       case IRQ_MODE_NONE:
+               switch (new_mode) {
+               case IRQ_MODE_MSIX:
+                       sis_enable_msix(ctrl_info);
+                       break;
+               case IRQ_MODE_INTX:
+                       pqi_configure_legacy_intx(ctrl_info, true);
+                       sis_enable_intx(ctrl_info);
+                       break;
+               case IRQ_MODE_NONE:
+                       break;
+               }
+               break;
+       }
+
+       ctrl_info->irq_mode = new_mode;
+}
+
+#define PQI_LEGACY_INTX_PENDING                0x1
+
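+/*
+ * A legacy INTx line can be shared with other devices, so check the
+ * pending bit in the INTx status register before claiming an interrupt.
+ */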
+static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
+{
+       bool valid_irq;
+       u32 intx_status;
+
+       switch (ctrl_info->irq_mode) {
+       case IRQ_MODE_MSIX:
+               valid_irq = true;
+               break;
+       case IRQ_MODE_INTX:
+               intx_status =
+                       readl(&ctrl_info->pqi_registers->legacy_intx_status);
+               if (intx_status & PQI_LEGACY_INTX_PENDING)
+                       valid_irq = true;
+               else
+                       valid_irq = false;
+               break;
+       case IRQ_MODE_NONE:
+       default:
+               valid_irq = false;
+               break;
+       }
+
+       return valid_irq;
+}
+
 static irqreturn_t pqi_irq_handler(int irq, void *data)
 {
        struct pqi_ctrl_info *ctrl_info;
        queue_group = data;
        ctrl_info = queue_group->ctrl_info;
 
-       if (!ctrl_info || !queue_group->oq_ci)
+       if (!pqi_is_valid_irq(ctrl_info))
                return IRQ_NONE;
 
        num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
        }
 
        ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
-
+       ctrl_info->irq_mode = IRQ_MODE_MSIX;
        return 0;
 }
 
        return 0;
 }
 
-static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
+static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
+       unsigned int group_number)
 {
-       unsigned int i;
        int rc;
        struct pqi_queue_group *queue_group;
        struct pqi_general_admin_request request;
        struct pqi_general_admin_response response;
 
-       i = ctrl_info->num_active_queue_groups;
-       queue_group = &ctrl_info->queue_groups[i];
+       queue_group = &ctrl_info->queue_groups[group_number];
 
        /*
         * Create IQ (Inbound Queue - host to device queue) for
                get_unaligned_le64(
                        &response.data.create_operational_oq.oq_ci_offset);
 
-       ctrl_info->num_active_queue_groups++;
-
        return 0;
 
 delete_inbound_queue_aio:
        }
 
        for (i = 0; i < ctrl_info->num_queue_groups; i++) {
-               rc = pqi_create_queue_group(ctrl_info);
+               rc = pqi_create_queue_group(ctrl_info, i);
                if (rc) {
                        dev_err(&ctrl_info->pci_dev->dev,
                                "error creating queue group number %u/%u\n",
        num_queue_groups = min(num_queue_groups, max_queue_groups);
 
        ctrl_info->num_queue_groups = num_queue_groups;
+       ctrl_info->max_hw_queue_index = num_queue_groups - 1;
 
        /*
         * Make sure that the max. inbound IU length is an even multiple
        return 0;
 }
 
+static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd)
+{
+       u16 hw_queue;
+
+       hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
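+       /* Out-of-range tags fall back to queue group 0. */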
+       if (hw_queue > ctrl_info->max_hw_queue_index)
+               hw_queue = 0;
+
+       return hw_queue;
+}
+
 /*
  * This function gets called just before we hand the completed SCSI request
  * back to the SML.
        int rc;
        struct pqi_ctrl_info *ctrl_info;
        struct pqi_scsi_dev *device;
-       u16 hwq;
+       u16 hw_queue;
        struct pqi_queue_group *queue_group;
        bool raid_bypassed;
 
         */
        scmd->result = 0;
 
-       hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
-       if (hwq >= ctrl_info->num_queue_groups)
-               hwq = 0;
-
-       queue_group = &ctrl_info->queue_groups[hwq];
+       hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
+       queue_group = &ctrl_info->queue_groups[hw_queue];
 
        if (pqi_is_logical_device(device)) {
                raid_bypassed = false;
        }
 }
 
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
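+       /*
+        * Poll until the device's outstanding command count drops to zero,
+        * giving up if the controller goes offline in the meantime.
+        */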
+       while (atomic_read(&device->scsi_cmds_outstanding)) {
+               pqi_check_ctrl_health(ctrl_info);
+               if (pqi_ctrl_offline(ctrl_info))
+                       return -ENXIO;
+               usleep_range(1000, 2000);
+       }
+
+       return 0;
+}
+
+static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
+{
+       bool io_pending;
+       unsigned long flags;
+       struct pqi_scsi_dev *device;
+
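+       /* Loop until no device on the controller has commands outstanding. */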
+       while (1) {
+               io_pending = false;
+
+               spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+               list_for_each_entry(device, &ctrl_info->scsi_device_list,
+                       scsi_device_list_entry) {
+                       if (atomic_read(&device->scsi_cmds_outstanding)) {
+                               io_pending = true;
+                               break;
+                       }
+               }
+               spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+                                       flags);
+
+               if (!io_pending)
+                       break;
+
+               pqi_check_ctrl_health(ctrl_info);
+               if (pqi_ctrl_offline(ctrl_info))
+                       return -ENXIO;
+
+               usleep_range(1000, 2000);
+       }
+
+       return 0;
+}
+
 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
        void *context)
 {
        int rc;
 
        rc = pqi_lun_reset(ctrl_info, device);
+       if (rc == 0)
+               rc = pqi_device_wait_for_pending_io(ctrl_info, device);
 
        return rc == 0 ? SUCCESS : FAILED;
 }
 {
        int rc;
 
-       sis_disable_msix(ctrl_info);
+       pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
        rc = pqi_reset(ctrl_info);
        if (rc)
                return rc;
        if (rc)
                return rc;
 
-       sis_enable_msix(ctrl_info);
+       pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
+
+       ctrl_info->controller_online = true;
+       pqi_start_heartbeat_timer(ctrl_info);
 
        rc = pqi_enable_events(ctrl_info);
        if (rc) {
                return rc;
        }
 
-       pqi_start_heartbeat_timer(ctrl_info);
-
-       ctrl_info->controller_online = true;
-
        /* Register with the SCSI subsystem. */
        rc = pqi_register_scsi(ctrl_info);
        if (rc)
        return 0;
 }
 
+#if defined(CONFIG_PM)
+
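+/*
+ * Reset all host- and controller-side queue indices to zero so that the
+ * PQI queues can be recreated from a clean state on resume.
+ */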
+static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned int i;
+       struct pqi_admin_queues *admin_queues;
+       struct pqi_event_queue *event_queue;
+
+       admin_queues = &ctrl_info->admin_queues;
+       admin_queues->iq_pi_copy = 0;
+       admin_queues->oq_ci_copy = 0;
+       *admin_queues->oq_pi = 0;
+
+       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+               ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
+               ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
+               ctrl_info->queue_groups[i].oq_ci_copy = 0;
+
+               *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
+               *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
+               *ctrl_info->queue_groups[i].oq_pi = 0;
+       }
+
+       event_queue = &ctrl_info->event_queue;
+       *event_queue->oq_pi = 0;
+       event_queue->oq_ci_copy = 0;
+}
+
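+/*
+ * Bring the controller from SIS mode back up to full PQI operation after
+ * a resume, reusing the queue memory that was allocated at probe time.
+ */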
+static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+
+       rc = pqi_force_sis_mode(ctrl_info);
+       if (rc)
+               return rc;
+
+       /*
+        * Wait until the controller is ready to start accepting SIS
+        * commands.
+        */
+       rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
+       if (rc)
+               return rc;
+
+       /*
+        * If the function we are about to call succeeds, the
+        * controller will transition from legacy SIS mode
+        * into PQI mode.
+        */
+       rc = sis_init_base_struct_addr(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error initializing PQI mode\n");
+               return rc;
+       }
+
+       /* Wait for the controller to complete the SIS -> PQI transition. */
+       rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "transition to PQI mode failed\n");
+               return rc;
+       }
+
+       /* From here on, we are running in PQI mode. */
+       ctrl_info->pqi_mode_enabled = true;
+       pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+
+       pqi_reinit_queues(ctrl_info);
+
+       rc = pqi_create_admin_queues(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error creating admin queues\n");
+               return rc;
+       }
+
+       rc = pqi_create_queues(ctrl_info);
+       if (rc)
+               return rc;
+
+       pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
+
+       ctrl_info->controller_online = true;
+       pqi_start_heartbeat_timer(ctrl_info);
+       pqi_ctrl_unblock_requests(ctrl_info);
+
+       rc = pqi_enable_events(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error configuring events\n");
+               return rc;
+       }
+
+       rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
+       if (rc) {
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "error updating host wellness\n");
+               return rc;
+       }
+
+       pqi_schedule_update_time_worker(ctrl_info);
+
+       pqi_scan_scsi_devices(ctrl_info);
+
+       return 0;
+}
+
+#endif /* CONFIG_PM */
+
 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
        u16 timeout)
 {
        init_waitqueue_head(&ctrl_info->block_requests_wait);
 
        ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
+       ctrl_info->irq_mode = IRQ_MODE_NONE;
        ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
 
        return ctrl_info;
 
 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
 {
-       cancel_delayed_work_sync(&ctrl_info->rescan_work);
-       cancel_delayed_work_sync(&ctrl_info->update_time_work);
+       pqi_cancel_rescan_worker(ctrl_info);
+       pqi_cancel_update_time_worker(ctrl_info);
        pqi_remove_all_scsi_devices(ctrl_info);
        pqi_unregister_scsi(ctrl_info);
        if (ctrl_info->pqi_mode_enabled)
                "unable to flush controller cache\n");
 }
 
+#if defined(CONFIG_PM)
+
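+/*
+ * Quiesce the controller for suspend: stop the background workers, drain
+ * all outstanding I/O, then drop to a low-power state unless the system
+ * is only freezing.
+ */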
+static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+       struct pqi_ctrl_info *ctrl_info;
+
+       ctrl_info = pci_get_drvdata(pci_dev);
+
+       pqi_disable_events(ctrl_info);
+       pqi_cancel_update_time_worker(ctrl_info);
+       pqi_cancel_rescan_worker(ctrl_info);
+       pqi_wait_until_scan_finished(ctrl_info);
+       pqi_wait_until_lun_reset_finished(ctrl_info);
+       pqi_flush_cache(ctrl_info);
+       pqi_ctrl_block_requests(ctrl_info);
+       pqi_ctrl_wait_until_quiesced(ctrl_info);
+       pqi_wait_until_inbound_queues_empty(ctrl_info);
+       pqi_ctrl_wait_for_pending_io(ctrl_info);
+       pqi_stop_heartbeat_timer(ctrl_info);
+
+       if (state.event == PM_EVENT_FREEZE)
+               return 0;
+
+       pci_save_state(pci_dev);
+       pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+       ctrl_info->controller_online = false;
+       ctrl_info->pqi_mode_enabled = false;
+
+       return 0;
+}
+
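+/*
+ * If the device never left D0 (e.g. after a freeze), rearm a single
+ * legacy INTx vector and unblock requests; otherwise restore PCI state
+ * and run the full resume initialization.
+ */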
+static int pqi_resume(struct pci_dev *pci_dev)
+{
+       int rc;
+       struct pqi_ctrl_info *ctrl_info;
+
+       ctrl_info = pci_get_drvdata(pci_dev);
+
+       if (pci_dev->current_state != PCI_D0) {
+               ctrl_info->max_hw_queue_index = 0;
+               pqi_free_interrupts(ctrl_info);
+               pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
+               rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
+                       IRQF_SHARED, DRIVER_NAME_SHORT,
+                       &ctrl_info->queue_groups[0]);
+               if (rc) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "irq %u init failed with error %d\n",
+                               pci_dev->irq, rc);
+                       return rc;
+               }
+               pqi_start_heartbeat_timer(ctrl_info);
+               pqi_ctrl_unblock_requests(ctrl_info);
+               return 0;
+       }
+
+       pci_set_power_state(pci_dev, PCI_D0);
+       pci_restore_state(pci_dev);
+
+       return pqi_ctrl_init_resume(ctrl_info);
+}
+
+#endif /* CONFIG_PM */
+
 /* Define the PCI IDs for the controllers that we support. */
 static const struct pci_device_id pqi_pci_id_table[] = {
        {
        .probe = pqi_pci_probe,
        .remove = pqi_pci_remove,
        .shutdown = pqi_shutdown,
+#if defined(CONFIG_PM)
+       .suspend = pqi_suspend,
+       .resume = pqi_resume,
+#endif
 };
 
 static int __init pqi_init(void)
        BUILD_BUG_ON(offsetof(struct pqi_event_config,
                descriptors) != 4);
 
+       BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
+               ARRAY_SIZE(pqi_supported_event_types));
+
        BUILD_BUG_ON(offsetof(struct pqi_event_response,
                header.iu_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_event_response,
 
 /* for submission of legacy SIS commands */
 #define SIS_REENABLE_SIS_MODE                  0x1
 #define SIS_ENABLE_MSIX                                0x40
+#define SIS_ENABLE_INTX                                0x80
 #define SIS_SOFT_RESET                         0x100
 #define SIS_TRIGGER_SHUTDOWN                   0x800000
 #define SIS_CMD_READY                          0x200
 #define SIS_CTRL_KERNEL_UP                     0x80
 #define SIS_CTRL_KERNEL_PANIC                  0x100
 #define SIS_CTRL_READY_TIMEOUT_SECS            30
+#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS     90
 #define SIS_CTRL_READY_POLL_INTERVAL_MSECS     10
 
 #pragma pack(1)
 
 #pragma pack()
 
-int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
+       unsigned int timeout_secs)
 {
        unsigned long timeout;
        u32 status;
 
-       timeout = (SIS_CTRL_READY_TIMEOUT_SECS * HZ) + jiffies;
+       timeout = (timeout_secs * HZ) + jiffies;
 
        while (1) {
                status = readl(&ctrl_info->registers->sis_firmware_status);
        return 0;
 }
 
+int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+{
+       return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
+               SIS_CTRL_READY_TIMEOUT_SECS);
+}
+
+int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info)
+{
+       return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
+               SIS_CTRL_READY_RESUME_TIMEOUT_SECS);
+}
+
 bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
 {
        bool running;
        return rc;
 }
 
+#define SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS    30
+
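+/*
+ * The firmware acknowledges a doorbell command by clearing the bit; give
+ * up after a timeout or if the firmware reports a kernel panic.
+ */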
+static void sis_wait_for_doorbell_bit_to_clear(
+       struct pqi_ctrl_info *ctrl_info, u32 bit)
+{
+       u32 doorbell_register;
+       unsigned long timeout;
+
+       timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
+
+       while (1) {
+               doorbell_register =
+                       readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+               if ((doorbell_register & bit) == 0)
+                       break;
+               if (readl(&ctrl_info->registers->sis_firmware_status) &
+                       SIS_CTRL_KERNEL_PANIC)
+                       break;
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "doorbell register bit 0x%x not cleared\n",
+                               bit);
+                       break;
+               }
+               usleep_range(1000, 2000);
+       }
+}
+
 /* Enable MSI-X interrupts on the controller. */
 
 void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
 
        writel(doorbell_register,
                &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+
+       sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_MSIX);
 }
 
 /* Disable MSI-X interrupts on the controller. */
                &ctrl_info->registers->sis_host_to_ctrl_doorbell);
 }
 
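+/* Enable legacy INTx interrupts on the controller. */
+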
+void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
+{
+       u32 doorbell_register;
+
+       doorbell_register =
+               readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+       doorbell_register |= SIS_ENABLE_INTX;
+
+       writel(doorbell_register,
+               &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+
+       sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_INTX);
+}
+
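+/* Disable legacy INTx interrupts on the controller. */
+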
+void sis_disable_intx(struct pqi_ctrl_info *ctrl_info)
+{
+       u32 doorbell_register;
+
+       doorbell_register =
+               readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+       doorbell_register &= ~SIS_ENABLE_INTX;
+
+       writel(doorbell_register,
+               &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
 {
        writel(SIS_SOFT_RESET,