};
 
 static struct workqueue_struct *device_reset_wq;
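+/* Workqueue for re-enabling SR-IOV after a device reset */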
+static struct workqueue_struct *device_sriov_wq;
 
 static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
                                           pci_channel_state_t state)
        struct work_struct reset_work;
 };
 
+/* Data for the SR-IOV re-enable worker */
+struct adf_sriov_dev_data {
+       struct adf_accel_dev *accel_dev;
+       struct completion compl;
+       struct work_struct sriov_work;
+};
+
 void adf_reset_sbr(struct adf_accel_dev *accel_dev)
 {
        struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
        }
 }
 
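+/*
+ * Worker that re-enables SR-IOV on the PF and signals completion so
+ * that the reset worker can wait for it with a timeout.
+ */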
+static void adf_device_sriov_worker(struct work_struct *work)
+{
+       struct adf_sriov_dev_data *sriov_data =
+               container_of(work, struct adf_sriov_dev_data, sriov_work);
+
+       adf_reenable_sriov(sriov_data->accel_dev);
+       complete(&sriov_data->compl);
+}
+
 static void adf_device_reset_worker(struct work_struct *work)
 {
        struct adf_reset_dev_data *reset_data =
                  container_of(work, struct adf_reset_dev_data, reset_work);
        struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+       unsigned long wait_jiffies = msecs_to_jiffies(10000);
+       struct adf_sriov_dev_data sriov_data;
 
        adf_dev_restarting_notify(accel_dev);
        if (adf_dev_restart(accel_dev)) {
                WARN(1, "QAT: device restart failed. Device is unusable\n");
                return;
        }
+
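+       /*
+        * Re-enable SR-IOV from a separate workqueue and, only if it
+        * completes within the 10 second timeout, notify the VFs that
+        * the PF has restarted.
+        */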
+       sriov_data.accel_dev = accel_dev;
+       init_completion(&sriov_data.compl);
+       INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker);
+       queue_work(device_sriov_wq, &sriov_data.sriov_work);
+       if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies))
+               adf_pf2vf_notify_restarted(accel_dev);
+
        adf_dev_restarted_notify(accel_dev);
        clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
 
 {
        device_reset_wq = alloc_workqueue("qat_device_reset_wq",
                                          WQ_MEM_RECLAIM, 0);
-       return !device_reset_wq ? -EFAULT : 0;
+       if (!device_reset_wq)
+               return -EFAULT;
+
+       device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+       if (!device_sriov_wq) {
+               destroy_workqueue(device_reset_wq);
+               device_reset_wq = NULL;
+               return -EFAULT;
+       }
+
+       return 0;
 }
 
 void adf_exit_aer(void)
        if (device_reset_wq)
                destroy_workqueue(device_reset_wq);
        device_reset_wq = NULL;
+
+       if (device_sriov_wq)
+               destroy_workqueue(device_sriov_wq);
+       device_sriov_wq = NULL;
 }
 
        ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
 #define ADF_ACCEL_STR "Accelerator%d"
 #define ADF_HEARTBEAT_TIMER  "HeartbeatTimer"
+#define ADF_SRIOV_ENABLED "SriovEnabled"
 
 #endif
 
 #if defined(CONFIG_PCI_IOV)
 int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
 void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
 bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
 {
 }
 
+static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
 static inline int adf_init_pf_wq(void)
 {
        return 0;
 
                /* This ptr will be populated when the VFs are created */
                vf_info->accel_dev = accel_dev;
                vf_info->vf_nr = i;
-               vf_info->vf_compat_ver = 0;
 
                mutex_init(&vf_info->pf2vf_lock);
                ratelimit_state_init(&vf_info->vf2pf_ratelimit,
        return pci_enable_sriov(pdev, totalvfs);
 }
 
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+       unsigned long val = 0;
+
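+       /* Do nothing if SR-IOV was not enabled before the reset */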
+       if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                   ADF_SRIOV_ENABLED, cfg))
+               return;
+
+       if (!accel_dev->pf.vf_info)
+               return;
+
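+       /* Configure the PF with zero cy and dc instances */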
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+                                       &val, ADF_DEC))
+               return;
+
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                       &val, ADF_DEC))
+               return;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+       dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
+       adf_enable_sriov(accel_dev);
+}
+
 /**
  * adf_disable_sriov() - Disable SRIOV for the device
  * @accel_dev:  Pointer to accel device.
        for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
                mutex_destroy(&vf->pf2vf_lock);
 
-       kfree(accel_dev->pf.vf_info);
-       accel_dev->pf.vf_info = NULL;
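+       /*
+        * A restarting device keeps its vf_info so that SR-IOV can be
+        * re-enabled afterwards without reallocating it.
+        */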
+       if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+               kfree(accel_dev->pf.vf_info);
+               accel_dev->pf.vf_info = NULL;
+       }
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);
 
        if (ret)
                return ret;
 
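+       /* Record in the device configuration that SR-IOV is enabled */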
+       val = 1;
+       adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
+                                   &val, ADF_DEC);
+
        return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);