{
        uint32_t ret;
 
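+       /* During PCI error recovery the device must not be touched; return
+        * a dummy value instead of reading HW. */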
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
            down_read_trylock(&adev->reset_sem)) {
                ret = amdgpu_kiq_rreg(adev, reg);
  * Returns the 8 bit value from the offset specified.
  */
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
  * Writes the value specified to the offset specified.
  */
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
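+       /* Writes are silently dropped while PCI error recovery is in
+        * progress. */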
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                                       uint32_t reg, uint32_t v,
                                       uint32_t acc_flags)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
        if ((reg * 4) < adev->rmmio_size)
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
            down_read_trylock(&adev->reset_sem)) {
                amdgpu_kiq_wreg(adev, reg, v);
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (amdgpu_sriov_fullaccess(adev) &&
                adev->gfx.rlc.funcs &&
                adev->gfx.rlc.funcs->is_rlcg_access_range) {
  */
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
  */
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
  */
 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
  */
 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
  */
 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
  */
 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
 
        pci_restore_state(pdev);
 
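+       /* While this flag is set, the guarded register helpers act as
+        * no-ops, so amdgpu_device_ip_suspend() cannot touch the failed HW. */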
+       adev->in_pci_err_recovery = true;
        r = amdgpu_device_ip_suspend(adev);
+       adev->in_pci_err_recovery = false;
        if (r)
                goto out;
 
 
 
        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
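+       /* Disabling KGQ/KCQ submits packets through the KIQ ring, which
+        * would access HW; skip it during PCI error recovery. */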
+       if (!adev->in_pci_err_recovery) {
 #ifndef BRING_UP_DEBUG
-       if (amdgpu_async_gfx_ring) {
-               r = gfx_v10_0_kiq_disable_kgq(adev);
-               if (r)
-                       DRM_ERROR("KGQ disable failed\n");
-       }
+               if (amdgpu_async_gfx_ring) {
+                       r = gfx_v10_0_kiq_disable_kgq(adev);
+                       if (r)
+                               DRM_ERROR("KGQ disable failed\n");
+               }
 #endif
-       if (amdgpu_gfx_disable_kcq(adev))
-               DRM_ERROR("KCQ disable failed\n");
+               if (amdgpu_gfx_disable_kcq(adev))
+                       DRM_ERROR("KCQ disable failed\n");
+       }
+
        if (amdgpu_sriov_vf(adev)) {
                gfx_v10_0_cp_gfx_enable(adev, false);
                /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */