 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
+static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
 
 static const u32 crtc_offsets[] = {
        CRTC0_REGISTER_OFFSET,
                                    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
                WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
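+               /* ack any pending interrupt on this hpd pin before enabling it */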
+               dce_v10_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
                dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq,
                               amdgpu_connector->hpd.hpd);
 
 
 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
+static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
 
 static const u32 crtc_offsets[] =
 {
                                    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
                WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
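+               /* ack any pending interrupt on this hpd pin before enabling it */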
+               dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
                dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
 
        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
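+/**
+ * dce_v6_0_hpd_int_ack - hpd interrupt acknowledge helper.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd pin to acknowledge
+ *
+ * Acknowledge (clear) a latched interrupt on the given hpd pin by
+ * setting the ACK bit in its DC_HPD1_INT_CONTROL register.
+ */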
+static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
+                                int hpd)
+{
+       u32 tmp;
+
+       if (hpd >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("invalid hpd %d\n", hpd);
+               return;
+       }
+
+       tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+       tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+       WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
 /**
  * dce_v6_0_hpd_init - hpd setup callback.
  *
                        continue;
                }
 
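+               /* ack any pending interrupt on this hpd pin before enabling it */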
+               dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
                dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
                            struct amdgpu_irq_src *source,
                            struct amdgpu_iv_entry *entry)
 {
-       uint32_t disp_int, mask, tmp;
+       uint32_t disp_int, mask;
        unsigned hpd;
 
        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
        mask = interrupt_status_offsets[hpd].hpd;
 
        if (disp_int & mask) {
-               tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
-               tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-               WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+               dce_v6_0_hpd_int_ack(adev, hpd);
                schedule_delayed_work(&adev->hotplug_work, 0);
                DRM_DEBUG("IH: HPD%d\n", hpd + 1);
        }
 
        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
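+/**
+ * dce_v8_0_hpd_int_ack - hpd interrupt acknowledge helper.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd pin to acknowledge
+ *
+ * Acknowledge (clear) a latched interrupt on the given hpd pin by
+ * setting the ACK bit in its DC_HPD1_INT_CONTROL register.
+ */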
+static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
+                                int hpd)
+{
+       u32 tmp;
+
+       if (hpd >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("invalid hpd %d\n", hpd);
+               return;
+       }
+
+       tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+       tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+       WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
 /**
  * dce_v8_0_hpd_init - hpd setup callback.
  *
                        continue;
                }
 
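+               /* ack any pending interrupt on this hpd pin before enabling it */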
+               dce_v8_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
                dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
                            struct amdgpu_irq_src *source,
                            struct amdgpu_iv_entry *entry)
 {
-       uint32_t disp_int, mask, tmp;
+       uint32_t disp_int, mask;
        unsigned hpd;
 
        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
        mask = interrupt_status_offsets[hpd].hpd;
 
        if (disp_int & mask) {
-               tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
-               tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-               WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+               dce_v8_0_hpd_int_ack(adev, hpd);
                schedule_delayed_work(&adev->hotplug_work, 0);
                DRM_DEBUG("IH: HPD%d\n", hpd + 1);
        }