/* get config memsize register */
        u32 (*get_config_memsize)(struct amdgpu_device *adev);
        /* flush hdp write queue */
-       void (*flush_hdp)(struct amdgpu_device *adev);
+       void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
        /* invalidate hdp read cache */
-       void (*invalidate_hdp)(struct amdgpu_device *adev);
+       void (*invalidate_hdp)(struct amdgpu_device *adev,
+                              struct amdgpu_ring *ring);
 };
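The new convention: a NULL ring (or a ring whose funcs lack emit_wreg) means
"flush synchronously via MMIO, as before", while a capable ring means "emit
the register write as a packet on that ring". Every per-ASIC implementation
below follows the same dispatch shape; in this sketch HDP_REG and HDP_VAL
are placeholders, not real register names:

	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(HDP_REG, HDP_VAL);	/* immediate MMIO write */
		RREG32(HDP_REG);		/* read back to post the write */
	} else {
		/* queue the register write as a packet on the ring */
		amdgpu_ring_emit_wreg(ring, HDP_REG, HDP_VAL);
	}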
 
        u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
        u32 (*get_rev_id)(struct amdgpu_device *adev);
        void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-       void (*hdp_flush)(struct amdgpu_device *adev);
+       void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
        u32 (*get_memsize)(struct amdgpu_device *adev);
        void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
                                    bool use_doorbell, int doorbell_index);

 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev) (adev)->asic_funcs->flush_hdp((adev))
-#define amdgpu_asic_invalidate_hdp(adev) (adev)->asic_funcs->invalidate_hdp((adev))
+#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
+#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, pasid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (pasid), (addr))
 #define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
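Caller-side, the reworked macros make that choice explicit. A usage sketch
(illustrative, not part of the patch):

	mb();					/* order prior CPU writes */
	amdgpu_asic_flush_hdp(adev, NULL);	/* CPU path: synchronous MMIO flush */

	amdgpu_asic_flush_hdp(adev, ring);	/* CS path: flush emitted on the ring */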
 
                }
        }
        mb();
-       amdgpu_asic_flush_hdp(adev);
+       amdgpu_asic_flush_hdp(adev, NULL);
        amdgpu_gmc_flush_gpu_tlb(adev, 0);
        return 0;
 }

                return r;
 
        mb();
-       amdgpu_asic_flush_hdp(adev);
+       amdgpu_asic_flush_hdp(adev, NULL);
        amdgpu_gmc_flush_gpu_tlb(adev, 0);
        return 0;
 }
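Both GART paths above are pure CPU paths, which is why they pass NULL: the
page-table entries were just written by the CPU, and the sequence depends on
this ordering (annotations mine):

	mb();				   /* 1. order the CPU's PTE writes */
	amdgpu_asic_flush_hdp(adev, NULL); /* 2. push them through the host data path */
	amdgpu_gmc_flush_gpu_tlb(adev, 0); /* 3. then drop stale GPU translations */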
 
        if (vm->use_cpu_for_update) {
                /* Flush HDP */
                mb();
-               amdgpu_asic_flush_hdp(adev);
+               amdgpu_asic_flush_hdp(adev, NULL);
        } else if (params.ib->length_dw == 0) {
                amdgpu_job_free(job);
        } else {

        if (vm->use_cpu_for_update) {
                /* Flush HDP */
                mb();
-               amdgpu_asic_flush_hdp(adev);
+               amdgpu_asic_flush_hdp(adev, NULL);
        }
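Likewise, both VM hunks sit on the vm->use_cpu_for_update branch, i.e. the
page tables were updated by the CPU rather than by a job on a ring, so the
MMIO fallback (NULL) is the only sensible argument here.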
 
        spin_lock(&vm->status_lock);
 
                adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 }
 
-static void cik_flush_hdp(struct amdgpu_device *adev)
+static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
-       RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+       if (!ring || !ring->funcs->emit_wreg) {
+               WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+               RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+       } else {
+               amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+       }
 }
 
-static void cik_invalidate_hdp(struct amdgpu_device *adev)
+static void cik_invalidate_hdp(struct amdgpu_device *adev,
+                              struct amdgpu_ring *ring)
 {
-       WREG32(mmHDP_DEBUG0, 1);
-       RREG32(mmHDP_DEBUG0);
+       if (!ring || !ring->funcs->emit_wreg) {
+               WREG32(mmHDP_DEBUG0, 1);
+               RREG32(mmHDP_DEBUG0);
+       } else {
+               amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+       }
 }
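Two notes on the CIK implementation (the SI and VI versions below are
identical in shape). First, the RREG32() after the WREG32() in the MMIO
branch is the usual posted-write flush: reading the register back ensures
the write has actually reached the device before the function returns.
Second, the guard on ring->funcs->emit_wreg is required because
amdgpu_ring_emit_wreg() is only a thin dispatch macro, roughly:

	#define amdgpu_ring_emit_wreg(r, d, v) ((r)->funcs->emit_wreg((r), (d), (v)))

so a ring type that never wired up emit_wreg must take the MMIO path instead.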
 
 static const struct amdgpu_asic_funcs cik_asic_funcs =
 
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
        /* After HDP is initialized, flush HDP.*/
-       adev->nbio_funcs->hdp_flush(adev);
+       adev->nbio_funcs->hdp_flush(adev, NULL);
 
        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                value = false;
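gmc_v9_0 calls the NBIO hook directly and passes NULL: this runs during GART
setup in hw init, at a point where no ring is ready to take commands, so the
MMIO path is the only option.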
 
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
+                               struct amdgpu_ring *ring)
 {
-       WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+       if (!ring || !ring->funcs->emit_wreg)
+               WREG32_SOC15_NO_KIQ(NBIO, 0,
+                                   mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
+                                   0);
+       else
+               amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+                       NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
 }
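Note the asymmetry between the two branches: WREG32_SOC15_NO_KIQ() resolves
the per-IP register base internally, but amdgpu_ring_emit_wreg() takes a raw
dword offset, so the ring path has to compute it explicitly with
SOC15_REG_OFFSET(). For reference, that macro is approximately:

	#define SOC15_REG_OFFSET(ip, inst, reg) \
		(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

i.e. a lookup in the per-ASIC tables that map each IP block's registers to
absolute offsets. The nbio v7.0 variant below follows the same split.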
 
 static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
 
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
+                               struct amdgpu_ring *ring)
 {
-       WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+       if (!ring || !ring->funcs->emit_wreg)
+               WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+       else
+               amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+                       NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
 }
 
 static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
 
                adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 }
 
-static void si_flush_hdp(struct amdgpu_device *adev)
+static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
-       RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+       if (!ring || !ring->funcs->emit_wreg) {
+               WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+               RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+       } else {
+               amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+       }
 }
 
-static void si_invalidate_hdp(struct amdgpu_device *adev)
+static void si_invalidate_hdp(struct amdgpu_device *adev,
+                             struct amdgpu_ring *ring)
 {
-       WREG32(mmHDP_DEBUG0, 1);
-       RREG32(mmHDP_DEBUG0);
+       if (!ring || !ring->funcs->emit_wreg) {
+               WREG32(mmHDP_DEBUG0, 1);
+               RREG32(mmHDP_DEBUG0);
+       } else {
+               amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+       }
 }
 
 static const struct amdgpu_asic_funcs si_asic_funcs =
 
        return adev->nbio_funcs->get_rev_id(adev);
 }
 
-static void soc15_flush_hdp(struct amdgpu_device *adev)
+static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       adev->nbio_funcs->hdp_flush(adev);
+       adev->nbio_funcs->hdp_flush(adev, ring);
 }
 
-static void soc15_invalidate_hdp(struct amdgpu_device *adev)
+static void soc15_invalidate_hdp(struct amdgpu_device *adev,
+                                struct amdgpu_ring *ring)
 {
-       WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+       if (!ring || !ring->funcs->emit_wreg)
+               WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+       else
+               amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+                       HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
 }
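With soc15_flush_hdp() forwarding the ring, the SOC15 call chain is complete:

	amdgpu_asic_flush_hdp(adev, ring)
	    -> soc15_flush_hdp(adev, ring)
	        -> adev->nbio_funcs->hdp_flush(adev, ring)	/* nbio v6.1 or v7.0 */

so whichever ring (or NULL) the caller picks travels all the way down to the
register write.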
 
 static const struct amdgpu_asic_funcs soc15_asic_funcs =
 
                        >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
 }
 
-static void vi_flush_hdp(struct amdgpu_device *adev)
+static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
-       RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+       if (!ring || !ring->funcs->emit_wreg) {
+               WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+               RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+       } else {
+               amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+       }
 }
 
-static void vi_invalidate_hdp(struct amdgpu_device *adev)
+static void vi_invalidate_hdp(struct amdgpu_device *adev,
+                             struct amdgpu_ring *ring)
 {
-       WREG32(mmHDP_DEBUG0, 1);
-       RREG32(mmHDP_DEBUG0);
+       if (!ring || !ring->funcs->emit_wreg) {
+               WREG32(mmHDP_DEBUG0, 1);
+               RREG32(mmHDP_DEBUG0);
+       } else {
+               amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+       }
 }
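The SI and VI versions repeat the CIK pattern unchanged: these pre-SOC15
parts have no dedicated invalidate register, and writing 1 to HDP_DEBUG0 is
what the driver has long used to invalidate the HDP read cache there.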
 
 static const struct amdgpu_asic_funcs vi_asic_funcs =