uint32_t pipe_id, uint32_t queue_id,
                            uint32_t doorbell_off)
 {
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        struct v10_compute_mqd *m;
        uint32_t mec, pipe;
        int r;
        pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
                 mec, pipe, queue_id);
 
-       spin_lock(&adev->gfx.kiq.ring_lock);
+       spin_lock(&adev->gfx.kiq[0].ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, 7);
        if (r) {
                pr_err("Failed to alloc KIQ (%d).\n", r);
        amdgpu_ring_commit(kiq_ring);
 
 out_unlock:
-       spin_unlock(&adev->gfx.kiq.ring_lock);
+       spin_unlock(&adev->gfx.kiq[0].ring_lock);
        release_queue(adev);
 
        return r;
 
                            uint32_t pipe_id, uint32_t queue_id,
                            uint32_t doorbell_off)
 {
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        struct v10_compute_mqd *m;
        uint32_t mec, pipe;
        int r;
        pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
                 mec, pipe, queue_id);
 
-       spin_lock(&adev->gfx.kiq.ring_lock);
+       spin_lock(&adev->gfx.kiq[0].ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, 7);
        if (r) {
                pr_err("Failed to alloc KIQ (%d).\n", r);
        amdgpu_ring_commit(kiq_ring);
 
 out_unlock:
-       spin_unlock(&adev->gfx.kiq.ring_lock);
+       spin_unlock(&adev->gfx.kiq[0].ring_lock);
        release_queue(adev);
 
        return r;
 
                              uint32_t pipe_id, uint32_t queue_id,
                              uint32_t doorbell_off)
 {
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        struct v11_compute_mqd *m;
        uint32_t mec, pipe;
        int r;
        pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
                 mec, pipe, queue_id);
 
-       spin_lock(&adev->gfx.kiq.ring_lock);
+       spin_lock(&adev->gfx.kiq[0].ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, 7);
        if (r) {
                pr_err("Failed to alloc KIQ (%d).\n", r);
        amdgpu_ring_commit(kiq_ring);
 
 out_unlock:
-       spin_unlock(&adev->gfx.kiq.ring_lock);
+       spin_unlock(&adev->gfx.kiq[0].ring_lock);
        release_queue(adev);
 
        return r;
 
                            uint32_t pipe_id, uint32_t queue_id,
                            uint32_t doorbell_off)
 {
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        struct v9_mqd *m;
        uint32_t mec, pipe;
        int r;
        pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
                 mec, pipe, queue_id);
 
-       spin_lock(&adev->gfx.kiq.ring_lock);
+       spin_lock(&adev->gfx.kiq[0].ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, 7);
        if (r) {
                pr_err("Failed to alloc KIQ (%d).\n", r);
        amdgpu_ring_commit(kiq_ring);
 
 out_unlock:
-       spin_unlock(&adev->gfx.kiq.ring_lock);
+       spin_unlock(&adev->gfx.kiq[0].ring_lock);
        release_queue(adev);
 
        return r;
 
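The four amdkfd hiq_mqd_load hunks above (the two gfx v10 variants, v11 and v9) share one submission shape: take the ring_lock of KIQ instance 0, reserve 7 dwords on its ring, emit the HIQ map packet, commit, and unlock. A minimal sketch of that shared skeleton, with the packet emission elided and the function name purely hypothetical:

static int kfd_hiq_submit_sketch(struct amdgpu_device *adev)
{
        struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        int r;

        spin_lock(&adev->gfx.kiq[0].ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, 7);     /* 7 dwords, as in the hunks above */
        if (r) {
                pr_err("Failed to alloc KIQ (%d).\n", r);
                goto out_unlock;
        }

        /* ... emit the HIQ map packet for (mec, pipe, queue_id) here ... */

        amdgpu_ring_commit(kiq_ring);

out_unlock:
        spin_unlock(&adev->gfx.kiq[0].ring_lock);
        return r;
}
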
                             struct amdgpu_ring *ring,
                             struct amdgpu_irq_src *irq)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        int r = 0;
 
        spin_lock_init(&kiq->ring_lock);
 
 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
 
        amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
 }
 {
        int r;
        u32 *hpd;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
 
        r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
        int r, i;
 
        /* create MQD for KIQ */
-       ring = &adev->gfx.kiq.ring;
+       ring = &adev->gfx.kiq[0].ring;
        if (!adev->enable_mes_kiq && !ring->mqd_obj) {
                /* originaly the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must
                 * otherwise hypervisor trigger SAVE_VF fail after driver unloaded which mean MQD
                                      &ring->mqd_ptr);
        }
 
-       ring = &adev->gfx.kiq.ring;
+       ring = &adev->gfx.kiq[0].ring;
        kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
        amdgpu_bo_free_kernel(&ring->mqd_obj,
                              &ring->mqd_gpu_addr,
 
 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        int i, r = 0;
 
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
 
-       spin_lock(&adev->gfx.kiq.ring_lock);
+       spin_lock(&adev->gfx.kiq[0].ring_lock);
        if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
                                        adev->gfx.num_compute_rings)) {
-               spin_unlock(&adev->gfx.kiq.ring_lock);
+               spin_unlock(&adev->gfx.kiq[0].ring_lock);
                return -ENOMEM;
        }
 
                kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
                                           RESET_QUEUES, 0, 0);
 
-       if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
+       if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
                r = amdgpu_ring_test_helper(kiq_ring);
-       spin_unlock(&adev->gfx.kiq.ring_lock);
+       spin_unlock(&adev->gfx.kiq[0].ring_lock);
 
        return r;
 }
 
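amdgpu_gfx_disable_kcq above budgets one unmap packet per compute ring, emits kiq_unmap_queues with RESET_QUEUES for each, and only runs the KIQ ring test when its scheduler is ready and no job hang is in flight. A condensed sketch that reconnects the elided loop header (reconstructed for illustration, written against the local kiq pointer, which aliases adev->gfx.kiq[0]):

        spin_lock(&kiq->ring_lock);
        if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
                                        adev->gfx.num_compute_rings)) {
                spin_unlock(&kiq->ring_lock);
                return -ENOMEM;
        }

        /* one unmap packet per kernel compute queue */
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
                                           RESET_QUEUES, 0, 0);

        if (kiq_ring->sched.ready && !adev->job_hang)
                r = amdgpu_ring_test_helper(kiq_ring);
        spin_unlock(&kiq->ring_lock);
        return r;
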
 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        uint64_t queue_mask = 0;
        int r, i;
 
 
        DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
                                                        kiq_ring->queue);
-       spin_lock(&adev->gfx.kiq.ring_lock);
+       spin_lock(&adev->gfx.kiq[0].ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
                                        adev->gfx.num_compute_rings +
                                        kiq->pmf->set_resources_size);
        if (r) {
                DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-               spin_unlock(&adev->gfx.kiq.ring_lock);
+               spin_unlock(&adev->gfx.kiq[0].ring_lock);
                return r;
        }
 
                kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
 
        r = amdgpu_ring_test_helper(kiq_ring);
-       spin_unlock(&adev->gfx.kiq.ring_lock);
+       spin_unlock(&adev->gfx.kiq[0].ring_lock);
        if (r)
                DRM_ERROR("KCQ enable failed\n");
 
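The enable path above mirrors the disable path: the allocation covers one map packet per compute ring plus a set_resources packet (typically 7 and 8 dwords respectively on these generations), each compute ring is mapped through the KIQ, and the ring test runs before the lock is dropped. A hedged sketch, with the queue-mask construction and set_resources emission summarized as a comment rather than copied from the patch:

        spin_lock(&adev->gfx.kiq[0].ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
                                        adev->gfx.num_compute_rings +
                                        kiq->pmf->set_resources_size);
        if (r) {
                spin_unlock(&adev->gfx.kiq[0].ring_lock);
                return r;
        }

        /* ... emit set_resources with the accumulated queue_mask ... */
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

        r = amdgpu_ring_test_helper(kiq_ring);
        spin_unlock(&adev->gfx.kiq[0].ring_lock);
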
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq, reg_val_offs = 0, value = 0;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *ring = &kiq->ring;
 
        if (amdgpu_device_skip_hw_access(adev))
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *ring = &kiq->ring;
 
        BUG_ON(!ring->funcs->emit_wreg);
 
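The two hunks above sit in the KIQ-mediated register accessors (amdgpu_kiq_rreg/amdgpu_kiq_wreg), used when direct MMIO access is not allowed, e.g. under SRIOV runtime. A condensed, reconstructed-for-illustration sketch of the read side; the write-back slot management and the retry loop are trimmed:

        spin_lock_irqsave(&kiq->ring_lock, flags);      /* callers may be in atomic context */
        amdgpu_ring_alloc(ring, 32);
        ring->funcs->emit_rreg(ring, reg, reg_val_offs);        /* ask the KIQ to read the register */
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r) {
                amdgpu_ring_undo(ring);
                spin_unlock_irqrestore(&kiq->ring_lock, flags);
                return 0;
        }
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        value = adev->wb.wb[reg_val_offs];      /* KIQ wrote the value to this write-back slot */
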
        struct amdgpu_ce                ce;
        struct amdgpu_me                me;
        struct amdgpu_mec               mec;
-       struct amdgpu_kiq               kiq;
+       struct amdgpu_kiq               kiq[AMDGPU_MAX_GC_INSTANCES];
        struct amdgpu_imu               imu;
        bool                            rs64_enable; /* firmware format */
        const struct firmware           *me_fw; /* ME firmware */
 
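The header hunk above is the pivot of the series: struct amdgpu_gfx now carries one KIQ per GC instance, and every existing caller is mechanically switched to instance 0. A hypothetical accessor (not part of the patch) makes the indexing explicit and shows where a future per-instance parameter would plug in:

/* Hypothetical helper for illustration only; the patch itself open-codes
 * &adev->gfx.kiq[0] at every call site. */
static inline struct amdgpu_kiq *amdgpu_gfx_get_kiq(struct amdgpu_device *adev,
                                                    int inst)
{
        return &adev->gfx.kiq[inst];    /* inst < AMDGPU_MAX_GC_INSTANCES */
}

Once callers take such an instance argument, the hard-coded [0] indexing in the hunks below can be replaced without further structural changes.
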
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *ring = &kiq->ring;
        signed long r, cnt = 0;
        unsigned long flags;
 
        struct amdgpu_device *adev = kiq_ring->adev;
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
 
-       if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
+       if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
                amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
                return;
        }
 
 static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
 {
-       adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
+       adev->gfx.kiq[0].pmf = &gfx_v10_0_kiq_pm4_funcs;
 }
 
 static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
        /* KIQ event */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
                              GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
-                             &adev->gfx.kiq.irq);
+                             &adev->gfx.kiq[0].irq);
        if (r)
                return r;
 
                        return r;
                }
 
-               kiq = &adev->gfx.kiq;
+               kiq = &adev->gfx.kiq[0];
                r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
                if (r)
                        return r;
        amdgpu_gfx_mqd_sw_fini(adev);
 
        if (!adev->enable_mes_kiq) {
-               amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
+               amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
                amdgpu_gfx_kiq_fini(adev);
        }
 
                                      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
                        break;
                }
-               adev->gfx.kiq.ring.sched.ready = false;
+               adev->gfx.kiq[0].ring.sched.ready = false;
        }
        udelay(50);
 }
 #ifndef BRING_UP_DEBUG
 static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        int r, i;
 
        if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
        struct amdgpu_ring *ring;
        int r;
 
-       ring = &adev->gfx.kiq.ring;
+       ring = &adev->gfx.kiq[0].ring;
 
        r = amdgpu_bo_reserve(ring->mqd_obj, false);
        if (unlikely(r != 0))
 #ifndef BRING_UP_DEBUG
 static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        int i;
 
 {
        int i, r = 0;
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        unsigned long flags;
 
                                             enum amdgpu_interrupt_state state)
 {
        uint32_t tmp, target;
-       struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+       struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
 
        if (ring->me == 1)
                target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
                             struct amdgpu_iv_entry *entry)
 {
        u8 me_id, pipe_id, queue_id;
-       struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+       struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
 
        me_id = (entry->ring_id & 0x0c) >> 2;
        pipe_id = (entry->ring_id & 0x03) >> 0;
 {
        int i;
 
-       adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
+       adev->gfx.kiq[0].ring.funcs = &gfx_v10_0_ring_funcs_kiq;
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
        adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
        adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;
 
-       adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
-       adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;
+       adev->gfx.kiq[0].irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
+       adev->gfx.kiq[0].irq.funcs = &gfx_v10_0_kiq_irq_funcs;
 
        adev->gfx.priv_reg_irq.num_types = 1;
        adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
 
        struct amdgpu_device *adev = kiq_ring->adev;
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
 
-       if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
+       if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
                amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
                return;
        }
 
 static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
 {
-       adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
+       adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
 }
 
 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
                        return r;
                }
 
-               kiq = &adev->gfx.kiq;
+               kiq = &adev->gfx.kiq[0];
                r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
                if (r)
                        return r;
        amdgpu_gfx_mqd_sw_fini(adev);
 
        if (!adev->enable_mes_kiq) {
-               amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
+               amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
                amdgpu_gfx_kiq_fini(adev);
        }
 
                WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
        }
 
-       adev->gfx.kiq.ring.sched.ready = enable;
+       adev->gfx.kiq[0].ring.sched.ready = enable;
 
        udelay(50);
 }
 #ifndef BRING_UP_DEBUG
 static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        int r, i;
 
        if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
        struct amdgpu_ring *ring;
        int r;
 
-       ring = &adev->gfx.kiq.ring;
+       ring = &adev->gfx.kiq[0].ring;
 
        r = amdgpu_bo_reserve(ring->mqd_obj, false);
        if (unlikely(r != 0))
 #ifndef BRING_UP_DEBUG
 static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        int i, r = 0;
 
                kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
                                           PREEMPT_QUEUES, 0, 0);
 
-       if (adev->gfx.kiq.ring.sched.ready)
+       if (adev->gfx.kiq[0].ring.sched.ready)
                r = amdgpu_ring_test_helper(kiq_ring);
 
        return r;
 {
        int i, r = 0;
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        unsigned long flags;
 
                                             enum amdgpu_interrupt_state state)
 {
        uint32_t tmp, target;
-       struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+       struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
 
        target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
        target += ring->pipe;
 {
        int i;
 
-       adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq;
+       adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;
 
                return r;
        }
 
-       kiq = &adev->gfx.kiq;
+       kiq = &adev->gfx.kiq[0];
        r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
        if (r)
                return r;
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
        amdgpu_gfx_mqd_sw_fini(adev);
-       amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
+       amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
        amdgpu_gfx_kiq_fini(adev);
 
        gfx_v8_0_mec_fini(adev);
                WREG32(mmCP_MEC_CNTL, 0);
        } else {
                WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               adev->gfx.kiq.ring.sched.ready = false;
+               adev->gfx.kiq[0].ring.sched.ready = false;
        }
        udelay(50);
 }
 
 static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        uint64_t queue_mask = 0;
        int r, i;
 
        struct amdgpu_ring *ring;
        int r;
 
-       ring = &adev->gfx.kiq.ring;
+       ring = &adev->gfx.kiq[0].ring;
 
        r = amdgpu_bo_reserve(ring->mqd_obj, false);
        if (unlikely(r != 0))
        if (r)
                return r;
 
-       ring = &adev->gfx.kiq.ring;
+       ring = &adev->gfx.kiq[0].ring;
        r = amdgpu_ring_test_helper(ring);
        if (r)
                return r;
 static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
 {
        int r, i;
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
 
        r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
        if (r)
 {
        int i;
 
-       adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;
+       adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
 
 
 static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
 {
-       adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
+       adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
 }
 
 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
                return r;
        }
 
-       kiq = &adev->gfx.kiq;
+       kiq = &adev->gfx.kiq[0];
        r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
        if (r)
                return r;
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
        amdgpu_gfx_mqd_sw_fini(adev);
-       amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
+       amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
        amdgpu_gfx_kiq_fini(adev);
 
        gfx_v9_0_mec_fini(adev);
        } else {
                WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
                        (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-               adev->gfx.kiq.ring.sched.ready = false;
+               adev->gfx.kiq[0].ring.sched.ready = false;
        }
        udelay(50);
 }
        struct amdgpu_ring *ring;
        int r;
 
-       ring = &adev->gfx.kiq.ring;
+       ring = &adev->gfx.kiq[0].ring;
 
        r = amdgpu_bo_reserve(ring->mqd_obj, false);
        if (unlikely(r != 0))
         */
        if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
                mutex_lock(&adev->srbm_mutex);
-               soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
-                               adev->gfx.kiq.ring.pipe,
-                               adev->gfx.kiq.ring.queue, 0);
-               gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
+               soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
+                               adev->gfx.kiq[0].ring.pipe,
+                               adev->gfx.kiq[0].ring.queue, 0);
+               gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
                soc15_grbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);
        }
        unsigned long flags;
        uint32_t seq, reg_val_offs = 0;
        uint64_t value = 0;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *ring = &kiq->ring;
 
        BUG_ON(!ring->funcs->emit_rreg);
 {
        int i, r = 0;
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        unsigned long flags;
 
 {
        int i;
 
-       adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
+       adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
 
        /* For SRIOV run time, driver shouldn't access the register through MMIO
         * Directly use kiq to do the vm invalidation instead
         */
-       if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
+       if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
            down_read_trylock(&adev->reset_domain->sem)) {
                struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
        uint16_t queried_pasid;
        bool ret;
        u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
-       struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
 
        if (amdgpu_emu_mode == 0 && ring->sched.ready) {
-               spin_lock(&adev->gfx.kiq.ring_lock);
+               spin_lock(&adev->gfx.kiq[0].ring_lock);
                /* 2 dwords flush + 8 dwords fence */
                amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
                if (r) {
                        amdgpu_ring_undo(ring);
-                       spin_unlock(&adev->gfx.kiq.ring_lock);
+                       spin_unlock(&adev->gfx.kiq[0].ring_lock);
                        return -ETIME;
                }
 
                amdgpu_ring_commit(ring);
-               spin_unlock(&adev->gfx.kiq.ring_lock);
+               spin_unlock(&adev->gfx.kiq[0].ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
 
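This gmc flush_gpu_tlb_pasid hunk issues the TLB invalidation through the KIQ when the ring is up: reserve space for the invalidate packet plus a polling fence, emit both under the instance-0 ring_lock, then wait for the fence outside the lock. A condensed sketch; the kiq_invalidate_tlbs argument names are reconstructed from the callback signature, not copied from the patch:

        spin_lock(&adev->gfx.kiq[0].ring_lock);
        /* 2 dwords flush + 8 dwords fence */
        amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
        kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r) {
                amdgpu_ring_undo(ring);
                spin_unlock(&adev->gfx.kiq[0].ring_lock);
                return -ETIME;
        }

        amdgpu_ring_commit(ring);
        spin_unlock(&adev->gfx.kiq[0].ring_lock);
        r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
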
        /* For SRIOV run time, driver shouldn't access the register through MMIO
         * Directly use kiq to do the vm invalidation instead
         */
-       if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) &&
+       if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
                struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
                const unsigned eng = 17;
        uint32_t seq;
        uint16_t queried_pasid;
        bool ret;
-       struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
 
        if (amdgpu_emu_mode == 0 && ring->sched.ready) {
-               spin_lock(&adev->gfx.kiq.ring_lock);
+               spin_lock(&adev->gfx.kiq[0].ring_lock);
                /* 2 dwords flush + 8 dwords fence */
                amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
                if (r) {
                        amdgpu_ring_undo(ring);
-                       spin_unlock(&adev->gfx.kiq.ring_lock);
+                       spin_unlock(&adev->gfx.kiq[0].ring_lock);
                        return -ETIME;
                }
 
                amdgpu_ring_commit(ring);
-               spin_unlock(&adev->gfx.kiq.ring_lock);
+               spin_unlock(&adev->gfx.kiq[0].ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
 
        /* This is necessary for a HW workaround under SRIOV as well
         * as GFXOFF under bare metal
         */
-       if (adev->gfx.kiq.ring.sched.ready &&
+       if (adev->gfx.kiq[0].ring.sched.ready &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
            down_read_trylock(&adev->reset_domain->sem)) {
                uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
        uint16_t queried_pasid;
        bool ret;
        u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
-       struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
 
        if (amdgpu_in_reset(adev))
                return -EIO;
                if (vega20_xgmi_wa)
                        ndw += kiq->pmf->invalidate_tlbs_size;
 
-               spin_lock(&adev->gfx.kiq.ring_lock);
+               spin_lock(&adev->gfx.kiq[0].ring_lock);
                /* 2 dwords flush + 8 dwords fence */
                amdgpu_ring_alloc(ring, ndw);
                if (vega20_xgmi_wa)
                r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
                if (r) {
                        amdgpu_ring_undo(ring);
-                       spin_unlock(&adev->gfx.kiq.ring_lock);
+                       spin_unlock(&adev->gfx.kiq[0].ring_lock);
                        up_read(&adev->reset_domain->sem);
                        return -ETIME;
                }
 
                amdgpu_ring_commit(ring);
-               spin_unlock(&adev->gfx.kiq.ring_lock);
+               spin_unlock(&adev->gfx.kiq[0].ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
 
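On the gmc v9 path the dword budget is computed up front: the base covers the invalidate packet plus the 8-dword polling fence, and the Vega20 XGMI workaround adds a second invalidate. With a 2-dword invalidate packet (the "2 dwords flush + 8 dwords fence" noted in the other hunks) that is 10 dwords, or 12 with the workaround. A sketch of that arithmetic, reconstructed for illustration:

        ndw = kiq->pmf->invalidate_tlbs_size + 8;       /* invalidate + polling fence */
        if (vega20_xgmi_wa)
                ndw += kiq->pmf->invalidate_tlbs_size;  /* second invalidate for the WA */
        amdgpu_ring_alloc(ring, ndw);
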
 
 static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        int r;
 
        if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
 {
        struct amdgpu_ring *ring;
 
-       spin_lock_init(&adev->gfx.kiq.ring_lock);
+       spin_lock_init(&adev->gfx.kiq[0].ring_lock);
 
-       ring = &adev->gfx.kiq.ring;
+       ring = &adev->gfx.kiq[0].ring;
 
        ring->me = 3;
        ring->pipe = 1;
        struct amdgpu_ring *ring;
 
        if (pipe == AMDGPU_MES_KIQ_PIPE)
-               ring = &adev->gfx.kiq.ring;
+               ring = &adev->gfx.kiq[0].ring;
        else if (pipe == AMDGPU_MES_SCHED_PIPE)
                ring = &adev->mes.ring;
        else
                amdgpu_ucode_release(&adev->mes.fw[pipe]);
        }
 
-       amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
-                             &adev->gfx.kiq.ring.mqd_gpu_addr,
-                             &adev->gfx.kiq.ring.mqd_ptr);
+       amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
+                             &adev->gfx.kiq[0].ring.mqd_gpu_addr,
+                             &adev->gfx.kiq[0].ring.mqd_ptr);
 
        amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
                              &adev->mes.ring.mqd_gpu_addr,
                              &adev->mes.ring.mqd_ptr);
 
-       amdgpu_ring_fini(&adev->gfx.kiq.ring);
+       amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
        amdgpu_ring_fini(&adev->mes.ring);
 
        amdgpu_mes_fini(adev);
 
        mes_v10_1_enable(adev, true);
 
-       mes_v10_1_kiq_setting(&adev->gfx.kiq.ring);
+       mes_v10_1_kiq_setting(&adev->gfx.kiq[0].ring);
 
        r = mes_v10_1_queue_init(adev);
        if (r)
         * MES uses KIQ ring exclusively so driver cannot access KIQ ring
         * with MES enabled.
         */
-       adev->gfx.kiq.ring.sched.ready = false;
+       adev->gfx.kiq[0].ring.sched.ready = false;
        adev->mes.ring.sched.ready = true;
 
        return 0;
 
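On the MES side, KIQ instance 0 is programmed (kiq_setting), used to bring up the MES queue, and then marked adev->gfx.kiq[0].ring.sched.ready = false, since MES owns that ring exclusively from then on. A hedged sketch of what the mes kiq_enable_queue helpers in these hunks do, reconstructed for illustration rather than copied from the patch:

        if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
                return -EINVAL;

        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
        if (r)
                return r;

        /* map the MES scheduler ring through the KIQ */
        kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);

        return amdgpu_ring_test_helper(kiq_ring);
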
 
 static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
        int r;
 
        if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
        int r;
 
        if (pipe == AMDGPU_MES_KIQ_PIPE)
-               ring = &adev->gfx.kiq.ring;
+               ring = &adev->gfx.kiq[0].ring;
        else if (pipe == AMDGPU_MES_SCHED_PIPE)
                ring = &adev->mes.ring;
        else
 {
        struct amdgpu_ring *ring;
 
-       spin_lock_init(&adev->gfx.kiq.ring_lock);
+       spin_lock_init(&adev->gfx.kiq[0].ring_lock);
 
-       ring = &adev->gfx.kiq.ring;
+       ring = &adev->gfx.kiq[0].ring;
 
        ring->me = 3;
        ring->pipe = 1;
        struct amdgpu_ring *ring;
 
        if (pipe == AMDGPU_MES_KIQ_PIPE)
-               ring = &adev->gfx.kiq.ring;
+               ring = &adev->gfx.kiq[0].ring;
        else if (pipe == AMDGPU_MES_SCHED_PIPE)
                ring = &adev->mes.ring;
        else
                amdgpu_ucode_release(&adev->mes.fw[pipe]);
        }
 
-       amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
-                             &adev->gfx.kiq.ring.mqd_gpu_addr,
-                             &adev->gfx.kiq.ring.mqd_ptr);
+       amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
+                             &adev->gfx.kiq[0].ring.mqd_gpu_addr,
+                             &adev->gfx.kiq[0].ring.mqd_ptr);
 
        amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
                              &adev->mes.ring.mqd_gpu_addr,
                              &adev->mes.ring.mqd_ptr);
 
-       amdgpu_ring_fini(&adev->gfx.kiq.ring);
+       amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
        amdgpu_ring_fini(&adev->mes.ring);
 
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 
        mes_v11_0_enable(adev, true);
 
-       mes_v11_0_kiq_setting(&adev->gfx.kiq.ring);
+       mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
 
        r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
        if (r)
        }
 
        if (amdgpu_sriov_vf(adev)) {
-               mes_v11_0_kiq_dequeue(&adev->gfx.kiq.ring);
+               mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring);
                mes_v11_0_kiq_clear(adev);
        }
 
         * MES uses KIQ ring exclusively so driver cannot access KIQ ring
         * with MES enabled.
         */
-       adev->gfx.kiq.ring.sched.ready = false;
+       adev->gfx.kiq[0].ring.sched.ready = false;
        adev->mes.ring.sched.ready = true;
 
        return 0;