return r;
                        }
                }
+
+               r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
+                               PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
+                               &adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
+               if (r) {
+                       dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
+                       return r;
+               }
        }
 
        return 0;
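Note: once amdgpu_bo_create_kernel() succeeds, the CPU mapping returned in
fw_shared_cpu_addr can be written directly. A minimal sketch of how an
IP-version consumer of this buffer might initialize it (the exact placement in
the vcn_v*.c start path is an assumption, not part of this hunk):

	/* Sketch (assumed usage): clear the shared buffer and advertise
	 * multi-queue support to the firmware via present_flag_0. */
	volatile struct amdgpu_fw_shared *fw_shared =
			adev->vcn.inst[i].fw_shared_cpu_addr;

	memset((void *)fw_shared, 0, sizeof(struct amdgpu_fw_shared));
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);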
        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
+
+               amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
+                                         &adev->vcn.inst[j].fw_shared_gpu_addr,
+                                         (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
+
                if (adev->vcn.indirect_sram) {
                        amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
                                                  &adev->vcn.inst[j].dpg_sram_gpu_addr,
                                                  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
                }                                                                               \
        } while (0)
 
+#define AMDGPU_VCN_MULTI_QUEUE_FLAG    (1 << 8)
+
+enum fw_queue_mode {
+       FW_QUEUE_RING_RESET = 1,
+       FW_QUEUE_DPG_HOLD_OFF = 2,
+};
+
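These mode bits form a handshake with the firmware around queue
(re)programming. A hedged sketch of the intended sequence for the decode
queue (the surrounding ring-setup code is assumed; only the flag and the
struct fields below come from this patch):

	volatile struct amdgpu_fw_shared *fw_shared =
			adev->vcn.inst[i].fw_shared_cpu_addr;

	/* Raise the reset flag before touching the decode ring registers... */
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* ... (re)program the decode ring here ... */

	/* ... and clear it once the ring is ready again. */
	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;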
 enum engine_status_constants {
        UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
        UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
        struct amdgpu_irq_src   irq;
        struct amdgpu_vcn_reg   external;
        struct amdgpu_bo        *dpg_sram_bo;
+       struct amdgpu_bo        *fw_shared_bo;
        struct dpg_pause_state  pause_state;
        void                    *dpg_sram_cpu_addr;
        uint64_t                dpg_sram_gpu_addr;
        uint32_t                *dpg_sram_curr_addr;
        atomic_t                dpg_enc_submission_cnt;
+       void                    *fw_shared_cpu_addr;
+       uint64_t                fw_shared_gpu_addr;
 };
 
 struct amdgpu_vcn {
                int inst_idx, struct dpg_pause_state *new_state);
 };
 
+struct amdgpu_fw_shared_multi_queue {
+       uint8_t decode_queue_mode;
+       uint8_t encode_generalpurpose_queue_mode;
+       uint8_t encode_lowlatency_queue_mode;
+       uint8_t encode_realtime_queue_mode;
+       uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared {
+       uint32_t present_flag_0;
+       uint8_t pad[53];
+       struct amdgpu_fw_shared_multi_queue multi_queue;
+} __attribute__((__packed__));
+
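Because the firmware reads this structure at fixed byte offsets, the packing
attribute and the 53-byte pad are load-bearing: multi_queue must land at byte
offset 57 (4 + 53), giving a total size of 65 bytes. A compile-time guard
along these lines (an assumption, not part of the patch) would lock that in:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	/* Sketch: protect the firmware-facing layout against accidental
	 * field insertion or padding changes. */
	static_assert(offsetof(struct amdgpu_fw_shared, multi_queue) == 57);
	static_assert(sizeof(struct amdgpu_fw_shared) == 65);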
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
 int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vcn_suspend(struct amdgpu_device *adev);