*/
                        if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
                            acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+                           !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
                            !acrtc_state->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_enable(acrtc_state->stream);
                } else {
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                                if (amdgpu_dm_crc_window_is_activated(crtc)) {
                                        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
-                                       acrtc->dm_irq_params.crc_window.update_win = true;
-                                       acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
+                                       acrtc->dm_irq_params.window_param.update_win = true;
+                                       acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
                                        spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
                                        crc_rd_wrk->crtc = crtc;
                                        spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
 
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
        spin_lock_irq(&drm_dev->event_lock);
-       acrtc->dm_irq_params.crc_window.x_start = 0;
-       acrtc->dm_irq_params.crc_window.y_start = 0;
-       acrtc->dm_irq_params.crc_window.x_end = 0;
-       acrtc->dm_irq_params.crc_window.y_end = 0;
-       acrtc->dm_irq_params.crc_window.activated = false;
-       acrtc->dm_irq_params.crc_window.update_win = false;
-       acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
+       acrtc->dm_irq_params.window_param.roi.x_start = 0;
+       acrtc->dm_irq_params.window_param.roi.y_start = 0;
+       acrtc->dm_irq_params.window_param.roi.x_end = 0;
+       acrtc->dm_irq_params.window_param.roi.y_end = 0;
+       acrtc->dm_irq_params.window_param.activated = false;
+       acrtc->dm_irq_params.window_param.update_win = false;
+       acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
        spin_unlock_irq(&drm_dev->event_lock);
 }
 
        }
 }
 
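+/*
+ * Deferred work to forward the CRC window ROI to DC. Runs from process
+ * context because it takes the dc_lock mutex, which the vblank handler
+ * that schedules this work cannot acquire.
+ */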
+static void
+amdgpu_dm_forward_crc_window(struct work_struct *work)
+{
+       struct crc_fw_work *crc_fw_wrk;
+       struct amdgpu_display_manager *dm;
+
+       crc_fw_wrk = container_of(work, struct crc_fw_work, forward_roi_work);
+       dm = crc_fw_wrk->dm;
+
+       mutex_lock(&dm->dc_lock);
+       dc_stream_forward_crc_window(dm->dc, &crc_fw_wrk->roi, crc_fw_wrk->stream, crc_fw_wrk->is_stop_cmd);
+       mutex_unlock(&dm->dc_lock);
+
+       kfree(crc_fw_wrk);
+}
+
 bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
 {
        struct drm_device *drm_dev = crtc->dev;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        bool ret = false;
 
        spin_lock_irq(&drm_dev->event_lock);
-       ret = acrtc->dm_irq_params.crc_window.activated;
+       ret = acrtc->dm_irq_params.window_param.activated;
        spin_unlock_irq(&drm_dev->event_lock);
 
        return ret;
                        if (adev->dm.crc_rd_wrk) {
                                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                                spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
+
                                if (adev->dm.crc_rd_wrk->crtc == crtc) {
-                                       dc_stream_stop_dmcu_crc_win_update(stream_state->ctx->dc,
-                                                                       dm_crtc_state->stream);
+                                       /* stop ROI update on this crtc */
+                                       dc_stream_forward_crc_window(stream_state->ctx->dc,
+                                                       NULL, stream_state, true);
                                        adev->dm.crc_rd_wrk->crtc = NULL;
                                }
                                spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
        enum amdgpu_dm_pipe_crc_source cur_crc_src;
        struct amdgpu_crtc *acrtc = NULL;
        struct amdgpu_device *adev = NULL;
-       struct crc_rd_work *crc_rd_wrk = NULL;
-       struct crc_params *crc_window = NULL, tmp_window;
+       struct crc_rd_work *crc_rd_wrk;
+       struct crc_fw_work *crc_fw_wrk;
        unsigned long flags1, flags2;
-       struct crtc_position position;
-       uint32_t v_blank;
-       uint32_t v_back_porch;
-       uint32_t crc_window_latch_up_line;
-       struct dc_crtc_timing *timing_out;
 
        if (crtc == NULL)
                return;
        spin_lock_irqsave(&drm_dev->event_lock, flags1);
        stream_state = acrtc->dm_irq_params.stream;
        cur_crc_src = acrtc->dm_irq_params.crc_src;
-       timing_out = &stream_state->timing;
 
        /* Early return if CRC capture is not enabled. */
        if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
                goto cleanup;
 
-       if (dm_is_crc_source_crtc(cur_crc_src)) {
-               if (acrtc->dm_irq_params.crc_window.activated) {
-                       if (acrtc->dm_irq_params.crc_window.update_win) {
-                               if (acrtc->dm_irq_params.crc_window.skip_frame_cnt) {
-                                       acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1;
-                                       goto cleanup;
-                               }
-                               crc_window = &tmp_window;
-
-                               tmp_window.windowa_x_start =
-                                                       acrtc->dm_irq_params.crc_window.x_start;
-                               tmp_window.windowa_y_start =
-                                                       acrtc->dm_irq_params.crc_window.y_start;
-                               tmp_window.windowa_x_end =
-                                                       acrtc->dm_irq_params.crc_window.x_end;
-                               tmp_window.windowa_y_end =
-                                                       acrtc->dm_irq_params.crc_window.y_end;
-                               tmp_window.windowb_x_start =
-                                                       acrtc->dm_irq_params.crc_window.x_start;
-                               tmp_window.windowb_y_start =
-                                                       acrtc->dm_irq_params.crc_window.y_start;
-                               tmp_window.windowb_x_end =
-                                                       acrtc->dm_irq_params.crc_window.x_end;
-                               tmp_window.windowb_y_end =
-                                                       acrtc->dm_irq_params.crc_window.y_end;
-
-                               dc_stream_forward_dmcu_crc_window(stream_state->ctx->dc,
-                                                                       stream_state, crc_window);
-
-                               acrtc->dm_irq_params.crc_window.update_win = false;
-
-                               dc_stream_get_crtc_position(stream_state->ctx->dc, &stream_state, 1,
-                                       &position.vertical_count,
-                                       &position.nominal_vcount);
-
-                               v_blank = timing_out->v_total - timing_out->v_border_top -
-                                       timing_out->v_addressable - timing_out->v_border_bottom;
-
-                               v_back_porch = v_blank - timing_out->v_front_porch -
-                                       timing_out->v_sync_width;
-
-                               crc_window_latch_up_line = v_back_porch + timing_out->v_sync_width;
-
-                               /* take 3 lines margin*/
-                               if ((position.vertical_count + 3) >= crc_window_latch_up_line)
-                                       acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1;
-                               else
-                                       acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
-                       } else {
-                               if (acrtc->dm_irq_params.crc_window.skip_frame_cnt == 0) {
-                                       if (adev->dm.crc_rd_wrk) {
-                                               crc_rd_wrk = adev->dm.crc_rd_wrk;
-                                               spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2);
-                                               crc_rd_wrk->phy_inst =
-                                                       stream_state->link->link_enc_hw_inst;
-                                               spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2);
-                                               schedule_work(&crc_rd_wrk->notify_ta_work);
-                                       }
-                               } else {
-                                       acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1;
-                               }
-                       }
+       if (!dm_is_crc_source_crtc(cur_crc_src))
+               goto cleanup;
+
+       if (!acrtc->dm_irq_params.window_param.activated)
+               goto cleanup;
+
+       if (acrtc->dm_irq_params.window_param.update_win) {
+               if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
+                       acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
+                       goto cleanup;
+               }
+
+               /* prepare work for dmub to update ROI */
+               crc_fw_wrk = kzalloc(sizeof(*crc_fw_wrk), GFP_ATOMIC);
+               if (!crc_fw_wrk)
+                       goto cleanup;
+
+               INIT_WORK(&crc_fw_wrk->forward_roi_work, amdgpu_dm_forward_crc_window);
+               crc_fw_wrk->dm = &adev->dm;
+               crc_fw_wrk->stream = stream_state;
+               crc_fw_wrk->roi.x_start = acrtc->dm_irq_params.window_param.roi.x_start;
+               crc_fw_wrk->roi.y_start = acrtc->dm_irq_params.window_param.roi.y_start;
+               crc_fw_wrk->roi.x_end = acrtc->dm_irq_params.window_param.roi.x_end;
+               crc_fw_wrk->roi.y_end = acrtc->dm_irq_params.window_param.roi.y_end;
+               schedule_work(&crc_fw_wrk->forward_roi_work);
+
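+               /* ROI forwarded; skip one frame before CRC reads resume */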
+               acrtc->dm_irq_params.window_param.update_win = false;
+               acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
+
+       } else {
+               if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
+                       acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
+                       goto cleanup;
+               }
+
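+               /* ROI unchanged; ask PSP (via the TA work) to read back the CRC */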
+               if (adev->dm.crc_rd_wrk) {
+                       crc_rd_wrk = adev->dm.crc_rd_wrk;
+                       spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2);
+                       crc_rd_wrk->phy_inst = stream_state->link->link_enc_hw_inst;
+                       spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2);
+                       schedule_work(&crc_rd_wrk->notify_ta_work);
                }
        }
 
 
 };
 
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-struct crc_window_parm {
-       uint16_t x_start;
-       uint16_t y_start;
-       uint16_t x_end;
-       uint16_t y_end;
+struct crc_window_param {
+       struct crc_region roi;
        /* CRC window is activated or not */
        bool activated;
        /* Update crc window during vertical blank or not */
        bool update_win;
        /* skip reading/writing for few frames */
        int skip_frame_cnt;
 };
 
+/* read_work for driver to call PSP to read the CRC */
 struct crc_rd_work {
        struct work_struct notify_ta_work;
        /* To protect crc_rd_work carried fields */
        spinlock_t crc_rd_work_lock;
        struct drm_crtc *crtc;
        uint8_t phy_inst;
 };
+
+/* forward_work for driver to forward ROI to dmub */
+struct crc_fw_work {
+       struct work_struct forward_roi_work;
+       struct amdgpu_display_manager *dm;
+       struct dc_stream_state *stream;
+       struct crc_region roi;
+       bool is_stop_cmd;
+};
 #endif
 
 static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source)
 
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+                          !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
+#endif
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
 
 #include "link_hwss.h"
 #include "dc/dc_dmub_srv.h"
 
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+#include "amdgpu_dm_psr.h"
+#endif
+
 struct dmub_debugfs_trace_header {
        uint32_t entry_count;
        uint32_t reserved[3];
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
        spin_lock_irq(&drm_dev->event_lock);
-       acrtc->dm_irq_params.crc_window.x_start = (uint16_t) val;
-       acrtc->dm_irq_params.crc_window.update_win = false;
+       acrtc->dm_irq_params.window_param.roi.x_start = (uint16_t) val;
+       acrtc->dm_irq_params.window_param.update_win = false;
        spin_unlock_irq(&drm_dev->event_lock);
 
        return 0;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
        spin_lock_irq(&drm_dev->event_lock);
-       *val = acrtc->dm_irq_params.crc_window.x_start;
+       *val = acrtc->dm_irq_params.window_param.roi.x_start;
        spin_unlock_irq(&drm_dev->event_lock);
 
        return 0;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
        spin_lock_irq(&drm_dev->event_lock);
-       acrtc->dm_irq_params.crc_window.y_start = (uint16_t) val;
-       acrtc->dm_irq_params.crc_window.update_win = false;
+       acrtc->dm_irq_params.window_param.roi.y_start = (uint16_t) val;
+       acrtc->dm_irq_params.window_param.update_win = false;
        spin_unlock_irq(&drm_dev->event_lock);
 
        return 0;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
        spin_lock_irq(&drm_dev->event_lock);
-       *val = acrtc->dm_irq_params.crc_window.y_start;
+       *val = acrtc->dm_irq_params.window_param.roi.y_start;
        spin_unlock_irq(&drm_dev->event_lock);
 
        return 0;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
        spin_lock_irq(&drm_dev->event_lock);
-       acrtc->dm_irq_params.crc_window.x_end = (uint16_t) val;
-       acrtc->dm_irq_params.crc_window.update_win = false;
+       acrtc->dm_irq_params.window_param.roi.x_end = (uint16_t) val;
+       acrtc->dm_irq_params.window_param.update_win = false;
        spin_unlock_irq(&drm_dev->event_lock);
 
        return 0;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
        spin_lock_irq(&drm_dev->event_lock);
-       *val = acrtc->dm_irq_params.crc_window.x_end;
+       *val = acrtc->dm_irq_params.window_param.roi.x_end;
        spin_unlock_irq(&drm_dev->event_lock);
 
        return 0;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
        spin_lock_irq(&drm_dev->event_lock);
-       acrtc->dm_irq_params.crc_window.y_end = (uint16_t) val;
-       acrtc->dm_irq_params.crc_window.update_win = false;
+       acrtc->dm_irq_params.window_param.roi.y_end = (uint16_t) val;
+       acrtc->dm_irq_params.window_param.update_win = false;
        spin_unlock_irq(&drm_dev->event_lock);
 
        return 0;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
        spin_lock_irq(&drm_dev->event_lock);
-       *val = acrtc->dm_irq_params.crc_window.y_end;
+       *val = acrtc->dm_irq_params.window_param.roi.y_end;
        spin_unlock_irq(&drm_dev->event_lock);
 
        return 0;
                return 0;
 
        if (val) {
+               new_acrtc = to_amdgpu_crtc(new_crtc);
+               mutex_lock(&adev->dm.dc_lock);
+               /* PSR may write to OTG CRC window control register,
+                * so disable PSR before starting secure_display.
+                */
+               amdgpu_dm_psr_disable(new_acrtc->dm_irq_params.stream);
+
                spin_lock_irq(&adev_to_drm(adev)->event_lock);
                spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
                if (crc_rd_wrk->crtc) {
                        old_crtc = crc_rd_wrk->crtc;
                        old_acrtc = to_amdgpu_crtc(old_crtc);
                }
-               new_acrtc = to_amdgpu_crtc(new_crtc);
 
                if (old_crtc && old_crtc != new_crtc) {
-                       old_acrtc->dm_irq_params.crc_window.activated = false;
-                       old_acrtc->dm_irq_params.crc_window.update_win = false;
-                       old_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
+                       old_acrtc->dm_irq_params.window_param.activated = false;
+                       old_acrtc->dm_irq_params.window_param.update_win = false;
+                       old_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
 
-                       new_acrtc->dm_irq_params.crc_window.activated = true;
-                       new_acrtc->dm_irq_params.crc_window.update_win = true;
-                       new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
+                       new_acrtc->dm_irq_params.window_param.activated = true;
+                       new_acrtc->dm_irq_params.window_param.update_win = true;
+                       new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
                        crc_rd_wrk->crtc = new_crtc;
                } else {
-                       new_acrtc->dm_irq_params.crc_window.activated = true;
-                       new_acrtc->dm_irq_params.crc_window.update_win = true;
-                       new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
+                       new_acrtc->dm_irq_params.window_param.activated = true;
+                       new_acrtc->dm_irq_params.window_param.update_win = true;
+                       new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
                        crc_rd_wrk->crtc = new_crtc;
                }
                spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
                spin_unlock_irq(&adev_to_drm(adev)->event_lock);
+               mutex_unlock(&adev->dm.dc_lock);
        }
 
        return 0;
 
 #ifdef CONFIG_DEBUG_FS
        enum amdgpu_dm_pipe_crc_source crc_src;
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-       struct crc_window_parm crc_window;
+       struct crc_window_param window_param;
 #endif
 #endif
 };
 
 }
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
-                            struct crc_params *crc_window)
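+/* Build a secure display ROI command, queue it to the DMUB and execute it. */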
+static inline void
+dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
+               struct crc_region *roi, struct otg_phy_mux *mux_mapping, bool is_stop)
 {
-       int i;
-       struct dmcu *dmcu = dc->res_pool->dmcu;
-       struct pipe_ctx *pipe;
-       struct crc_region tmp_win, *crc_win;
-       struct otg_phy_mux mapping_tmp, *mux_mapping;
-
-       /*crc window can't be null*/
-       if (!crc_window)
-               return false;
-
-       if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
-               crc_win = &tmp_win;
-               mux_mapping = &mapping_tmp;
-               /*set crc window*/
-               tmp_win.x_start = crc_window->windowa_x_start;
-               tmp_win.y_start = crc_window->windowa_y_start;
-               tmp_win.x_end = crc_window->windowa_x_end;
-               tmp_win.y_end = crc_window->windowa_y_end;
-
-               for (i = 0; i < MAX_PIPES; i++) {
-                       pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-                       if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
-                               break;
-               }
-
-               /* Stream not found */
-               if (i == MAX_PIPES)
-                       return false;
-
+       union dmub_rb_cmd cmd = {0};
 
-               /*set mux routing info*/
-               mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
-               mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
+       cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
+       cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;
 
-               dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
+       if (is_stop) {
+               cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+               cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
        } else {
-               DC_LOG_DC("dmcu is not initialized");
-               return false;
+               cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+               cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
+               cmd.secure_display.roi_info.x_start = roi->x_start;
+               cmd.secure_display.roi_info.y_start = roi->y_start;
+               cmd.secure_display.roi_info.x_end = roi->x_end;
+               cmd.secure_display.roi_info.y_end = roi->y_end;
        }
 
-       return true;
+       dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dmub_srv);
 }
 
-bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
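+/* Legacy path: forward (or stop) the ROI through the DMCU firmware. */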
+static inline void
+dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
+               struct crc_region *roi, struct otg_phy_mux *mux_mapping, bool is_stop)
 {
-       int i;
-       struct dmcu *dmcu = dc->res_pool->dmcu;
-       struct pipe_ctx *pipe;
-       struct otg_phy_mux mapping_tmp, *mux_mapping;
+       if (is_stop)
+               dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
+       else
+               dmcu->funcs->forward_crc_window(dmcu, roi, mux_mapping);
+}
 
-       if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
-               mux_mapping = &mapping_tmp;
+bool
+dc_stream_forward_crc_window(struct dc *dc,
+               struct crc_region *roi, struct dc_stream_state *stream, bool is_stop)
+{
+       struct dmcu *dmcu;
+       struct dc_dmub_srv *dmub_srv;
+       struct otg_phy_mux mux_mapping;
+       struct pipe_ctx *pipe;
+       int i;
 
-               for (i = 0; i < MAX_PIPES; i++) {
-                       pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-                       if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
-                               break;
-               }
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
+                       break;
+       }
 
-               /* Stream not found */
-               if (i == MAX_PIPES)
-                       return false;
+       /* Stream not found */
+       if (i == MAX_PIPES)
+               return false;
 
+       mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
+       mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
 
-               /*set mux routing info*/
-               mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
-               mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
+       dmcu = dc->res_pool->dmcu;
+       dmub_srv = dc->ctx->dmub_srv;
 
-               dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
-       } else {
-               DC_LOG_DC("dmcu is not initialized");
+       /* forward to dmub */
+       if (dmub_srv)
+               dc_stream_forward_dmub_crc_window(dmub_srv, roi, &mux_mapping, is_stop);
+       /* forward to dmcu */
+       else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+               dc_stream_forward_dmcu_crc_window(dmcu, roi, &mux_mapping, is_stop);
+       else
                return false;
-       }
 
        return true;
 }
-#endif
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
 
 /**
  * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 
                                 unsigned int *nom_v_pos);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
-                            struct crc_params *crc_window);
-bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc,
-                                struct dc_stream_state *stream);
+bool dc_stream_forward_crc_window(struct dc *dc,
+               struct crc_region *roi,
+               struct dc_stream_state *stream,
+               bool is_stop);
 #endif
 
 bool dc_stream_configure_crc(struct dc *dc,
 
        enum display_endpoint_type ep_type;
 };
 
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
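+/* ROI of a secure display CRC window, in pixel coordinates */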
+struct crc_region {
+       uint16_t x_start;
+       uint16_t y_start;
+       uint16_t x_end;
+       uint16_t y_end;
+};
+
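+/* Routing info mapping an OTG instance to its PHY output */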
+struct otg_phy_mux {
+       uint8_t phy_output_num;
+       uint8_t otg_output_num;
+};
+#endif
+
 #endif /* DC_TYPES_H_ */
 
        bool auto_load_dmcu;
 };
 
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-struct crc_region {
-       uint16_t x_start;
-       uint16_t y_start;
-       uint16_t x_end;
-       uint16_t y_end;
-};
-
-struct otg_phy_mux {
-       uint8_t phy_output_num;
-       uint8_t otg_output_num;
-};
-#endif
-
 struct dmcu_funcs {
        bool (*dmcu_init)(struct dmcu *dmcu);
        bool (*load_iram)(struct dmcu *dmcu,
 
        /**
         * Command type used for all VBIOS interface commands.
         */
+       /**
+        * Command type used for all SECURE_DISPLAY commands.
+        */
+       DMUB_CMD__SECURE_DISPLAY = 85,
 
        /**
         * Command type used to set DPIA HPD interrupt state
        } data;
 };
 
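+/**
+ * enum dmub_cmd_secure_display_type - Sub-commands of DMUB_CMD__SECURE_DISPLAY.
+ */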
+enum dmub_cmd_secure_display_type {
+       DMUB_CMD__SECURE_DISPLAY_TEST_CMD = 0,
+       DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE,
+       DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY
+};
+
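+/**
+ * struct dmub_rb_cmd_secure_display - Data passed with a DMUB_CMD__SECURE_DISPLAY command.
+ */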
+struct dmub_rb_cmd_secure_display {
+       struct dmub_cmd_header header;
+       struct dmub_cmd_roi_info {
+               uint16_t x_start;
+               uint16_t x_end;
+               uint16_t y_start;
+               uint16_t y_end;
+               uint8_t otg_id;
+               uint8_t phy_id;
+       } roi_info;
+};
+
 /**
  * union dmub_rb_cmd - DMUB inbox command.
  */
         * Definition of a DMUB_CMD__QUERY_HPD_STATE command.
         */
        struct dmub_rb_cmd_query_hpd_state query_hpd;
+       /**
+        * Definition of a DMUB_CMD__SECURE_DISPLAY command.
+        */
+       struct dmub_rb_cmd_secure_display secure_display;
        /**
         * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
         */