        if (dc->debug.disable_idle_power_optimizations)
                return;
+       if (dc->caps.ips_support && dc->config.disable_ips)
+               return;
+
        if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
                if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
                        return;
        dc->idle_optimizations_allowed = allow;
}
+bool dc_dmub_is_ips_idle_state(struct dc *dc)
+{
+       uint32_t idle_state = 0;
+
+       if (dc->debug.disable_idle_power_optimizations)
+               return false;
+
+       if (!dc->caps.ips_support || dc->config.disable_ips)
+               return false;
+
+       if (dc->hwss.get_idle_state)
+               idle_state = dc->hwss.get_idle_state(dc);
+
+       if ((idle_state & DMUB_IPS1_ALLOW_MASK) ||
+           (idle_state & DMUB_IPS2_ALLOW_MASK))
+               return true;
+
+       return false;
+}
+
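The standalone C sketch below (not part of the patch) mirrors the decision made by the new dc_dmub_is_ips_idle_state() helper: read an idle-state word from an optional hook and report idle when either IPS1 or IPS2 entry is allowed. Every fake_* name and the mask bit positions are assumptions for illustration; the real driver uses struct dc, dc->hwss.get_idle_state() and the DMUB_IPS*_ALLOW_MASK definitions from the DMUB headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions; the real masks come from the DMUB headers. */
#define FAKE_IPS1_ALLOW_MASK (1u << 0)
#define FAKE_IPS2_ALLOW_MASK (1u << 1)

struct fake_dc {
        bool ips_support;                 /* stands in for dc->caps.ips_support */
        bool disable_ips;                 /* stands in for dc->config.disable_ips */
        uint32_t (*get_idle_state)(void); /* stands in for dc->hwss.get_idle_state */
};

static bool fake_is_ips_idle_state(const struct fake_dc *dc)
{
        uint32_t idle_state = 0;

        if (!dc->ips_support || dc->disable_ips)
                return false;

        if (dc->get_idle_state)
                idle_state = dc->get_idle_state();

        /* Idle when either IPS1 or IPS2 entry is currently allowed. */
        return (idle_state & (FAKE_IPS1_ALLOW_MASK | FAKE_IPS2_ALLOW_MASK)) != 0;
}

static uint32_t fake_hw_idle_state(void)
{
        return FAKE_IPS2_ALLOW_MASK; /* pretend the firmware allows IPS2 entry */
}

int main(void)
{
        struct fake_dc dc = { .ips_support = true, .get_idle_state = fake_hw_idle_state };

        printf("in IPS idle state: %d\n", fake_is_ips_idle_state(&dc));
        return 0;
}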
/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
struct dc_cursor_attributes *cursor_attr);
void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc);
        struct clock_source *old_clk = pipe_ctx_old->clock_source;
        /* Reset pipe which is seamless boot stream. */
-       if (!pipe_ctx_old->plane_state) {
+       if (!pipe_ctx_old->plane_state &&
+           dc->res_pool->hubbub->funcs->program_det_size &&
+           dc->res_pool->hubbub->funcs->wait_for_det_apply) {
                dc->res_pool->hubbub->funcs->program_det_size(
                                dc->res_pool->hubbub, pipe_ctx_old->plane_res.hubp->inst, 0);
                /* Wait det size changed. */
        if (dc->clk_mgr->funcs->set_low_power_state)
                dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
-       if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER) {
-               if (!dc->idle_optimizations_allowed) {
-                       dc_dmub_srv_notify_idle(dc, true);
-                       dc->idle_optimizations_allowed = true;
-               }
-       }
+       if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER)
+               dc_allow_idle_optimizations(dc, true);
}
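A minimal stand-in, sketched below and not taken from the driver, shows what is gained by routing the low-power path through a single dc_allow_idle_optimizations(dc, true) call: every caller goes through the same gating (debug override, IPS configuration, already-applied state) instead of notifying DMUB directly. All fake_* names are hypothetical; the already-applied guard mirrors the removed !dc->idle_optimizations_allowed check.

#include <stdbool.h>
#include <stdio.h>

struct fake_dc {
        bool disable_idle_power_optimizations; /* debug override */
        bool ips_support;
        bool disable_ips;
        bool idle_optimizations_allowed;
};

/* Placeholder for the DMUB notification; only records the decision here. */
static void fake_notify_idle(struct fake_dc *dc, bool allow)
{
        dc->idle_optimizations_allowed = allow;
        printf("notify DMUB: idle %s\n", allow ? "allowed" : "disallowed");
}

/* Single entry point that applies all of the gating in one place. */
static void fake_allow_idle_optimizations(struct fake_dc *dc, bool allow)
{
        if (dc->disable_idle_power_optimizations)
                return;

        if (dc->ips_support && dc->disable_ips)
                return;

        if (dc->idle_optimizations_allowed == allow)
                return;

        fake_notify_idle(dc, allow);
}

int main(void)
{
        struct fake_dc dc = { .ips_support = true };

        /* Low-power path: one call instead of an open-coded notify block. */
        fake_allow_idle_optimizations(&dc, true);

        /* With IPS disabled by configuration, the same call becomes a no-op. */
        dc.idle_optimizations_allowed = false;
        dc.disable_ips = true;
        fake_allow_idle_optimizations(&dc, true);
        printf("allowed after disable_ips: %d\n", dc.idle_optimizations_allowed);
        return 0;
}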
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
                        struct pg_block_update *update_state, bool power_on);
        void (*root_clock_control)(struct dc *dc,
                        struct pg_block_update *update_state, bool power_on);
+       void (*set_idle_state)(const struct dc *dc, bool allow_idle);
+       uint32_t (*get_idle_state)(const struct dc *dc);
        bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
                        const struct dc_state *cur_ctx,
        void (*set_low_power_state)(struct clk_mgr *clk_mgr);
        void (*exit_low_power_state)(struct clk_mgr *clk_mgr);
        bool (*is_ips_supported)(struct clk_mgr *clk_mgr);
+       void (*set_idle_state)(struct clk_mgr *clk_mgr, bool allow_idle);
+       uint32_t (*get_idle_state)(struct clk_mgr *clk_mgr);
        void (*init_clocks)(struct clk_mgr *clk_mgr);
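The new set_idle_state()/get_idle_state() hooks are optional per-ASIC function pointers, so callers are expected to NULL-check them before calling, as with the other clk_mgr hooks. The standalone sketch below (not driver code; all fake_*/dcn_fake_* names are made up) illustrates that calling convention.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_clk_mgr;

struct fake_clk_mgr_funcs {
        void (*set_idle_state)(struct fake_clk_mgr *clk_mgr, bool allow_idle);
        uint32_t (*get_idle_state)(struct fake_clk_mgr *clk_mgr);
};

struct fake_clk_mgr {
        const struct fake_clk_mgr_funcs *funcs;
        uint32_t idle_state; /* stand-in for whatever the firmware tracks */
};

static void dcn_fake_set_idle_state(struct fake_clk_mgr *clk_mgr, bool allow_idle)
{
        clk_mgr->idle_state = allow_idle ? 1u : 0u;
}

static uint32_t dcn_fake_get_idle_state(struct fake_clk_mgr *clk_mgr)
{
        return clk_mgr->idle_state;
}

static const struct fake_clk_mgr_funcs dcn_fake_funcs = {
        .set_idle_state = dcn_fake_set_idle_state,
        .get_idle_state = dcn_fake_get_idle_state,
};

int main(void)
{
        struct fake_clk_mgr clk_mgr = { .funcs = &dcn_fake_funcs };

        /* Guard every optional hook with a NULL check before calling it. */
        if (clk_mgr.funcs->set_idle_state)
                clk_mgr.funcs->set_idle_state(&clk_mgr, true);

        if (clk_mgr.funcs->get_idle_state)
                printf("idle state word: 0x%x\n",
                       (unsigned int)clk_mgr.funcs->get_idle_state(&clk_mgr));
        return 0;
}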
                funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
                funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up;
+               funcs->should_detect = dmub_dcn35_should_detect;
                break;
        default: