}
}
+static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr);
+
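+ /* Translate the IPS disable config into the allow mask cached in the SMU scratch register. */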
+ if (dc->config.disable_ips == 0) {
+ val |= DMUB_IPS1_ALLOW_MASK;
+ val |= DMUB_IPS2_ALLOW_MASK;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
+ val &= ~DMUB_IPS1_ALLOW_MASK;
+ val &= ~DMUB_IPS2_ALLOW_MASK;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ val |= DMUB_IPS1_ALLOW_MASK;
+ val &= ~DMUB_IPS2_ALLOW_MASK;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ val |= DMUB_IPS1_ALLOW_MASK;
+ val |= DMUB_IPS2_ALLOW_MASK;
+ }
+
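+ /* When idle is disallowed, clear both allow bits regardless of the config above. */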
+ if (!allow_idle) {
+ val &= ~DMUB_IPS1_ALLOW_MASK;
+ val &= ~DMUB_IPS2_ALLOW_MASK;
+ }
+
+ dcn35_smu_write_ips_scratch(clk_mgr, val);
+}
+
static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
return ips_supported;
}
+static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return dcn35_smu_read_ips_scratch(clk_mgr);
+}
+
static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
dcn35_init_clocks(clk_mgr);
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
+ .set_idle_state = dcn35_set_idle_state,
+ .get_idle_state = dcn35_get_idle_state
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
enable);
}
-void dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
+int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
- dcn35_smu_send_msg_with_param(
+ return dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_DispPsrExit,
0);
VBIOSSMC_MSG_QueryIPS2Support,
0);
}
+
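+/* MP1_SMN_C2PMSG_71 is repurposed as a scratch register holding the IPS allow/commit state. */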
+void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
+{
+ REG_WRITE(MP1_SMN_C2PMSG_71, param);
+}
+
+uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
+{
+ return REG_READ(MP1_SMN_C2PMSG_71);
+}
void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
-void dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr);
+int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr);
+void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param);
+uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_35_SMU_H_ */
bool replay_skip_crtc_disabled;
bool ignore_pg;/*do nothing, let pmfw control it*/
bool psp_disabled_wa;
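+ /* IPS2 exit tuning, in microseconds: evaluation window and worst-case entry time. */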
+ unsigned int ips2_eval_delay_us;
+ unsigned int ips2_entry_delay_us;
};
struct gpu_info_soc_bounding_box_v1_0;
cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
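+ /* On entry to idle, cache the allow state via set_idle_state before notifying DMCUB. */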
+ if (allow_idle) {
+ if (dc->hwss.set_idle_state)
+ dc->hwss.set_idle_state(dc, true);
+ }
- if (allow_idle)
- udelay(500);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
+ const uint32_t max_num_polls = 10000;
+ uint32_t allow_state = 0;
+ uint32_t commit_state = 0;
+ uint32_t i;
+
if (dc->debug.dmcub_emulation)
return;
if (!dc->idle_optimizations_allowed)
return;
- // Tell PMFW to exit low power state
- if (dc->clk_mgr->funcs->exit_low_power_state)
- dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ if (dc->hwss.get_idle_state &&
+ dc->hwss.set_idle_state &&
+ dc->clk_mgr->funcs->exit_low_power_state) {
+
+ allow_state = dc->hwss.get_idle_state(dc);
+ dc->hwss.set_idle_state(dc, false);
+
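+ /* If IPS2 was allowed, confirm DMCUB actually committed to it before asking PMFW to exit. */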
+ if (allow_state & DMUB_IPS2_ALLOW_MASK) {
+ // Wait for evaluation time
+ udelay(dc->debug.ips2_eval_delay_us);
+ commit_state = dc->hwss.get_idle_state(dc);
+ if (commit_state & DMUB_IPS2_COMMIT_MASK) {
+ // Tell PMFW to exit low power state
+ dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+
+ // Wait for IPS2 entry upper bound
+ udelay(dc->debug.ips2_entry_delay_us);
+ dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+
+ for (i = 0; i < max_num_polls; ++i) {
+ commit_state = dc->hwss.get_idle_state(dc);
+ if (!(commit_state & DMUB_IPS2_COMMIT_MASK))
+ break;
+
+ udelay(1);
+ }
+ ASSERT(i < max_num_polls);
+
+ if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
+ ASSERT(0);
+
+ /* TODO: See if we can return early here - IPS2 should go
+ * back directly to IPS0 and clear the flags, but it will
+ * be safer to directly notify DMCUB of this.
+ */
+ allow_state = dc->hwss.get_idle_state(dc);
+ }
+ }
- // Wait for dmcub to load up
- dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true);
+ dc_dmub_srv_notify_idle(dc, false);
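+ /* If IPS1 was allowed, poll until DMCUB clears its commit bit. */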
+ if (allow_state & DMUB_IPS1_ALLOW_MASK) {
+ for (i = 0; i < max_num_polls; ++i) {
+ commit_state = dc->hwss.get_idle_state(dc);
+ if (!(commit_state & DMUB_IPS1_COMMIT_MASK))
+ break;
- // Notify dmcub disallow idle
- dc_dmub_srv_notify_idle(dc, false);
+ udelay(1);
+ }
+ ASSERT(i < max_num_polls);
+ }
+ }
- // Confirm dmu is powered up
- dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true);
+ if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
+ ASSERT(0);
}
.calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
.block_power_control = dcn35_block_power_control,
.root_clock_control = dcn35_root_clock_control,
+ .set_idle_state = dcn35_set_idle_state,
+ .get_idle_state = dcn35_get_idle_state
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
.disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
+ .ips2_eval_delay_us = 200,
+ .ips2_entry_delay_us = 400
};
static const struct dc_panel_config panel_config_defaults = {
// TODO: review other cases when idle optimization is allowed
- if (!enable) {
- // Tell PMFW to exit low power state
- if (dc->clk_mgr->funcs->exit_low_power_state)
- dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
-
- dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true);
- }
-
- dc_dmub_srv_notify_idle(dc, enable);
-
if (!enable)
- dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true);
+ dc_dmub_srv_exit_low_power_state(dc);
+ else
+ dc_dmub_srv_notify_idle(dc, enable);
return true;
}
dc->hwss.root_clock_control(dc, &pg_update_state, false);
}
}
+
+void dcn35_set_idle_state(const struct dc *dc, bool allow_idle)
+{
+ // TODO: Find a more suitable communication method
+ if (dc->clk_mgr->funcs->set_idle_state)
+ dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle);
+}
+
+uint32_t dcn35_get_idle_state(const struct dc *dc)
+{
+ // TODO: Find a more suitable communication method
+ if (dc->clk_mgr->funcs->get_idle_state)
+ return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr);
+
+ return 0;
+}
struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on);
+
+void dcn35_set_idle_state(const struct dc *dc, bool allow_idle);
+uint32_t dcn35_get_idle_state(const struct dc *dc);
#endif /* __DC_HWSS_DCN35_H__ */
struct pg_block_update *update_state, bool power_on);
void (*set_idle_state)(const struct dc *dc, bool allow_idle);
uint32_t (*get_idle_state)(const struct dc *dc);
-
bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx);