return (src_w != dst_w || src_h != dst_h);
}
+static bool intel_plane_do_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ if (!plane->async_flip)
+ return false;
+
+ if (!new_crtc_state->uapi.async_flip)
+ return false;
+
+ /*
+ * On display 13+ the first async flip may need to be overridden
+ * (done as a sync flip) so that the watermark levels can be
+ * changed as part of the optimization.
+ * So for those platforms, only do the async flip if the previous
+ * flip was also async.
+ * Platforms before display 13 always do the async flip.
+ */
+ return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
+}
+
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
needs_scaling(new_plane_state))))
new_crtc_state->disable_lp_wm = true;
- if (new_crtc_state->uapi.async_flip && plane->async_flip)
+ if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
new_plane_state->do_async_flip = true;
return 0;
return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
}
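+/*
+ * Check whether only the minimal WM0 watermark should be used for a
+ * plane: on display 13+ this is the case while async flips are being
+ * done on the plane.
+ */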
+static bool
+use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ return DISPLAY_VER(i915) >= 13 &&
+ crtc_state->uapi.async_flip &&
+ plane->async_flip;
+}
+
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
uint_fixed_16_16_t selected_result;
u32 blocks, lines, min_ddb_alloc = 0;
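+ /* WM levels above WM0 are not used during async flips on display 13+ */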
- if (latency == 0) {
+ if (latency == 0 ||
+ (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
/* reject it */
result->min_ddb_alloc = U16_MAX;
return;