* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
- if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
- if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
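Both hunks above apply the same guard: clamp a 64-bit millisecond value to
the GuC policy limit and to the scheduler timeout before it reaches the
32-bit GuC interface. A stand-alone sketch of the idea (the limit values
below are illustrative assumptions, not the real platform numbers):

	#include <stdint.h>

	#define GUC_POLICY_MAX_MS	(100u * 1000u)	/* assumed policy cap */
	#define SCHED_TIMEOUT_MAX_MS	UINT32_MAX	/* 32-bit API ceiling */

	static uint32_t clamp_timeout_ms(uint64_t value)
	{
		if (value > GUC_POLICY_MAX_MS)
			value = GUC_POLICY_MAX_MS;
		if (value > SCHED_TIMEOUT_MAX_MS)
			value = SCHED_TIMEOUT_MAX_MS;
		return (uint32_t)value;	/* guaranteed to fit after both clamps */
	}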
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
- struct intel_guc *guc = &gt->uc.guc;
-
- intel_guc_invalidate_tlb_guc(guc);
- }
+ with_intel_runtime_pm_if_active(uncore->rpm, wakeref)
+ intel_guc_invalidate_tlb_guc(gt_to_guc(gt));
}
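with_intel_runtime_pm_if_active() is a for-style guard macro, so once the
local guc variable is replaced by a direct gt_to_guc() call the body shrinks
to a single statement and the braces can be dropped. A generic sketch of the
guard-macro pattern (grab/drop and with_ref are invented for illustration;
this is not the i915 macro itself):

	#include <stdio.h>

	static int grab(void)   { puts("grab ref"); return 1; }
	static void drop(int r) { (void)r; puts("drop ref"); }

	#define with_ref(ref) \
		for ((ref) = grab(); (ref); drop(ref), (ref) = 0)

	int main(void)
	{
		int ref;

		with_ref(ref)	/* one-statement body: no braces needed */
			puts("do work while holding the reference");
		return 0;
	}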
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
- if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
+ if (intel_guc_tlb_invalidation_is_available(gt_to_guc(gt)))
guc_ggtt_ct_invalidate(gt);
else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
return guc_to_gt(guc)->i915;
}
+static inline struct intel_guc *gt_to_guc(struct intel_gt *gt)
+{
+ return &gt->uc.guc;
+}
+
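This helper is the heart of the series: every open-coded &gt->uc.guc
dereference now funnels through one accessor, so callers no longer encode
the uc/guc layout. A minimal before/after sketch (structures trimmed to the
one field that matters; not the real i915 layout):

	struct intel_guc { int placeholder; };
	struct intel_uc { struct intel_guc guc; };
	struct intel_gt { struct intel_uc uc; };

	static inline struct intel_guc *gt_to_guc(struct intel_gt *gt)
	{
		return &gt->uc.guc;
	}

	static void example(struct intel_gt *gt)
	{
		struct intel_guc *old_way = &gt->uc.guc;   /* caller hard-codes the layout */
		struct intel_guc *new_way = gt_to_guc(gt); /* layout hidden behind helper */

		(void)old_way;
		(void)new_way;
	}

If the guc member ever moves or becomes a pointer, only gt_to_guc() has to
change.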
void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
struct intel_gt *media_gt = gt->i915->media_gt;
if (instance == OTHER_GUC_INSTANCE)
- return guc_irq_handler(&gt->uc.guc, iir);
+ return guc_irq_handler(gt_to_guc(gt), iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
- return guc_irq_handler(&media_gt->uc.guc, iir);
+ return guc_irq_handler(gt_to_guc(media_gt), iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
iir = raw_reg_read(regs, GEN8_GT_IIR(2));
if (likely(iir)) {
gen6_rps_irq_handler(&gt->rps, iir);
- guc_irq_handler(&gt->uc.guc, iir >> 16);
+ guc_irq_handler(gt_to_guc(gt), iir >> 16);
raw_reg_write(regs, GEN8_GT_IIR(2), iir);
}
}
{
struct intel_gt *gt = data;
- if (intel_guc_slpc_is_used(&gt->uc.guc))
+ if (intel_guc_slpc_is_used(gt_to_guc(gt)))
return false;
else
return HAS_RPS(gt->i915);
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
return sysfs_emit(buff, "%u\n", slpc->ignore_eff_freq);
}
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
int err;
u32 val;
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
intel_wakeref_t wakeref;
u32 mode;
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
u32 factor, mode;
int err;
* thus allowing GuC to control RC6 entry/exit fully instead.
* We will not set the HW ENABLE and EI bits
*/
- if (!intel_guc_rc_enable(&gt->uc.guc))
+ if (!intel_guc_rc_enable(gt_to_guc(gt)))
rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE;
else
rc6->ctl_enable =
struct intel_gt *gt = rc6_to_gt(rc6);
/* Take control of RC6 back from GuC */
- intel_guc_rc_disable(&gt->uc.guc);
+ intel_guc_rc_disable(gt_to_guc(gt));
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (GRAPHICS_VER(i915) >= 9)
{
struct intel_gt *gt = rps_to_gt(rps);
- return &gt->uc.guc.slpc;
+ return &gt_to_guc(gt)->slpc;
}
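rps_to_slpc() now composes two conversion helpers: rps_to_gt() hops from the
RPS state to its gt, and gt_to_guc() hops on to the GuC that embeds the SLPC
state. A compact sketch of this container-hopping idiom (all types below are
invented for illustration):

	struct slpc_state { int placeholder; };
	struct guc_state  { struct slpc_state slpc; };
	struct gt_state   { struct guc_state guc; };
	struct rps_state  { struct gt_state *gt; };

	static inline struct gt_state *rps_to_gt(struct rps_state *rps)
	{
		return rps->gt;
	}

	static inline struct guc_state *gt_to_guc(struct gt_state *gt)
	{
		return &gt->guc;
	}

	static inline struct slpc_state *rps_to_slpc(struct rps_state *rps)
	{
		/* each hop narrows from a container to the state embedded in it */
		return &gt_to_guc(rps_to_gt(rps))->slpc;
	}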
static bool rps_uses_slpc(struct intel_rps *rps)
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
static int slpc_set_freq(struct intel_gt *gt, u32 freq)
{
int err;
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
err = slpc_set_max_freq(slpc, freq);
if (err) {
static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine)
{
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
struct {
u64 power;
int freq;
static int run_test(struct intel_gt *gt, int test_type)
{
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;