struct xe_device *xe = test->priv;
struct xe_reg_sr *reg_sr = &xe->gt[0].reg_sr;
const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
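+ /* Build a GT-typed context: the test table is matched against GT0 */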
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(&xe->gt[0]);
unsigned long idx, count = 0;
xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe);
- xe_rtp_process(param->entries, reg_sr, &xe->gt[0], NULL);
+ xe_rtp_process(&ctx, param->entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
const u8 mocs_read_idx = gt->mocs.uc_index;
u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
const struct xe_rtp_entry lrc_was[] = {
/*
* Some blitter commands do not have a field for MOCS, those
{}
};
- xe_rtp_process(lrc_was, &hwe->reg_lrc, gt, hwe);
+ xe_rtp_process(&ctx, lrc_was, &hwe->reg_lrc);
}
static void
const u8 mocs_read_idx = gt->mocs.uc_index;
u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
- const struct xe_rtp_entry engine_was[] = {
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+ const struct xe_rtp_entry engine_entries[] = {
/*
* RING_CMD_CCTL specifies the default MOCS entry that will be
* used by the command streamer when executing commands that
{}
};
- xe_rtp_process(engine_was, &hwe->reg_sr, gt, hwe);
+ xe_rtp_process(&ctx, engine_entries, &hwe->reg_sr);
}
static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
*/
void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
{
- xe_rtp_process(register_whitelist, &hwe->reg_whitelist, hwe->gt, hwe);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+
+ xe_rtp_process(&ctx, register_whitelist, &hwe->reg_whitelist);
}
/**
* the values to the registers that have matching rules.
*/
-static bool rule_matches(struct xe_gt *gt,
+static bool rule_matches(const struct xe_device *xe,
+ struct xe_gt *gt,
struct xe_hw_engine *hwe,
const struct xe_rtp_entry *entry)
{
- const struct xe_device *xe = gt_to_xe(gt);
const struct xe_rtp_rule *r;
unsigned int i;
bool match;
match = xe->info.step.graphics >= r->step_start &&
xe->info.step.graphics < r->step_end;
break;
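+ /* Integrated vs discrete is a device-wide property: only xe->info is consulted */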
+ case XE_RTP_MATCH_INTEGRATED:
+ match = !xe->info.is_dgfx;
+ break;
+ case XE_RTP_MATCH_DISCRETE:
+ match = xe->info.is_dgfx;
+ break;
case XE_RTP_MATCH_ENGINE_CLASS:
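+ /* Engine-class rules cannot match without an engine context */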
+ if (drm_WARN_ON(&xe->drm, !hwe))
+ return false;
+
match = hwe->class == r->engine_class;
break;
case XE_RTP_MATCH_NOT_ENGINE_CLASS:
+ if (drm_WARN_ON(&xe->drm, !hwe))
+ return false;
+
match = hwe->class != r->engine_class;
break;
case XE_RTP_MATCH_FUNC:
match = r->match_func(gt, hwe);
break;
- case XE_RTP_MATCH_INTEGRATED:
- match = !xe->info.is_dgfx;
- break;
- case XE_RTP_MATCH_DISCRETE:
- match = xe->info.is_dgfx;
- break;
-
default:
XE_WARN_ON(r->match_type);
}
xe_reg_sr_add(sr, &sr_entry);
}
-static void rtp_process_one(const struct xe_rtp_entry *entry, struct xe_gt *gt,
+static void rtp_process_one(const struct xe_rtp_entry *entry,
+ struct xe_device *xe, struct xe_gt *gt,
struct xe_hw_engine *hwe, struct xe_reg_sr *sr)
{
const struct xe_rtp_action *action;
u32 mmio_base;
unsigned int i;
- if (!rule_matches(gt, hwe, entry))
+ if (!rule_matches(xe, gt, hwe, entry))
return;
for (i = 0, action = &entry->actions[0]; i < entry->n_actions; action++, i++) {
}
}
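+/*
+ * Unpack the processing context into the (hwe, gt, xe) triplet used for
+ * rule matching. A GT-typed context has no engine, so *hwe is set to NULL.
+ */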
+static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
+ struct xe_hw_engine **hwe,
+ struct xe_gt **gt,
+ struct xe_device **xe)
+{
+ switch (ctx->type) {
+ case XE_RTP_PROCESS_TYPE_GT:
+ *hwe = NULL;
+ *gt = ctx->gt;
+ *xe = gt_to_xe(*gt);
+ break;
+ case XE_RTP_PROCESS_TYPE_ENGINE:
+ *hwe = ctx->hwe;
+ *gt = (*hwe)->gt;
+ *xe = gt_to_xe(*gt);
+ break;
+ }
+}
+
/**
* xe_rtp_process - Process all rtp @entries, adding the matching ones to @sr
+ * @ctx: The context for processing the table, wrapping the gt or hwe to match against
* @entries: Table with RTP definitions
* @sr: Where to add an entry to with the values for matching. This can be
* viewed as the "coalesced view" of multiple the tables. The bits for each
* register set are expected not to collide with previously added entries
- * @gt: The GT to be used for matching rules
- * @hwe: Engine instance to use for matching rules and as mmio base
*
* Walk the table pointed to by @entries (with an empty sentinel) and add all
* entries with matching rules to @sr. If @ctx wraps an engine, its mmio_base
* is used to calculate the right register offset.
*/
-void xe_rtp_process(const struct xe_rtp_entry *entries, struct xe_reg_sr *sr,
- struct xe_gt *gt, struct xe_hw_engine *hwe)
+void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
+ const struct xe_rtp_entry *entries, struct xe_reg_sr *sr)
{
const struct xe_rtp_entry *entry;
+ struct xe_hw_engine *hwe = NULL;
+ struct xe_gt *gt = NULL;
+ struct xe_device *xe = NULL;
+
+ rtp_get_context(ctx, &hwe, &gt, &xe);
for (entry = entries; entry && entry->name; entry++) {
if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
struct xe_hw_engine *each_hwe;
enum xe_hw_engine_id id;
for_each_hw_engine(each_hwe, gt, id)
- rtp_process_one(entry, gt, each_hwe, sr);
+ rtp_process_one(entry, xe, gt, each_hwe, sr);
} else {
- rtp_process_one(entry, gt, hwe, sr);
+ rtp_process_one(entry, xe, gt, hwe, sr);
}
}
}
XE_RTP_PASTE_FOREACH(ACTION_, COMMA, (__VA_ARGS__)) \
}
-void xe_rtp_process(const struct xe_rtp_entry *entries, struct xe_reg_sr *sr,
- struct xe_gt *gt, struct xe_hw_engine *hwe);
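+/*
+ * Build a struct xe_rtp_process_ctx from either a struct xe_gt * or a
+ * struct xe_hw_engine *: _Generic() selects the matching type tag at
+ * compile time, and any other pointer type fails the build.
+ */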
+#define XE_RTP_PROCESS_CTX_INITIALIZER(arg__) _Generic((arg__), \
+ struct xe_hw_engine *: (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_ENGINE }, \
+ struct xe_gt *: (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT })
+
+void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
+ const struct xe_rtp_entry *entries,
+ struct xe_reg_sr *sr);
/* Match functions to be used with XE_RTP_MATCH_FUNC */
u8 flags;
};
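+/* Discriminates which union member of struct xe_rtp_process_ctx is valid */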
+enum xe_rtp_process_type {
+ XE_RTP_PROCESS_TYPE_GT,
+ XE_RTP_PROCESS_TYPE_ENGINE,
+};
+
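+/*
+ * Context for xe_rtp_process(): carries the gt or engine the rules are
+ * matched against. Build it with XE_RTP_PROCESS_CTX_INITIALIZER().
+ */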
+struct xe_rtp_process_ctx {
+ union {
+ struct xe_gt *gt;
+ struct xe_hw_engine *hwe;
+ };
+ enum xe_rtp_process_type type;
+};
+
#endif
void xe_tuning_process_gt(struct xe_gt *gt)
{
- xe_rtp_process(gt_tunings, &gt->reg_sr, gt, NULL);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+
+ xe_rtp_process(&ctx, gt_tunings, &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_gt);
*/
void xe_tuning_process_lrc(struct xe_hw_engine *hwe)
{
- xe_rtp_process(lrc_tunings, &hwe->reg_lrc, hwe->gt, hwe);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+
+ xe_rtp_process(&ctx, lrc_tunings, &hwe->reg_lrc);
}
*/
void xe_wa_process_gt(struct xe_gt *gt)
{
- xe_rtp_process(gt_was, &gt->reg_sr, gt, NULL);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+
+ xe_rtp_process(&ctx, gt_was, &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_wa_process_gt);
*/
void xe_wa_process_engine(struct xe_hw_engine *hwe)
{
- xe_rtp_process(engine_was, &hwe->reg_sr, hwe->gt, hwe);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+
+ xe_rtp_process(&ctx, engine_was, &hwe->reg_sr);
}
/**
*/
void xe_wa_process_lrc(struct xe_hw_engine *hwe)
{
- xe_rtp_process(lrc_was, &hwe->reg_lrc, hwe->gt, hwe);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+
+ xe_rtp_process(&ctx, lrc_was, &hwe->reg_lrc);
}