#define for_each_tile(tile__, xe__, id__) \
for ((id__) = 0; (id__) < (xe__)->info.tile_count; (id__)++) \
- for_each_if ((tile__) = &(xe__)->tiles[(id__)])
+ for_each_if((tile__) = &(xe__)->tiles[(id__)])
/*
 * FIXME: This only works for now since multi-tile and standalone media
 * are mutually exclusive on the platforms we have today.
 */
#define for_each_gt(gt__, xe__, id__) \
for ((id__) = 0; (id__) < (xe__)->info.gt_count; (id__)++) \
- for_each_if ((gt__) = xe_device_get_gt((xe__), (id__)))
+ for_each_if((gt__) = xe_device_get_gt((xe__), (id__)))
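/*
 * A standalone sketch of the for_each_if() pattern these iterators rely on
 * (for_each_if() comes from drm_util.h and wraps the condition as
 * "if (!(cond)) {} else" so the macro nests safely under an outer if/else).
 * The item/for_each_valid_item names below are hypothetical stand-ins for
 * the xe types, not driver API.
 */
#include <stdio.h>

#define for_each_if(condition) if (!(condition)) {} else

struct item { int valid; };

/* Same shape as for_each_gt() above: the assignment doubles as the filter. */
#define for_each_valid_item(it__, arr__, n__, id__) \
	for ((id__) = 0; (id__) < (n__); (id__)++) \
		for_each_if(((it__) = &(arr__)[(id__)]) && (it__)->valid)

int main(void)
{
	struct item items[] = { { 1 }, { 0 }, { 1 } };
	struct item *it;
	unsigned int id;

	/* Visits items[0] and items[2]; items[1] is filtered out. */
	for_each_valid_item(it, items, 3, id)
		printf("item %u is valid\n", id);

	return 0;
}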
static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
{
#define tile_to_xe(tile__) \
_Generic(tile__, \
- const struct xe_tile *: (const struct xe_device *)((tile__)->xe), \
- struct xe_tile *: (tile__)->xe)
+ const struct xe_tile * : (const struct xe_device *)((tile__)->xe), \
+ struct xe_tile * : (tile__)->xe)
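/*
 * The _Generic selection above exists to preserve const-ness: a
 * const struct xe_tile * yields a const struct xe_device *, while a plain
 * pointer stays mutable. A standalone sketch of the same pattern with
 * hypothetical type names (any C11 compiler):
 */
#include <stdio.h>

struct dev { int id; };
struct tile { struct dev *owner; };

#define tile_to_dev(t__) \
	_Generic(t__, \
		 const struct tile * : (const struct dev *)((t__)->owner), \
		 struct tile * : (t__)->owner)

int main(void)
{
	struct dev d = { .id = 7 };
	struct tile t = { .owner = &d };
	const struct tile *ct = &t;

	printf("%d\n", tile_to_dev(&t)->id);	/* selects struct tile * */
	printf("%d\n", tile_to_dev(ct)->id);	/* selects const struct tile * */
	return 0;
}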
/**
* struct xe_tile - hardware tile structure
BIT(0), BIT(16));
}
- for (i = XE_HW_ENGINE_VECS0, j =0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
+ for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
if (!(gt->info.engine_mask & BIT(i)))
continue;
#define for_each_hw_engine(hwe__, gt__, id__) \
for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
- for_each_if (((hwe__) = (gt__)->hw_engines + (id__)) && \
+ for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
xe_hw_engine_is_valid((hwe__)))
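/*
 * Because the assignment is &&-ed with xe_hw_engine_is_valid(), unpopulated
 * slots in the fixed-size hw_engines array are skipped transparently. A
 * hypothetical caller, sketched on the assumption that gt is a valid
 * struct xe_gt * in driver context:
 */
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;

for_each_hw_engine(hwe, gt, id)
	drm_info(&gt_to_xe(gt)->drm, "engine %u is present\n", id);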
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
rpm_config_reg);
switch (crystal_clock) {
- case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ :
+ case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
return f24_mhz;
- case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ :
+ case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
return f19_2_mhz;
- case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ :
+ case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
return f38_4_mhz;
- case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ :
+ case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
return f25_mhz;
default:
XE_BUG_ON("NOT_POSSIBLE");
#define gt_to_tile(gt__) \
_Generic(gt__, \
- const struct xe_gt *: (const struct xe_tile *)((gt__)->tile), \
- struct xe_gt *: (gt__)->tile)
+ const struct xe_gt * : (const struct xe_tile *)((gt__)->tile), \
+ struct xe_gt * : (gt__)->tile)
#define gt_to_xe(gt__) \
_Generic(gt__, \
- const struct xe_gt *: (const struct xe_device *)(gt_to_tile(gt__)->xe), \
- struct xe_gt *: gt_to_tile(gt__)->xe)
+ const struct xe_gt * : (const struct xe_device *)(gt_to_tile(gt__)->xe), \
+ struct xe_gt * : gt_to_tile(gt__)->xe)
/**
* struct xe_gt - A "Graphics Technology" unit of the GPU
}
#define XE_RTP_PROCESS_CTX_INITIALIZER(arg__) _Generic((arg__), \
- struct xe_hw_engine *: (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_ENGINE }, \
- struct xe_gt *: (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT })
+ struct xe_hw_engine * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_ENGINE }, \
+ struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT })
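/*
 * The initializer dispatches on the pointer's static type, so the same macro
 * seeds a context for either an engine pass or a GT pass. A hypothetical
 * usage sketch, assuming hwe and gt are valid pointers:
 */
struct xe_rtp_process_ctx engine_ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe); /* TYPE_ENGINE */
struct xe_rtp_process_ctx gt_ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);      /* TYPE_GT */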
void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
unsigned long *active_entries,
* Helper macros - not to be used outside this header.
*/
#define _XE_ESC(...) __VA_ARGS__
-#define _XE_COUNT_ARGS(...) _XE_ESC(__XE_COUNT_ARGS(__VA_ARGS__,5,4,3,2,1,))
-#define __XE_COUNT_ARGS(_,_5,_4,_3,_2,X_,...) X_
+#define _XE_COUNT_ARGS(...) _XE_ESC(__XE_COUNT_ARGS(__VA_ARGS__, 5, 4, 3, 2, 1,))
+#define __XE_COUNT_ARGS(_, _5, _4, _3, _2, X_, ...) X_
#define _XE_FIRST(...) _XE_ESC(__XE_FIRST(__VA_ARGS__,))
-#define __XE_FIRST(x_,...) x_
+#define __XE_FIRST(x_, ...) x_
#define _XE_TUPLE_TAIL(...) _XE_ESC(__XE_TUPLE_TAIL(__VA_ARGS__))
-#define __XE_TUPLE_TAIL(x_,...) (__VA_ARGS__)
+#define __XE_TUPLE_TAIL(x_, ...) (__VA_ARGS__)
#define _XE_DROP_FIRST(x_, ...) __VA_ARGS__
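/*
 * _XE_COUNT_ARGS works by appending the descending sequence 5..1 and letting
 * the fixed parameters of __XE_COUNT_ARGS shift the right count into X_. A
 * standalone sketch that verifies the expansions at compile time (C11):
 */
#include <assert.h>

#define _XE_ESC(...) __VA_ARGS__
#define _XE_COUNT_ARGS(...) _XE_ESC(__XE_COUNT_ARGS(__VA_ARGS__, 5, 4, 3, 2, 1,))
#define __XE_COUNT_ARGS(_, _5, _4, _3, _2, X_, ...) X_
#define _XE_FIRST(...) _XE_ESC(__XE_FIRST(__VA_ARGS__,))
#define __XE_FIRST(x_, ...) x_

static_assert(_XE_COUNT_ARGS(a) == 1, "one argument shifts 1 into X_");
static_assert(_XE_COUNT_ARGS(a, b, c) == 3, "three arguments shift 3 into X_");
static_assert(_XE_FIRST(4, 5, 6) == 4, "only the first argument survives");

int main(void) { return 0; }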
*
* XE_RTP_TEST_FOO BANANA XE_RTP_TEST_BAR
*/
-#define XE_RTP_PASTE_FOREACH(prefix_, sep_, args_) _XE_ESC(_XE_RTP_CONCAT(PASTE_,_XE_COUNT_ARGS args_)(prefix_, sep_, args_))
+#define XE_RTP_PASTE_FOREACH(prefix_, sep_, args_) _XE_ESC(_XE_RTP_CONCAT(PASTE_, _XE_COUNT_ARGS args_)(prefix_, sep_, args_))
#define XE_RTP_PASTE_1(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, _XE_FIRST args_)
#define XE_RTP_PASTE_2(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, _XE_FIRST args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_1(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_3(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, _XE_FIRST args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_2(prefix_, sep_, _XE_TUPLE_TAIL args_)
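/*
 * Putting the helpers together: the argument count selects which PASTE_n to
 * run, and each PASTE_n pastes the prefix onto the first tuple element, then
 * recurses on the tail with the separator token in between. A standalone
 * sketch; the concat and separator helpers below are assumed minimal
 * stand-ins for ones defined elsewhere in xe_rtp.h:
 */
#include <assert.h>

#define _XE_ESC(...) __VA_ARGS__
#define _XE_COUNT_ARGS(...) _XE_ESC(__XE_COUNT_ARGS(__VA_ARGS__, 5, 4, 3, 2, 1,))
#define __XE_COUNT_ARGS(_, _5, _4, _3, _2, X_, ...) X_
#define _XE_FIRST(...) _XE_ESC(__XE_FIRST(__VA_ARGS__,))
#define __XE_FIRST(x_, ...) x_
#define _XE_TUPLE_TAIL(...) _XE_ESC(__XE_TUPLE_TAIL(__VA_ARGS__))
#define __XE_TUPLE_TAIL(x_, ...) (__VA_ARGS__)

#define __PASTE_(a_, b_) a_ ## b_
#define _PASTE(a_, b_) __PASTE_(a_, b_)
#define _XE_RTP_CONCAT(a_, b_) _PASTE(XE_RTP_, _PASTE(a_, b_))
#define __XE_RTP_PASTE_SEP_BITWISE_OR |

#define XE_RTP_PASTE_FOREACH(prefix_, sep_, args_) _XE_ESC(_XE_RTP_CONCAT(PASTE_, _XE_COUNT_ARGS args_)(prefix_, sep_, args_))
#define XE_RTP_PASTE_1(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, _XE_FIRST args_)
#define XE_RTP_PASTE_2(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, _XE_FIRST args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_1(prefix_, sep_, _XE_TUPLE_TAIL args_)

#define XE_RTP_TEST_FOO 1
#define XE_RTP_TEST_BAR 2

/* Expands to: XE_RTP_TEST_FOO | XE_RTP_TEST_BAR */
static_assert(XE_RTP_PASTE_FOREACH(TEST_, BITWISE_OR, (FOO, BAR)) == 3,
	      "prefix pasted onto each element, joined by |");

int main(void) { return 0; }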
kmem_cache_create("xe_sched_job_parallel",
sizeof(struct xe_sched_job) +
sizeof(u64) *
- XE_HW_ENGINE_MAX_INSTANCE , 0,
+ XE_HW_ENGINE_MAX_INSTANCE, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!xe_sched_job_parallel_slab) {
kmem_cache_destroy(xe_sched_job_slab);
};
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(METEORLAKE, mmp_ver( i915, guc, mtl, 70, 6, 4)) \
- fw_def(PVC, mmp_ver( xe, guc, pvc, 70, 6, 4)) \
+ fw_def(METEORLAKE, mmp_ver(i915, guc, mtl, 70, 6, 4)) \
+ fw_def(PVC, mmp_ver(xe, guc, pvc, 70, 6, 4)) \
fw_def(DG2, major_ver(i915, guc, dg2, 70, 5)) \
fw_def(DG1, major_ver(i915, guc, dg1, 70, 5)) \
fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 5)) \
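/*
 * XE_GUC_FIRMWARE_DEFS follows the kernel's X-macro pattern: each row defers
 * to the caller-supplied fw_def/mmp_ver/major_ver macros, so a single table
 * can generate MODULE_FIRMWARE() lines, path strings, and version checks
 * alike. A standalone sketch of the technique; all names below are
 * hypothetical stand-ins, not the driver's real helpers:
 */
#include <stdio.h>

/* Hypothetical two-row table in the same shape as XE_GUC_FIRMWARE_DEFS */
#define FW_DEFS(fw_def, major_ver) \
	fw_def(DG2, major_ver(i915, guc, dg2, 70, 5)) \
	fw_def(DG1, major_ver(i915, guc, dg1, 70, 5))

/* One instantiation: turn each row into a "<dir>/<name>_<type>_<major>.bin" path */
#define MAKE_PATH(dir_, type_, name_, major_, minor_) \
	#dir_ "/" #name_ "_" #type_ "_" #major_ ".bin"

#define PRINT_ENTRY(platform_, path_) \
	printf("%-4s -> %s\n", #platform_, path_);

int main(void)
{
	FW_DEFS(PRINT_ENTRY, MAKE_PATH)
	return 0;
}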