drm/xe/guc: Add support for workaround KLVs
authorBadal Nilawar <badal.nilawar@intel.com>
Fri, 5 Apr 2024 08:42:30 +0000 (14:12 +0530)
committerMichal Wajdeczko <michal.wajdeczko@intel.com>
Tue, 9 Apr 2024 10:54:02 +0000 (12:54 +0200)
To prevent running out of bits, new workaround (w/a) enable flags are
being added via a KLV system instead of a 32 bit flags word.

v2: GuC version check > 70.10 is not needed as baseline xe does not
    support anything below 70.19
v3: Use 64 bit ggtt address for future
    compatibility (John Harrison/Daniele)
v4: %s/PAGE_SIZE/SZ_4K/ (Michal)

Cc: John Harrison <John.C.Harrison@intel.com>
Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240405084231.3620848-2-badal.nilawar@intel.com
drivers/gpu/drm/xe/xe_guc_ads.c
drivers/gpu/drm/xe/xe_guc_ads_types.h
drivers/gpu/drm/xe/xe_guc_fwif.h

index e025f3e10c9bfd1b0de65b91228a17ec1f12e87a..0a8f27243c844d4fb875a808470245af83e82566 100644 (file)
@@ -80,6 +80,10 @@ ads_to_map(struct xe_guc_ads *ads)
  *      +---------------------------------------+
  *      | padding                               |
  *      +---------------------------------------+ <== 4K aligned
+ *      | w/a KLVs                              |
+ *      +---------------------------------------+
+ *      | padding                               |
+ *      +---------------------------------------+ <== 4K aligned
  *      | capture lists                         |
  *      +---------------------------------------+
  *      | padding                               |
@@ -131,6 +135,11 @@ static size_t guc_ads_golden_lrc_size(struct xe_guc_ads *ads)
        return PAGE_ALIGN(ads->golden_lrc_size);
 }
 
+static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
+{
+       return PAGE_ALIGN(ads->ads_waklv_size);
+}
+
 static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
 {
        /* FIXME: Allocate a proper capture list */
@@ -167,12 +176,22 @@ static size_t guc_ads_golden_lrc_offset(struct xe_guc_ads *ads)
        return PAGE_ALIGN(offset);
 }
 
+static size_t guc_ads_waklv_offset(struct xe_guc_ads *ads)
+{
+       u32 offset;
+
+       offset = guc_ads_golden_lrc_offset(ads) +
+                guc_ads_golden_lrc_size(ads);
+
+       return PAGE_ALIGN(offset);
+}
+
 static size_t guc_ads_capture_offset(struct xe_guc_ads *ads)
 {
        size_t offset;
 
-       offset = guc_ads_golden_lrc_offset(ads) +
-               guc_ads_golden_lrc_size(ads);
+       offset = guc_ads_waklv_offset(ads) +
+                guc_ads_waklv_size(ads);
 
        return PAGE_ALIGN(offset);
 }
@@ -260,6 +279,43 @@ static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
        return total_size;
 }
 
+static void guc_waklv_init(struct xe_guc_ads *ads)
+{
+       u64 addr_ggtt;
+       u32 offset, remain, size;
+
+       offset = guc_ads_waklv_offset(ads);
+       remain = guc_ads_waklv_size(ads);
+
+       /* Add workarounds here
+        *
+        * if (XE_WA(gt, wa_id))
+        *      guc_waklv_enable_simple(ads,
+        *                              wa_klv_id,
+        *                              &offset, &remain);
+        */
+
+       size = guc_ads_waklv_size(ads) - remain;
+       if (!size)
+               return;
+
+       offset = guc_ads_waklv_offset(ads);
+       addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+
+       ads_blob_write(ads, ads.wa_klv_addr_lo, lower_32_bits(addr_ggtt));
+       ads_blob_write(ads, ads.wa_klv_addr_hi, upper_32_bits(addr_ggtt));
+       ads_blob_write(ads, ads.wa_klv_size, size);
+}
+
+static int calculate_waklv_size(struct xe_guc_ads *ads)
+{
+       /*
+        * A single page is both the minimum size possible and
+        * is sufficiently large enough for all current platforms.
+        */
+       return SZ_4K;
+}
+
 #define MAX_GOLDEN_LRC_SIZE    (SZ_4K * 64)
 
 int xe_guc_ads_init(struct xe_guc_ads *ads)
@@ -271,6 +327,7 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
 
        ads->golden_lrc_size = calculate_golden_lrc_size(ads);
        ads->regset_size = calculate_regset_size(gt);
+       ads->ads_waklv_size = calculate_waklv_size(ads);
 
        bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
                                          XE_BO_FLAG_SYSTEM |
@@ -598,6 +655,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
        guc_mapping_table_init(gt, &info_map);
        guc_capture_list_init(ads);
        guc_doorbell_init(ads);
+       guc_waklv_init(ads);
 
        if (xe->info.has_usm) {
                guc_um_init_params(ads);
index 4afe44bece4bcd43445db7c642417cf7c35b9070..2de5decfe0fdd174d2de71492189d9ac3a8c48f7 100644 (file)
@@ -20,6 +20,8 @@ struct xe_guc_ads {
        size_t golden_lrc_size;
        /** @regset_size: size of register set passed to GuC for save/restore */
        u32 regset_size;
+       /** @ads_waklv_size: total waklv size supported by platform */
+       u32 ads_waklv_size;
 };
 
 #endif
index 5474025271e3c1d3f1b382cd36bd58b4eb1b18e7..19ee71aeaf17b597976e27ec4c0d55ad3a031c62 100644 (file)
@@ -209,7 +209,10 @@ struct guc_ads {
        u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
        u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
        u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
-       u32 reserved[14];
+       u32 wa_klv_addr_lo;
+       u32 wa_klv_addr_hi;
+       u32 wa_klv_size;
+       u32 reserved[11];
 } __packed;
 
 /* Engine usage stats */