// SPDX-License-Identifier: MIT
/*
- * Copyright © 2021 Intel Corporation
+ * Copyright © 2021-2023 Intel Corporation
*/
#include "xe_mmio.h"
#define XEHP_MTCFG_ADDR XE_REG(0x101800)
#define TILE_COUNT REG_GENMASK(15, 8)
#define GEN12_LMEM_BAR 2
static int xe_set_dma_info(struct xe_device *xe)
{
return 0;
}
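/*
 * Illustrative sketch only, not from this patch: a plausible body for the
 * stub above, assuming the device's DMA address width is carried in
 * xe->info.dma_mask_size (that field name is an assumption here).
 */
static int example_set_dma_info(struct xe_device *xe)
{
	/* cap streaming and coherent DMA to the device's address width */
	return dma_set_mask_and_coherent(xe->drm.dev,
					 DMA_BIT_MASK(xe->info.dma_mask_size));
}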
-int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size, u64 *usable_size)
+/**
+ * xe_mmio_tile_vram_size() - Collect vram size and offset information
+ * @gt: GT of the tile to get info for
+ * @vram_size: available vram (size - device reserved portions)
+ * @tile_size: actual vram size
+ * @tile_offset: physical start point in the vram address space
+ *
+ * There are 4 places for size information:
+ * - io size (from pci_resource_len of LMEM bar) (only used for small bar and DG1)
+ * - TILEx size (actual vram size)
+ * - GSMBASE offset (TILEx - "stolen")
+ * - CSSBASE offset (TILEx - CSS space necessary)
+ *
+ * CSSBASE is always a lower/smaller offset than GSMBASE.
+ *
+ * The actual available size of memory extends from the tile base up to
+ * the CCS or GSM base.
+ * NOTE: multi-tile bases will include the tile offset.
+ *
+ */
+int xe_mmio_tile_vram_size(struct xe_gt *gt, u64 *vram_size, u64 *tile_size, u64 *tile_offset)
{
- struct xe_gt *gt = xe_device_get_gt(xe, 0);
- struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ u64 offset;
int err;
- u32 reg_val;
-
- if (!xe->info.has_flat_ccs) {
- *vram_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
- if (usable_size)
- *usable_size = min(*vram_size,
- xe_mmio_read64(gt, GSMBASE));
- return 0;
- }
+ u32 reg;
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (err)
return err;
- reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE0_ADDR_RANGE);
- *vram_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg_val) * SZ_1G;
- if (usable_size) {
- reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
- *usable_size = (u64)REG_FIELD_GET(GENMASK(31, 8), reg_val) * SZ_64K;
- drm_info(&xe->drm, "vram_size: 0x%llx usable_size: 0x%llx\n",
- *vram_size, *usable_size);
+ /* actual size */
+ if (unlikely(gt->xe->info.platform == XE_DG1)) {
+ *tile_size = pci_resource_len(to_pci_dev(gt->xe->drm.dev), GEN12_LMEM_BAR);
+ *tile_offset = 0;
+ } else {
+ reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id));
+ *tile_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G;
+ *tile_offset = (u64)REG_FIELD_GET(GENMASK(7, 1), reg) * SZ_1G;
}
+ /* minus device usage */
+ if (gt->xe->info.has_flat_ccs) {
+ reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
+ offset = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
+ } else {
+ offset = xe_mmio_read64(gt, GSMBASE);
+ }
+
+ /* remove the tile offset so we have just the available size */
+ *vram_size = offset - *tile_offset;
+
return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}
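/*
 * Usage sketch (illustrative, not part of this patch): walking every GT
 * and logging the three outputs. for_each_gt() is the driver's existing
 * iterator; the function name is invented for the example.
 */
static int example_log_tile_sizes(struct xe_device *xe)
{
	u64 vram_size, tile_size, tile_offset;
	struct xe_gt *gt;
	int err;
	u8 id;

	for_each_gt(gt, xe, id) {
		err = xe_mmio_tile_vram_size(gt, &vram_size, &tile_size,
					     &tile_offset);
		if (err)
			return err;

		/* vram_size already excludes the stolen/CCS reservation */
		drm_dbg(&xe->drm, "tile%u: size=%llx offset=%llx usable=%llx\n",
			id, tile_size, tile_offset, vram_size);
	}

	return 0;
}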
int xe_mmio_probe_vram(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct xe_gt *gt;
- u8 id;
- u64 vram_size;
u64 original_size;
- u64 usable_size;
+ u64 tile_offset;
+ u64 tile_size;
+ u64 vram_size;
int err;
+ u8 id;
if (!IS_DGFX(xe)) {
xe->mem.vram.mapping = NULL;
gt = xe_device_get_gt(xe, 0);
original_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
- err = xe_mmio_total_vram_size(xe, &vram_size, &usable_size);
+ err = xe_mmio_tile_vram_size(gt, &vram_size, &tile_size, &tile_offset);
if (err)
return err;
xe_resize_vram_bar(xe, vram_size);
xe->mem.vram.io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
- xe->mem.vram.io_size = min(usable_size,
+ xe->mem.vram.io_size = min(vram_size,
pci_resource_len(pdev, GEN12_LMEM_BAR));
xe->mem.vram.size = xe->mem.vram.io_size;
if (!xe->mem.vram.size)
return -EIO;
- if (usable_size > xe->mem.vram.io_size)
+ if (vram_size > xe->mem.vram.io_size)
drm_warn(&xe->drm, "Restricting VRAM size to PCI resource size (%lluMiB->%lluMiB)\n",
- (u64)usable_size >> 20, (u64)xe->mem.vram.io_size >> 20);
+ (u64)vram_size >> 20, (u64)xe->mem.vram.io_size >> 20);
xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
- xe->mem.vram.size = min_t(u64, xe->mem.vram.size, usable_size);
+ xe->mem.vram.size = min_t(u64, xe->mem.vram.size, vram_size);
drm_info(&xe->drm, "TOTAL VRAM: %pa, %pa\n", &xe->mem.vram.io_start, &xe->mem.vram.size);
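/*
 * Worked example with invented numbers: a 16G device behind an 8G BAR.
 * The clamps above resolve to
 *
 *	io_size = min(vram_size, BAR len) = min(16G, 8G) = 8G
 *	size    = min(io_size, vram_size) = min(8G, 16G) = 8G
 *
 * so the CPU-visible window and the reported size both shrink to the
 * BAR, and the drm_warn() notes the 16384MiB->8192MiB restriction.
 */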
// SPDX-License-Identifier: MIT
/*
- * Copyright © 2021-2022 Intel Corporation
+ * Copyright © 2021-2023 Intel Corporation
 * Copyright (C) 2021-2022 Red Hat
*/
return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
}
-static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+static s64 detect_bar2_dgfx(struct xe_gt *gt, struct xe_ttm_stolen_mgr *mgr)
{
- struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- struct xe_gt *gt = to_gt(xe);
- u64 vram_size, stolen_size;
- int err;
-
- err = xe_mmio_total_vram_size(xe, &vram_size, NULL);
- if (err) {
- drm_info(&xe->drm, "Querying total vram size failed\n");
+ struct pci_dev *pdev = to_pci_dev(gt->xe->drm.dev);
+ u64 stolen_size;
+ u64 tile_offset;
+ u64 tile_size;
+ u64 vram_size;
+
+ if (xe_mmio_tile_vram_size(gt, &vram_size, &tile_size, &tile_offset)) {
+ drm_err(&gt->xe->drm, "Querying total vram size failed\n");
return 0;
}
/* Instead, use the DSM base address to locate stolen memory */
- mgr->stolen_base = xe_mmio_read64(gt, DSMBASE) & BDSM_MASK;
- if (drm_WARN_ON(&xe->drm, vram_size < mgr->stolen_base))
+ mgr->stolen_base = (xe_mmio_read64(gt, DSMBASE) & BDSM_MASK) - tile_offset;
+ if (drm_WARN_ON(&gt->xe->drm, tile_size < mgr->stolen_base))
return 0;
- stolen_size = vram_size - mgr->stolen_base;
- if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, 2))
- mgr->io_base = pci_resource_start(pdev, 2) + mgr->stolen_base;
+ stolen_size = tile_size - mgr->stolen_base;
+
+ /* Verify usage fits in the actual resource available */
+ if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, GEN12_LMEM_BAR))
+ mgr->io_base = gt->mem.vram.io_start + mgr->stolen_base;
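+
+ /*
+  * Worked example with invented numbers: tile_size = 16G and a DSMBASE
+  * read decoding to tile_offset + 15.75G give
+  *
+  *	stolen_base = (DSMBASE & BDSM_MASK) - tile_offset = 15.75G
+  *	stolen_size = tile_size - stolen_base = 256M
+  *
+  * and, since 15.75G + 256M fits in a 16G LMEM BAR, io_base lands at
+  * gt->mem.vram.io_start + 15.75G.
+  */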
/*
 * There may be a few KB of platform-dependent reserved memory at the end
int err;
if (IS_DGFX(xe))
- stolen_size = detect_bar2_dgfx(xe, mgr);
+ stolen_size = detect_bar2_dgfx(to_gt(xe), mgr);
else if (GRAPHICS_VERx100(xe) >= 1270)
stolen_size = detect_bar2_integrated(xe, mgr);
else