Commit 61f288a8 authored by Michael J. Ruhl, committed by Rodrigo Vivi

drm/xe: Rework size helper to be a little more correct

The _total_vram_size helper is device-based and is not complete.

Teach the helper to be tile aware and add the ability to size
DG1 correctly.
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 094d739f
drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -74,7 +74,7 @@
 #define VE1_AUX_INV				XE_REG(0x42b8)
 #define   AUX_INV				REG_BIT(0)
 
-#define XEHP_TILE0_ADDR_RANGE			XE_REG_MCR(0x4900)
+#define XEHP_TILE_ADDR_RANGE(_idx)		XE_REG_MCR(0x4900 + (_idx) * 4)
 #define XEHP_FLAT_CCS_BASE_ADDR			XE_REG_MCR(0x4910)
 
 #define CHICKEN_RASTER_1			XE_REG_MCR(0x6204, XE_REG_OPTION_MASKED)

drivers/gpu/drm/xe/xe_mmio.c
 // SPDX-License-Identifier: MIT
 /*
- * Copyright © 2021 Intel Corporation
+ * Copyright © 2021-2023 Intel Corporation
  */
 
 #include "xe_mmio.h"
@@ -20,7 +20,6 @@
 #define XEHP_MTCFG_ADDR		XE_REG(0x101800)
 #define TILE_COUNT		REG_GENMASK(15, 8)
 
-#define GEN12_LMEM_BAR		2
 
 static int xe_set_dma_info(struct xe_device *xe)
 {
@@ -145,34 +144,56 @@ static bool xe_pci_resource_valid(struct pci_dev *pdev, int bar)
 	return true;
 }
 
-int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size, u64 *usable_size)
+/**
+ * xe_mmio_tile_vram_size() - Collect vram size and offset information
+ * @gt: tile to get info for
+ * @vram_size: available vram (size - device reserved portions)
+ * @tile_size: actual vram size
+ * @tile_offset: physical start point in the vram address space
+ *
+ * There are 4 places for size information:
+ * - io size (from pci_resource_len of LMEM bar) (only used for small bar and DG1)
+ * - TILEx size (actual vram size)
+ * - GSMBASE offset (TILEx - "stolen")
+ * - CCSBASE offset (TILEx - CCS space necessary)
+ *
+ * CCSBASE is always a lower/smaller offset than GSMBASE.
+ *
+ * The actual available size of memory extends to the CCS or GSM base.
+ * NOTE: multi-tile bases will include the tile offset.
+ *
+ */
+int xe_mmio_tile_vram_size(struct xe_gt *gt, u64 *vram_size, u64 *tile_size, u64 *tile_offset)
 {
-	struct xe_gt *gt = xe_device_get_gt(xe, 0);
-	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+	u64 offset;
 	int err;
-	u32 reg_val;
-
-	if (!xe->info.has_flat_ccs) {
-		*vram_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
-		if (usable_size)
-			*usable_size = min(*vram_size,
-					   xe_mmio_read64(gt, GSMBASE));
-		return 0;
-	}
+	u32 reg;
 
 	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 	if (err)
 		return err;
 
-	reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE0_ADDR_RANGE);
-	*vram_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg_val) * SZ_1G;
-	if (usable_size) {
-		reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
-		*usable_size = (u64)REG_FIELD_GET(GENMASK(31, 8), reg_val) * SZ_64K;
-		drm_info(&xe->drm, "vram_size: 0x%llx usable_size: 0x%llx\n",
-			 *vram_size, *usable_size);
+	/* actual size */
+	if (unlikely(gt->xe->info.platform == XE_DG1)) {
+		*tile_size = pci_resource_len(to_pci_dev(gt->xe->drm.dev), GEN12_LMEM_BAR);
+		*tile_offset = 0;
+	} else {
+		reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id));
+		*tile_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G;
+		*tile_offset = (u64)REG_FIELD_GET(GENMASK(7, 1), reg) * SZ_1G;
 	}
 
+	/* minus device usage */
+	if (gt->xe->info.has_flat_ccs) {
+		reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
+		offset = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
+	} else {
+		offset = xe_mmio_read64(gt, GSMBASE);
+	}
+
+	/* remove the tile offset so we have just the available size */
+	*vram_size = offset - *tile_offset;
+
 	return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
 }
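
For illustration: the TILEx_ADDR_RANGE decode above packs the tile size into bits 14:8 (units of 1 GiB) and the tile start into bits 7:1 (also in GiB). A minimal standalone sketch of that field math, with an assumed raw register value; field_get() here is a stand-in for the kernel's REG_FIELD_GET(GENMASK(h, l), reg):

#include <stdint.h>
#include <stdio.h>

#define SZ_1G (1ULL << 30)

/* Stand-in for REG_FIELD_GET(GENMASK(h, l), reg), illustration only. */
static uint64_t field_get(uint32_t reg, unsigned int h, unsigned int l)
{
	return (reg >> l) & ((1U << (h - l + 1)) - 1);
}

int main(void)
{
	/* Assumed value: an 8 GiB tile that starts 8 GiB into the vram space. */
	uint32_t reg = (8 << 8) | (8 << 1);

	uint64_t tile_size = field_get(reg, 14, 8) * SZ_1G;  /* 8 GiB */
	uint64_t tile_offset = field_get(reg, 7, 1) * SZ_1G; /* 8 GiB */

	printf("tile_size=%llu GiB, tile_offset=%llu GiB\n",
	       (unsigned long long)(tile_size / SZ_1G),
	       (unsigned long long)(tile_offset / SZ_1G));
	return 0;
}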
@@ -180,11 +201,12 @@ int xe_mmio_probe_vram(struct xe_device *xe)
 {
 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
 	struct xe_gt *gt;
-	u8 id;
-	u64 vram_size;
 	u64 original_size;
-	u64 usable_size;
+	u64 tile_offset;
+	u64 tile_size;
+	u64 vram_size;
 	int err;
+	u8 id;
 
 	if (!IS_DGFX(xe)) {
 		xe->mem.vram.mapping = 0;
@@ -209,25 +231,25 @@ int xe_mmio_probe_vram(struct xe_device *xe)
 	gt = xe_device_get_gt(xe, 0);
 	original_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
 
-	err = xe_mmio_total_vram_size(xe, &vram_size, &usable_size);
+	err = xe_mmio_tile_vram_size(gt, &vram_size, &tile_size, &tile_offset);
 	if (err)
 		return err;
 
 	xe_resize_vram_bar(xe, vram_size);
 	xe->mem.vram.io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
-	xe->mem.vram.io_size = min(usable_size,
+	xe->mem.vram.io_size = min(vram_size,
 				   pci_resource_len(pdev, GEN12_LMEM_BAR));
 	xe->mem.vram.size = xe->mem.vram.io_size;
 
 	if (!xe->mem.vram.size)
 		return -EIO;
 
-	if (usable_size > xe->mem.vram.io_size)
+	if (vram_size > xe->mem.vram.io_size)
 		drm_warn(&xe->drm, "Restricting VRAM size to PCI resource size (%lluMiB->%lluMiB)\n",
-			 (u64)usable_size >> 20, (u64)xe->mem.vram.io_size >> 20);
+			 (u64)vram_size >> 20, (u64)xe->mem.vram.io_size >> 20);
 
 	xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
-	xe->mem.vram.size = min_t(u64, xe->mem.vram.size, usable_size);
+	xe->mem.vram.size = min_t(u64, xe->mem.vram.size, vram_size);
 
 	drm_info(&xe->drm, "TOTAL VRAM: %pa, %pa\n", &xe->mem.vram.io_start, &xe->mem.vram.size);
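
For illustration, the small-bar clamping above in miniature: if the tile reports more usable vram than the LMEM BAR exposes, io_size is clamped to the BAR length and the warning fires. A standalone sketch with assumed sizes; min_u64() stands in for the kernel's min():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's min(), illustration only. */
static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Assumed: tile reports 16 GiB usable, LMEM BAR exposes only 8 GiB. */
	uint64_t vram_size = 16ULL << 30;
	uint64_t bar_len = 8ULL << 30;

	uint64_t io_size = min_u64(vram_size, bar_len);

	if (vram_size > io_size)
		printf("Restricting VRAM size to PCI resource size (%lluMiB->%lluMiB)\n",
		       (unsigned long long)(vram_size >> 20),
		       (unsigned long long)(io_size >> 20));
	return 0;
}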

drivers/gpu/drm/xe/xe_mmio.h
 /* SPDX-License-Identifier: MIT */
 /*
- * Copyright © 2021 Intel Corporation
+ * Copyright © 2021-2023 Intel Corporation
  */
 
 #ifndef _XE_MMIO_H_
@@ -16,6 +16,8 @@ struct drm_device;
 struct drm_file;
 struct xe_device;
 
+#define GEN12_LMEM_BAR		2
+
 int xe_mmio_init(struct xe_device *xe);
 
 static inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
@@ -131,6 +133,6 @@ static inline bool xe_mmio_in_range(const struct xe_mmio_range *range,
 }
 
 int xe_mmio_probe_vram(struct xe_device *xe);
-int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size, u64 *flat_ccs_base);
+int xe_mmio_tile_vram_size(struct xe_gt *gt, u64 *vram_size, u64 *tile_size, u64 *tile_base);
 
 #endif

drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
 // SPDX-License-Identifier: MIT
 /*
- * Copyright © 2021-2022 Intel Corporation
+ * Copyright © 2021-2023 Intel Corporation
  * Copyright (C) 2021-2022 Red Hat
  */
@@ -51,27 +51,29 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
 	return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
 }
 
-static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+static s64 detect_bar2_dgfx(struct xe_gt *gt, struct xe_ttm_stolen_mgr *mgr)
 {
-	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
-	struct xe_gt *gt = to_gt(xe);
-	u64 vram_size, stolen_size;
-	int err;
-
-	err = xe_mmio_total_vram_size(xe, &vram_size, NULL);
-	if (err) {
-		drm_info(&xe->drm, "Querying total vram size failed\n");
+	struct pci_dev *pdev = to_pci_dev(gt->xe->drm.dev);
+	u64 stolen_size;
+	u64 tile_offset;
+	u64 tile_size;
+	u64 vram_size;
+
+	if (xe_mmio_tile_vram_size(gt, &vram_size, &tile_size, &tile_offset)) {
+		drm_err(&gt->xe->drm, "Querying total vram size failed\n");
 		return 0;
 	}
 
 	/* Use DSM base address instead for stolen memory */
-	mgr->stolen_base = xe_mmio_read64(gt, DSMBASE) & BDSM_MASK;
+	mgr->stolen_base = (xe_mmio_read64(gt, DSMBASE) & BDSM_MASK) - tile_offset;
 
-	if (drm_WARN_ON(&xe->drm, vram_size < mgr->stolen_base))
+	if (drm_WARN_ON(&gt->xe->drm, tile_size < mgr->stolen_base))
 		return 0;
-	stolen_size = vram_size - mgr->stolen_base;
 
-	if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, 2))
-		mgr->io_base = pci_resource_start(pdev, 2) + mgr->stolen_base;
+	stolen_size = tile_size - mgr->stolen_base;
+
+	/* Verify usage fits in the actual resource available */
+	if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, GEN12_LMEM_BAR))
+		mgr->io_base = gt->mem.vram.io_start + mgr->stolen_base;
 
 	/*
 	 * There may be few KB of platform dependent reserved memory at the end
@@ -139,7 +141,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
 	int err;
 
 	if (IS_DGFX(xe))
-		stolen_size = detect_bar2_dgfx(xe, mgr);
+		stolen_size = detect_bar2_dgfx(to_gt(xe), mgr);
 	else if (GRAPHICS_VERx100(xe) >= 1270)
 		stolen_size = detect_bar2_integrated(xe, mgr);
 	else
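
For illustration, the reworked stolen-memory arithmetic with assumed values: DSMBASE is an absolute address, so subtracting tile_offset yields a tile-relative stolen_base, and stolen_size is whatever remains of the tile above it. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed: a second 8 GiB tile starting at 8 GiB, with stolen (DSM)
	 * occupying the last 64 MiB of the tile.
	 */
	uint64_t tile_offset = 8ULL << 30;
	uint64_t tile_size = 8ULL << 30;
	uint64_t dsm_base = tile_offset + tile_size - (64ULL << 20); /* DSMBASE & BDSM_MASK */

	/* Make the base tile-relative, as detect_bar2_dgfx() now does. */
	uint64_t stolen_base = dsm_base - tile_offset;
	uint64_t stolen_size = tile_size - stolen_base;

	printf("stolen_base=0x%llx, stolen_size=%llu MiB\n",
	       (unsigned long long)stolen_base,
	       (unsigned long long)(stolen_size >> 20));
	return 0;
}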