Commit 576c6380 authored by Matt Roper, committed by Rodrigo Vivi

drm/xe/pat: Move PAT setup to a dedicated file

PAT handling is growing in complexity and will continue to do so in
upcoming platforms.  Separate it out to a dedicated file to keep things
tidy.

The code is moved as-is here (aside from a few unused #define's that are
just dropped); further changes will come in future patches.
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://lore.kernel.org/r/20230324210415.2434992-2-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent cf667aec
@@ -71,6 +71,7 @@ xe-y += xe_bb.o \
 	xe_mmio.o \
 	xe_mocs.o \
 	xe_module.o \
+	xe_pat.o \
 	xe_pci.o \
 	xe_pcode.o \
 	xe_pm.o \
......
@@ -69,7 +69,6 @@
 #define GEN12_VE1_AUX_INV		_MMIO(0x42b8)
 #define   AUX_INV			REG_BIT(0)
-#define GEN12_PAT_INDEX(index)		_MMIO(0x4800 + (index) * 4)
 #define XEHP_TILE0_ADDR_RANGE		MCR_REG(0x4900)
 #define XEHP_FLAT_CCS_BASE_ADDR	MCR_REG(0x4910)
......
@@ -29,6 +29,7 @@
 #include "xe_map.h"
 #include "xe_migrate.h"
 #include "xe_mmio.h"
+#include "xe_pat.h"
 #include "xe_mocs.h"
 #include "xe_reg_sr.h"
 #include "xe_ring_ops.h"
@@ -92,83 +93,6 @@ int xe_gt_alloc(struct xe_device *xe, struct xe_gt *gt)
 	return 0;
 }
 
-/* FIXME: These should be in a common file */
-#define CHV_PPAT_SNOOP			REG_BIT(6)
-#define GEN8_PPAT_AGE(x)		((x)<<4)
-#define GEN8_PPAT_LLCeLLC		(3<<2)
-#define GEN8_PPAT_LLCELLC		(2<<2)
-#define GEN8_PPAT_LLC			(1<<2)
-#define GEN8_PPAT_WB			(3<<0)
-#define GEN8_PPAT_WT			(2<<0)
-#define GEN8_PPAT_WC			(1<<0)
-#define GEN8_PPAT_UC			(0<<0)
-#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
-#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
-#define GEN12_PPAT_CLOS(x)		((x)<<2)
-
-static void tgl_setup_private_ppat(struct xe_gt *gt)
-{
-	/* TGL doesn't support LLC or AGE settings */
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg, GEN8_PPAT_WC);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg, GEN8_PPAT_WT);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg, GEN8_PPAT_UC);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(5).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(6).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(7).reg, GEN8_PPAT_WB);
-}
-
-static void pvc_setup_private_ppat(struct xe_gt *gt)
-{
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, GEN8_PPAT_UC);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg, GEN8_PPAT_WC);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg, GEN8_PPAT_WT);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg,
-			GEN12_PPAT_CLOS(1) | GEN8_PPAT_WT);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(5).reg,
-			GEN12_PPAT_CLOS(1) | GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(6).reg,
-			GEN12_PPAT_CLOS(2) | GEN8_PPAT_WT);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(7).reg,
-			GEN12_PPAT_CLOS(2) | GEN8_PPAT_WB);
-}
-
-#define MTL_PPAT_L4_CACHE_POLICY_MASK	REG_GENMASK(3, 2)
-#define MTL_PAT_INDEX_COH_MODE_MASK	REG_GENMASK(1, 0)
-#define MTL_PPAT_3_UC	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 3)
-#define MTL_PPAT_1_WT	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 1)
-#define MTL_PPAT_0_WB	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 0)
-#define MTL_3_COH_2W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 3)
-#define MTL_2_COH_1W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 2)
-#define MTL_0_COH_NON	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 0)
-
-static void mtl_setup_private_ppat(struct xe_gt *gt)
-{
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, MTL_PPAT_0_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg,
-			MTL_PPAT_1_WT | MTL_2_COH_1W);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg,
-			MTL_PPAT_3_UC | MTL_2_COH_1W);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg,
-			MTL_PPAT_0_WB | MTL_2_COH_1W);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg,
-			MTL_PPAT_0_WB | MTL_3_COH_2W);
-}
-
-static void setup_private_ppat(struct xe_gt *gt)
-{
-	struct xe_device *xe = gt_to_xe(gt);
-
-	if (xe->info.platform == XE_METEORLAKE)
-		mtl_setup_private_ppat(gt);
-	else if (xe->info.platform == XE_PVC)
-		pvc_setup_private_ppat(gt);
-	else
-		tgl_setup_private_ppat(gt);
-}
-
 static int gt_ttm_mgr_init(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);
@@ -447,7 +371,7 @@ static int gt_fw_domain_init(struct xe_gt *gt)
 	if (err)
 		goto err_hw_fence_irq;
 
-	setup_private_ppat(gt);
+	xe_pat_init(gt);
 
 	if (!xe_gt_is_media_type(gt)) {
 		err = xe_ggtt_init(gt, gt->mem.ggtt);
@@ -633,7 +557,7 @@ static int do_gt_restart(struct xe_gt *gt)
 	enum xe_hw_engine_id id;
 	int err;
 
-	setup_private_ppat(gt);
+	xe_pat_init(gt);
 
 	xe_gt_mcr_set_implicit_defaults(gt);
 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
......
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_pat.h"

#include "regs/xe_reg_defs.h"
#include "xe_gt.h"
#include "xe_mmio.h"

#define GEN12_PAT_INDEX(index)		_MMIO(0x4800 + (index) * 4)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN12_PPAT_CLOS(x)		((x)<<2)

static void tgl_setup_private_ppat(struct xe_gt *gt)
{
	/* TGL doesn't support LLC or AGE settings */
	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, GEN8_PPAT_WB);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg, GEN8_PPAT_WC);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg, GEN8_PPAT_WT);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg, GEN8_PPAT_UC);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg, GEN8_PPAT_WB);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(5).reg, GEN8_PPAT_WB);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(6).reg, GEN8_PPAT_WB);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(7).reg, GEN8_PPAT_WB);
}

static void pvc_setup_private_ppat(struct xe_gt *gt)
{
	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, GEN8_PPAT_UC);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg, GEN8_PPAT_WC);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg, GEN8_PPAT_WT);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg, GEN8_PPAT_WB);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg,
			GEN12_PPAT_CLOS(1) | GEN8_PPAT_WT);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(5).reg,
			GEN12_PPAT_CLOS(1) | GEN8_PPAT_WB);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(6).reg,
			GEN12_PPAT_CLOS(2) | GEN8_PPAT_WT);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(7).reg,
			GEN12_PPAT_CLOS(2) | GEN8_PPAT_WB);
}

#define MTL_PPAT_L4_CACHE_POLICY_MASK	REG_GENMASK(3, 2)
#define MTL_PAT_INDEX_COH_MODE_MASK	REG_GENMASK(1, 0)
#define MTL_PPAT_3_UC	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 3)
#define MTL_PPAT_1_WT	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 1)
#define MTL_PPAT_0_WB	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 0)
#define MTL_3_COH_2W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 3)
#define MTL_2_COH_1W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 2)
#define MTL_0_COH_NON	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 0)

static void mtl_setup_private_ppat(struct xe_gt *gt)
{
	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, MTL_PPAT_0_WB);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg,
			MTL_PPAT_1_WT | MTL_2_COH_1W);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg,
			MTL_PPAT_3_UC | MTL_2_COH_1W);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg,
			MTL_PPAT_0_WB | MTL_2_COH_1W);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg,
			MTL_PPAT_0_WB | MTL_3_COH_2W);
}

void xe_pat_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (xe->info.platform == XE_METEORLAKE)
		mtl_setup_private_ppat(gt);
	else if (xe->info.platform == XE_PVC)
		pvc_setup_private_ppat(gt);
	else
		tgl_setup_private_ppat(gt);
}
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_PAT_H_
#define _XE_PAT_H_

struct xe_gt;

void xe_pat_init(struct xe_gt *gt);

#endif
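
To illustrate the direction the commit message describes (PAT handling growing more complex per platform), here is a minimal sketch of how a future platform table could slot into the new file, following the same pattern as the functions in xe_pat.c above. The platform enum XE_NEWPLATFORM, the function newplat_setup_private_ppat(), and the chosen PAT encodings are purely hypothetical illustrations, not part of this patch:

/*
 * Hypothetical sketch only; not part of this commit. It reuses the existing
 * GEN12_PAT_INDEX()/GEN8_PPAT_* helpers from xe_pat.c above; the platform
 * enum and the chosen encodings are invented for illustration.
 */
static void newplat_setup_private_ppat(struct xe_gt *gt)
{
	/* Program however many PAT entries the new platform defines. */
	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, GEN8_PPAT_WB);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg, GEN8_PPAT_WC);
	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg, GEN8_PPAT_UC);
}

void xe_pat_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (xe->info.platform == XE_NEWPLATFORM)	/* hypothetical enum value */
		newplat_setup_private_ppat(gt);
	else if (xe->info.platform == XE_METEORLAKE)
		mtl_setup_private_ppat(gt);
	else if (xe->info.platform == XE_PVC)
		pvc_setup_private_ppat(gt);
	else
		tgl_setup_private_ppat(gt);
}

Because xe_gt.c only calls xe_pat_init(), such an addition would stay entirely inside xe_pat.c, which is the point of the split.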