Commit 946a4d5b authored by Shaoyun Liu, committed by Alex Deucher

drm/amdgpu: Avoid using SOC15_REG_OFFSET in static const arrays

Handle dynamic offsets correctly in static arrays: SOC15 register offsets are resolved per device (via adev->reg_offset), so they cannot be baked into static const initializers; replace such arrays with runtime lookups and callbacks.
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b466107e
@@ -1428,6 +1428,23 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+
+/*
+ * amdgpu nbio functions
+ *
+ * FIXME:
+ * Put more NBIO-specific func wrappers here; for now just try to minimize the
+ * change to avoid using SOC15_REG_OFFSET in the constant arrays.
+ */
+struct amdgpu_nbio_funcs {
+	u32 (*get_hdp_flush_req_offset)(struct amdgpu_device*);
+	u32 (*get_hdp_flush_done_offset)(struct amdgpu_device*);
+	u32 (*get_pcie_index_offset)(struct amdgpu_device*);
+	u32 (*get_pcie_data_offset)(struct amdgpu_device*);
+};
+
 /* Define the HW IP blocks will be used in driver , add more if necessary */
 enum amd_hw_ip_block_type {
 	GC_HWIP = 1,
@@ -1647,6 +1664,8 @@ struct amdgpu_device {
 	/* soc15 register offset based on ip, instance and segment */
 	uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+
+	const struct amdgpu_nbio_funcs *nbio_funcs;
+
 	/* delayed work_func for deferring clockgating during resume */
 	struct delayed_work late_init_work;
...
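An aside for context, not part of the commit: on SOC15 parts the per-IP register bases live in adev->reg_offset[hwip][instance][segment] and are only known once the device is probed, which is why offsets that depend on them cannot appear in static const initializers. A minimal standalone C sketch of the constraint (all names below are illustrative, not driver code):

#include <stdint.h>

static uintptr_t ip_base;                  /* discovered at probe time */
#define REG_OFFSET(reg) (ip_base + (reg))  /* not a constant expression */

/* static const uintptr_t bad[] = { REG_OFFSET(0x1ff) };
 *     ^ would fail to compile: "initializer element is not constant" */

static uintptr_t good(void)                /* runs after probe: fine */
{
	return REG_OFFSET(0x1ff);
}

The amdgpu_nbio_funcs callback table above sidesteps exactly this: each get_*_offset() computes the offset when it is called, so no static data ever needs to hold it.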
@@ -35,6 +35,7 @@
 #include "mmhub/mmhub_1_0_offset.h"
 #include "athub/athub_1_0_offset.h"
+#include "soc15.h"
 #include "soc15_common.h"
 #include "umc/umc_6_0_sh_mask.h"
@@ -74,16 +75,16 @@ static const u32 golden_settings_vega10_hdp[] =
 	0xf6e, 0x0fffffff, 0x00000000,
 };
-static const u32 golden_settings_mmhub_1_0_0[] =
+static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
 {
-	SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
-	SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
+	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
+	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
 };
-static const u32 golden_settings_athub_1_0_0[] =
+static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
 {
-	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
-	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
+	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
 };
 /* Ecc related register addresses, (BASE + reg offset) */
@@ -883,17 +884,18 @@ static int gmc_v9_0_sw_fini(void *handle)
 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
-		amdgpu_program_register_sequence(adev,
+		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
-		amdgpu_program_register_sequence(adev,
+		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
 		break;
 	case CHIP_RAVEN:
-		amdgpu_program_register_sequence(adev,
+		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
 		break;
...
@@ -76,16 +76,13 @@ u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
 	return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
 }
-static const u32 nbio_sdma_doorbell_range_reg[] =
-{
-	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
-	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
-};
 void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
 				  bool use_doorbell, int doorbell_index)
 {
-	u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
+	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
+			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
+	u32 doorbell_range = RREG32(reg);
 	if (use_doorbell) {
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -93,7 +90,8 @@ void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
 	} else
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
-	WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
+	WREG32(reg, doorbell_range);
 }
 void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
@@ -215,9 +213,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
 		*flags |= AMD_CG_SUPPORT_BIF_LS;
 }
+static u32 get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
+}
+
+static u32 get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
+}
+
+static u32 get_pcie_index_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
+}
+
+static u32 get_pcie_data_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
+}
+
 const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
-	.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
-	.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
 	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
 	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
 	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -232,11 +248,14 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
 	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
 };
-const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
-	.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
-	.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
+const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
+	.get_hdp_flush_req_offset = get_hdp_flush_req_offset,
+	.get_hdp_flush_done_offset = get_hdp_flush_done_offset,
+	.get_pcie_index_offset = get_pcie_index_offset,
+	.get_pcie_data_offset = get_pcie_data_offset,
 };
 void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
 {
 	uint32_t reg;
...
@@ -27,7 +27,8 @@
 #include "soc15_common.h"
 extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
+extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
 int nbio_v6_1_init(struct amdgpu_device *adev);
 u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
					uint32_t idx);
...
@@ -32,6 +32,7 @@
 #define smnNBIF_MGCG_CTRL_LCLK	0x1013a05c
 u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
 {
 	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -73,16 +74,13 @@ u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
 	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
 }
-static const u32 nbio_sdma_doorbell_range_reg[] =
-{
-	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
-	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
-};
 void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
 				  bool use_doorbell, int doorbell_index)
 {
-	u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
+	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
+			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
+	u32 doorbell_range = RREG32(reg);
 	if (use_doorbell) {
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -90,7 +88,7 @@ void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
 	} else
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
-	WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
+	WREG32(reg, doorbell_range);
 }
 void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
@@ -185,9 +183,27 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
 	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
 }
+static u32 get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
+}
+
+static u32 get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
+}
+
+static u32 get_pcie_index_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
+}
+
+static u32 get_pcie_data_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
+}
+
 const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
-	.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
-	.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
 	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
 	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
 	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -202,7 +218,10 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
 	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
-const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
-	.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
-	.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
+const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
+	.get_hdp_flush_req_offset = get_hdp_flush_req_offset,
+	.get_hdp_flush_done_offset = get_hdp_flush_done_offset,
+	.get_pcie_index_offset = get_pcie_index_offset,
+	.get_pcie_data_offset = get_pcie_data_offset,
 };
@@ -27,7 +27,8 @@
 #include "soc15_common.h"
 extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
+extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
 int nbio_v7_0_init(struct amdgpu_device *adev);
 u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
					uint32_t idx);
...
@@ -53,58 +53,58 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static const u32 golden_settings_sdma_4[] = {
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0x003ff006, 0x0003c000,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), 0xffffffff, 0x3f000100,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_IB_CNTL), 0x800f0100, 0x00000100,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), 0x003ff000, 0x0003c000,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0
+static const struct soc15_reg_golden golden_settings_sdma_4[] = {
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
 };
-static const u32 golden_settings_sdma_vg10[] = {
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
-	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002
+static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
 };
-static const u32 golden_settings_sdma_4_1[] =
+static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
 {
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0xfc3fffff, 0x40000051,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0111, 0x00000100,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0111, 0x00000100,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
 };
-static const u32 golden_settings_sdma_rv1[] =
+static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
 {
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00000002,
-	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00000002
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
 };
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
@@ -118,18 +118,18 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
-		amdgpu_program_register_sequence(adev,
+		soc15_program_register_sequence(adev,
						golden_settings_sdma_4,
						ARRAY_SIZE(golden_settings_sdma_4));
-		amdgpu_program_register_sequence(adev,
+		soc15_program_register_sequence(adev,
						golden_settings_sdma_vg10,
						ARRAY_SIZE(golden_settings_sdma_vg10));
 		break;
 	case CHIP_RAVEN:
-		amdgpu_program_register_sequence(adev,
+		soc15_program_register_sequence(adev,
						golden_settings_sdma_4_1,
						ARRAY_SIZE(golden_settings_sdma_4_1));
-		amdgpu_program_register_sequence(adev,
+		soc15_program_register_sequence(adev,
						golden_settings_sdma_rv1,
						ARRAY_SIZE(golden_settings_sdma_rv1));
 		break;
@@ -358,6 +358,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
 */
 static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
+	struct amdgpu_device *adev = ring->adev;
 	u32 ref_and_mask = 0;
 	const struct nbio_hdp_flush_reg *nbio_hf_reg;
@@ -374,8 +375,8 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
-	amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_done_offset << 2);
-	amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_req_offset << 2);
+	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
+	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
 	amdgpu_ring_write(ring, ref_and_mask); /* reference */
 	amdgpu_ring_write(ring, ref_and_mask); /* mask */
 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
...
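The emit path above now resolves the HDP flush registers through the new callbacks at the moment the packet is built. A trimmed illustration of the pattern (the wrapper name is hypothetical; the calls are the ones used in sdma_v4_0_ring_emit_hdp_flush):

static void example_emit_hdp_flush_addrs(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* The callbacks return dword register indices; << 2 multiplies by
	 * four to form the byte addresses the POLL_REGMEM packet expects. */
	amdgpu_ring_write(ring, adev->nbio_funcs->get_hdp_flush_done_offset(adev) << 2);
	amdgpu_ring_write(ring, adev->nbio_funcs->get_hdp_flush_req_offset(adev) << 2);
}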
@@ -101,15 +101,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags, address, data;
 	u32 r;
-	const struct nbio_pcie_index_data *nbio_pcie_id;
-
-	if (adev->flags & AMD_IS_APU)
-		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
-	else
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
-
-	address = nbio_pcie_id->index_offset;
-	data = nbio_pcie_id->data_offset;
+	address = adev->nbio_funcs->get_pcie_index_offset(adev);
+	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, reg);
@@ -122,15 +115,9 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
 	unsigned long flags, address, data;
-	const struct nbio_pcie_index_data *nbio_pcie_id;
-
-	if (adev->flags & AMD_IS_APU)
-		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
-	else
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
-
-	address = nbio_pcie_id->index_offset;
-	data = nbio_pcie_id->data_offset;
+	address = adev->nbio_funcs->get_pcie_index_offset(adev);
+	data = adev->nbio_funcs->get_pcie_data_offset(adev);
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, reg);
@@ -332,25 +319,34 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 	return true;
 }
-static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
-	{ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
-	{ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
-};
+struct soc15_allowed_register_entry {
+	uint32_t hwip;
+	uint32_t inst;
+	uint32_t seg;
+	uint32_t reg_offset;
+	bool grbm_indexed;
+};
+
+static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
+	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
+	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
+	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
+	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
+};
 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@@ -390,10 +386,13 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
 {
 	uint32_t i;
+	struct soc15_allowed_register_entry *en;
 	*value = 0;
 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
-		if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
+		en = &soc15_allowed_read_registers[i];
+		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+					+ en->reg_offset))
			continue;
		*value = soc15_get_register_value(adev,
@@ -404,6 +403,43 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
	return -EINVAL;
 }
+/**
+ * soc15_program_register_sequence - program an array of registers.
+ *
+ * @adev: amdgpu_device pointer
+ * @regs: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+void soc15_program_register_sequence(struct amdgpu_device *adev,
+				     const struct soc15_reg_golden *regs,
+				     const u32 array_size)
+{
+	const struct soc15_reg_golden *entry;
+	u32 tmp, reg;
+	int i;
+
+	for (i = 0; i < array_size; ++i) {
+		entry = &regs[i];
+		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+
+		if (entry->and_mask == 0xffffffff) {
+			tmp = entry->or_mask;
+		} else {
+			tmp = RREG32(reg);
+			tmp &= ~(entry->and_mask);
+			tmp |= entry->or_mask;
+		}
+		WREG32(reg, tmp);
+	}
+}
+
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
	u32 i;
@@ -619,6 +655,11 @@ static int soc15_common_early_init(void *handle)
 	adev->asic_funcs = &soc15_asic_funcs;
+	if (adev->flags & AMD_IS_APU)
+		adev->nbio_funcs = &nbio_v7_0_funcs;
+	else
+		adev->nbio_funcs = &nbio_v6_1_funcs;
+
 	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;
...
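To make the new helper concrete, here is what one iteration of soc15_program_register_sequence works out to for the ATHUB golden entry from gmc_v9_0.c above. This is a hand-traced sketch, not additional driver code:

/* entry: SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800) */
reg = adev->reg_offset[ATHUB_HWIP][0][mmRPB_ARB_CNTL_BASE_IDX] + mmRPB_ARB_CNTL;
tmp = RREG32(reg);	/* and_mask != 0xffffffff, so read-modify-write */
tmp &= ~0x0000ff00;	/* clear the bits covered by and_mask */
tmp |= 0x00000800;	/* apply the golden bits from or_mask */
WREG32(reg, tmp);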
@@ -29,10 +29,28 @@
 extern const struct amd_ip_funcs soc15_common_ip_funcs;
+struct soc15_reg_golden {
+	u32 hwip;
+	u32 instance;
+	u32 segment;
+	u32 reg;
+	u32 and_mask;
+	u32 or_mask;
+};
+
+#define SOC15_REG_ENTRY(ip, inst, reg)	ip##_HWIP, inst, reg##_BASE_IDX, reg
+
+#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
+	{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
+
 void soc15_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid);
 int soc15_set_ip_blocks(struct amdgpu_device *adev);
+void soc15_program_register_sequence(struct amdgpu_device *adev,
+				     const struct soc15_reg_golden *registers,
+				     const u32 array_size);
+
 int vega10_reg_base_init(struct amdgpu_device *adev);
 #endif
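Both macros simply splice the IP's HWIP enum value and the auto-generated *_BASE_IDX constant into an initializer list. For example, the MMHUB entry used in gmc_v9_0.c expands, per the macro definition above, to:

SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa)
/* expands to: */
{ MMHUB_HWIP, 0, mmDAGB1_WRCLI2_BASE_IDX, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa }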
@@ -25,8 +25,6 @@
 #define __SOC15_COMMON_H__
 struct nbio_hdp_flush_reg {
-	u32 hdp_flush_req_offset;
-	u32 hdp_flush_done_offset;
 	u32 ref_and_mask_cp0;
 	u32 ref_and_mask_cp1;
 	u32 ref_and_mask_cp2;
@@ -41,10 +39,6 @@ struct nbio_hdp_flush_reg {
 	u32 ref_and_mask_sdma1;
 };
-struct nbio_pcie_index_data {
-	u32 index_offset;
-	u32 data_offset;
-};
 /* Register Access Macros */
 #define SOC15_REG_OFFSET(ip, inst, reg) (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
...
@@ -1314,6 +1314,16 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
 }
+static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+	struct amdgpu_device *adev = ring->adev;
+
+	for (i = 0; i < count; i++)
+		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+}
+
 static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
 {
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
@@ -1681,7 +1691,7 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
 static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
 	.align_mask = 0xf,
-	.nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
+	.nop = PACKET0(0x81ff, 0),
 	.support_64bit_ptrs = false,
 	.vmhub = AMDGPU_MMHUB,
 	.get_rptr = uvd_v7_0_ring_get_rptr,
@@ -1700,7 +1710,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
 	.emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
 	.test_ring = uvd_v7_0_ring_test_ring,
 	.test_ib = amdgpu_uvd_ring_test_ib,
-	.insert_nop = amdgpu_ring_insert_nop,
+	.insert_nop = uvd_v7_0_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
...
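Why the raw 0x81ff: a designated initializer in a static const table must be a compile-time constant, so once register offsets are resolved per device the old expression can no longer be used there. Presumably 0x81ff is the dword offset the constant-form macro yielded for mmUVD_NO_OP, kept as a literal fallback for generic ring padding, while the real offset is computed at runtime; the same reasoning applies to the vcn_v1_0.c hunk below. A comment-only sketch of the contrast:

/* Not a constant expression once offsets come from adev at runtime:
 *	.nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
 * so the table keeps a literal fallback:
 *	.nop = PACKET0(0x81ff, 0),
 * and .insert_nop = uvd_v7_0_ring_insert_nop emits properly resolved
 * NO_OP packets whenever the driver pads the ring itself. */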
@@ -1077,6 +1077,17 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
+static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+	struct amdgpu_device *adev = ring->adev;
+
+	for (i = 0; i < count; i++)
+		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+}
+
 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 	.name = "vcn_v1_0",
 	.early_init = vcn_v1_0_early_init,
@@ -1100,7 +1111,7 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
-	.nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
+	.nop = PACKET0(0x81ff, 0),
 	.support_64bit_ptrs = false,
 	.vmhub = AMDGPU_MMHUB,
 	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
@@ -1118,7 +1129,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
 	.test_ring = amdgpu_vcn_dec_ring_test_ring,
 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
-	.insert_nop = amdgpu_ring_insert_nop,
+	.insert_nop = vcn_v1_0_ring_insert_nop,
 	.insert_start = vcn_v1_0_dec_ring_insert_start,
 	.insert_end = vcn_v1_0_dec_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
...