Commit 5aa998ba authored by Le Ma, committed by Alex Deucher

drm/amdgpu: add xcc index argument to soc15_grbm_select

To support grbm select for the multiple XCD case.

v2: unify naming style
Signed-off-by: Le Ma <le.ma@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 86301129
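The interface change itself is one added parameter; every call site touched by this patch passes 0, so behavior on single-XCD parts is unchanged. Old and new prototypes side by side:

    /* Before this patch: the GC instance is implicitly instance 0. */
    void soc15_grbm_select(struct amdgpu_device *adev,
                           u32 me, u32 pipe, u32 queue, u32 vmid);

    /* After this patch: callers name the XCC; passing 0 keeps the old behavior. */
    void soc15_grbm_select(struct amdgpu_device *adev,
                           u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id);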
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -50,12 +50,12 @@ static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 		      uint32_t queue, uint32_t vmid)
 {
 	mutex_lock(&adev->srbm_mutex);
-	soc15_grbm_select(adev, mec, pipe, queue, vmid);
+	soc15_grbm_select(adev, mec, pipe, queue, vmid, 0);
 }
 
 static void unlock_srbm(struct amdgpu_device *adev)
 {
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 }
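lock_srbm()/unlock_srbm() bracket every SRBM-indexed register access in the KFD glue: take srbm_mutex, program the selector, and on unlock reset the selector to all zeros before releasing the mutex. This commit pins KFD to instance 0; a minimal sketch of what an XCC-aware variant of the same bracket could look like (the *_on_xcc helpers are hypothetical, not part of this patch):

    /* Sketch only: a hypothetical XCC-aware variant of the same pattern.
     * This commit keeps KFD on instance 0; the xcc_id parameter below is
     * an assumption about how a later change might thread it through.
     */
    static void lock_srbm_on_xcc(struct amdgpu_device *adev, uint32_t mec,
                                 uint32_t pipe, uint32_t queue, uint32_t vmid,
                                 int xcc_id)
    {
            mutex_lock(&adev->srbm_mutex);          /* serialize GRBM_GFX_CNTL */
            soc15_grbm_select(adev, mec, pipe, queue, vmid, xcc_id);
    }

    static void unlock_srbm_on_xcc(struct amdgpu_device *adev, int xcc_id)
    {
            soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id);    /* restore defaults */
            mutex_unlock(&adev->srbm_mutex);
    }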
@@ -700,7 +700,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
 	*wave_cnt = 0;
 	pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
 	queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
-	soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
+	soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, 0);
 	reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
 		 queue_slot);
 	*wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
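get_wave_count() receives a flat queue index and splits it into a pipe and a queue slot by dividing and taking the remainder against adev->gfx.mec.num_queue_per_pipe. A standalone illustration of that arithmetic (the value 8 for queues per pipe is only an example):

    /* Example: with 8 queues per pipe, flat queue_idx 19 maps to
     * pipe 2, slot 3.  Same arithmetic as in get_wave_count().
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int num_queue_per_pipe = 8;    /* example value */
            unsigned int queue_idx = 19;
            unsigned int pipe_idx = queue_idx / num_queue_per_pipe;   /* 2 */
            unsigned int queue_slot = queue_idx % num_queue_per_pipe; /* 3 */

            printf("pipe %u, slot %u\n", pipe_idx, queue_slot);
            return 0;
    }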
@@ -772,7 +772,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
 	DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
 
 	lock_spi_csq_mutexes(adev);
-	soc15_grbm_select(adev, 1, 0, 0, 0);
+	soc15_grbm_select(adev, 1, 0, 0, 0, 0);
 
 	/*
 	 * Iterate through the shader engines and arrays of the device
@@ -821,7 +821,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
 	}
 
 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	unlock_spi_csq_mutexes(adev);
 
 	/* Update the output parameters and return */
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1831,7 +1831,7 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
 				      u32 me, u32 pipe, u32 q, u32 vm)
 {
-	soc15_grbm_select(adev, me, pipe, q, vm);
+	soc15_grbm_select(adev, me, pipe, q, vm, 0);
 }
 
 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
@@ -2324,12 +2324,12 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
 	mutex_lock(&adev->srbm_mutex);
 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
-		soc15_grbm_select(adev, 0, 0, 0, i);
+		soc15_grbm_select(adev, 0, 0, 0, i, 0);
 		/* CP and shaders */
 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
 	}
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
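gfx_v9_0_init_compute_vmid() programs SH_MEM_CONFIG/SH_MEM_BASES once per KFD VMID under the selector, then resets it. With xcc_id now threaded through soc15_grbm_select(), the natural follow-up is an outer loop over GC instances; a hedged sketch of that shape (the num_xcc parameter and per-instance register writes are assumptions, not code from this commit):

    /* Sketch, not this commit: per-XCC aperture init as it might look
     * once callers pass real instance numbers.  num_xcc is assumed.
     */
    static void init_compute_vmid_all_xcc(struct amdgpu_device *adev,
                                          u32 sh_mem_config, u32 sh_mem_bases,
                                          int num_xcc)
    {
            int xcc, i;

            mutex_lock(&adev->srbm_mutex);
            for (xcc = 0; xcc < num_xcc; xcc++) {
                    for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
                            soc15_grbm_select(adev, 0, 0, 0, i, xcc);
                            /* CP and shaders, on GC instance 'xcc' */
                            WREG32_SOC15_RLC(GC, xcc, mmSH_MEM_CONFIG, sh_mem_config);
                            WREG32_SOC15_RLC(GC, xcc, mmSH_MEM_BASES, sh_mem_bases);
                    }
                    soc15_grbm_select(adev, 0, 0, 0, 0, xcc);
            }
            mutex_unlock(&adev->srbm_mutex);
    }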
@@ -2394,7 +2394,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 	/* where to put LDS, scratch, GPUVM in FSA64 space */
 	mutex_lock(&adev->srbm_mutex);
 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
-		soc15_grbm_select(adev, 0, 0, 0, i);
+		soc15_grbm_select(adev, 0, 0, 0, i, 0);
 		/* CP and shaders */
 		if (i == 0) {
 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
@@ -2416,7 +2416,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
 		}
 	}
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
@@ -3540,9 +3540,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
 		amdgpu_ring_clear_ring(ring);
 		mutex_lock(&adev->srbm_mutex);
-		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
 		gfx_v9_0_kiq_init_register(ring);
-		soc15_grbm_select(adev, 0, 0, 0, 0);
+		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 	} else {
 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
@@ -3551,10 +3551,10 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
 			amdgpu_ring_clear_ring(ring);
 		mutex_lock(&adev->srbm_mutex);
-		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
 		gfx_v9_0_mqd_init(ring);
 		gfx_v9_0_kiq_init_register(ring);
-		soc15_grbm_select(adev, 0, 0, 0, 0);
+		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 
 		if (adev->gfx.kiq[0].mqd_backup)
@@ -3582,9 +3582,9 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 	((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 	((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
 	mutex_lock(&adev->srbm_mutex);
-	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
 	gfx_v9_0_mqd_init(ring);
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
 	if (adev->gfx.mec.mqd_backup[mqd_idx])
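The KIQ and KCQ init paths repeat one bracket: lock srbm_mutex, select the ring's (me, pipe, queue), initialize the MQD and/or queue registers, deselect, unlock. Factored into a helper it would look like this (a sketch only; init_fn stands in for gfx_v9_0_mqd_init() or gfx_v9_0_kiq_init_register()):

    /* Sketch: the recurring select/init/deselect bracket, factored out.
     * init_fn is a stand-in for gfx_v9_0_mqd_init() and friends.
     */
    static void run_under_queue_select(struct amdgpu_device *adev,
                                       struct amdgpu_ring *ring,
                                       void (*init_fn)(struct amdgpu_ring *))
    {
            mutex_lock(&adev->srbm_mutex);
            soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
            init_fn(ring);
            soc15_grbm_select(adev, 0, 0, 0, 0, 0);  /* back to defaults */
            mutex_unlock(&adev->srbm_mutex);
    }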
@@ -3791,9 +3791,9 @@ static int gfx_v9_0_hw_fini(void *handle)
 		mutex_lock(&adev->srbm_mutex);
 		soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
 				  adev->gfx.kiq[0].ring.pipe,
-				  adev->gfx.kiq[0].ring.queue, 0);
+				  adev->gfx.kiq[0].ring.queue, 0, 0);
 		gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
-		soc15_grbm_select(adev, 0, 0, 0, 0);
+		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 	}
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -761,7 +761,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
 	for (i = first_vmid; i < last_vmid; i++) {
 		data = 0;
-		soc15_grbm_select(adev, 0, 0, 0, i);
+		soc15_grbm_select(adev, 0, 0, 0, i, 0);
 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE,
@@ -769,7 +769,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
 		WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data);
 	}
 
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 }
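The trap-config loop builds its register value entirely with REG_SET_FIELD(), which clears a named field and ORs in the shifted value using the generated _MASK/__SHIFT constants. Open-coded, the first assignment above is equivalent to the following (a sketch of what the macro reduces to, not a suggested rewrite):

    /* Equivalent of:
     *   data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
     * using the generated mask/shift pair directly.
     */
    data = (data & ~SPI_GDBG_PER_VMID_CNTL__TRAP_EN_MASK) |
           ((1 << SPI_GDBG_PER_VMID_CNTL__TRAP_EN__SHIFT) &
            SPI_GDBG_PER_VMID_CNTL__TRAP_EN_MASK);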
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -658,7 +658,7 @@ static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd
 static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
 					u32 me, u32 pipe, u32 q, u32 vm)
 {
-	soc15_grbm_select(adev, me, pipe, q, vm);
+	soc15_grbm_select(adev, me, pipe, q, vm, 0);
 }
 
 static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
@@ -926,12 +926,12 @@ static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev)
 	mutex_lock(&adev->srbm_mutex);
 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
-		soc15_grbm_select(adev, 0, 0, 0, i);
+		soc15_grbm_select(adev, 0, 0, 0, i, 0);
 		/* CP and shaders */
 		WREG32_SOC15_RLC(GC, 0, regSH_MEM_CONFIG, sh_mem_config);
 		WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, sh_mem_bases);
 	}
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
@@ -977,7 +977,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
 	/* where to put LDS, scratch, GPUVM in FSA64 space */
 	mutex_lock(&adev->srbm_mutex);
 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
-		soc15_grbm_select(adev, 0, 0, 0, i);
+		soc15_grbm_select(adev, 0, 0, 0, i, 0);
 		/* CP and shaders */
 		if (i == 0) {
 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
@@ -999,7 +999,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
 			WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, tmp);
 		}
 	}
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
@@ -1706,19 +1706,19 @@ static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring)
 		amdgpu_ring_clear_ring(ring);
 		mutex_lock(&adev->srbm_mutex);
-		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
 		gfx_v9_4_3_kiq_init_register(ring);
-		soc15_grbm_select(adev, 0, 0, 0, 0);
+		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 	} else {
 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
 		mutex_lock(&adev->srbm_mutex);
-		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
 		gfx_v9_4_3_mqd_init(ring);
 		gfx_v9_4_3_kiq_init_register(ring);
-		soc15_grbm_select(adev, 0, 0, 0, 0);
+		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 
 		if (adev->gfx.kiq[0].mqd_backup)
@@ -1746,9 +1746,9 @@ static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring)
 	((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 	((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
 	mutex_lock(&adev->srbm_mutex);
-	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
 	gfx_v9_4_3_mqd_init(ring);
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
 	if (adev->gfx.mec.mqd_backup[mqd_idx])
@@ -1896,9 +1896,9 @@ static int gfx_v9_4_3_hw_fini(void *handle)
 		mutex_lock(&adev->srbm_mutex);
 		soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
 				  adev->gfx.kiq[0].ring.pipe,
-				  adev->gfx.kiq[0].ring.queue, 0);
+				  adev->gfx.kiq[0].ring.queue, 0, 0);
 		gfx_v9_4_3_kiq_fini_register(&adev->gfx.kiq[0].ring);
-		soc15_grbm_select(adev, 0, 0, 0, 0);
+		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 	}
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -311,7 +311,7 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
 
 void soc15_grbm_select(struct amdgpu_device *adev,
-		       u32 me, u32 pipe, u32 queue, u32 vmid)
+		       u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id)
 {
 	u32 grbm_gfx_cntl = 0;
 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
@@ -319,7 +319,7 @@ void soc15_grbm_select(struct amdgpu_device *adev,
 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
 
-	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
+	WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
 }
 
 static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
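soc15_grbm_select() packs the four selectors into GRBM_GFX_CNTL and, with this change, writes it through the RLC shadow of the requested GC instance instead of hardcoded instance 0. For reference, the whole function after this patch reads roughly as follows; the MEID assignment falls in the lines elided between the two hunks, so it is reconstructed here and marked as such:

    void soc15_grbm_select(struct amdgpu_device *adev,
                           u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id)
    {
            u32 grbm_gfx_cntl = 0;

            grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
            grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me); /* assumed: elided between the hunks */
            grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
            grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

            /* Route the write to the GC instance named by xcc_id. */
            WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
    }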
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -100,7 +100,7 @@ struct soc15_ras_field_entry {
 #define SOC15_RAS_REG_FIELD_VAL(val, entry, field) SOC15_REG_FIELD_VAL((val), (entry).field##_count_mask, (entry).field##_count_shift)
 
 void soc15_grbm_select(struct amdgpu_device *adev,
-		       u32 me, u32 pipe, u32 queue, u32 vmid);
+		       u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id);
 void soc15_set_virt_ops(struct amdgpu_device *adev);
 void soc15_program_register_sequence(struct amdgpu_device *adev,
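For callers, the contract is unchanged apart from the trailing instance argument: select, do the queue-scoped register work, then deselect with all-zero arguments before dropping srbm_mutex. A hedged usage sketch against the new prototype (the helper and its register read are illustrative only):

    /* Usage sketch: read a per-queue register on GC instance xcc_id.
     * Illustrative only; mirrors the pattern used throughout this patch.
     */
    static u32 read_queue_reg_on_xcc(struct amdgpu_device *adev,
                                     u32 me, u32 pipe, u32 queue, int xcc_id,
                                     u32 reg_offset)
    {
            u32 val;

            mutex_lock(&adev->srbm_mutex);
            soc15_grbm_select(adev, me, pipe, queue, 0, xcc_id);
            val = RREG32(reg_offset);               /* queue-scoped read */
            soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id);
            mutex_unlock(&adev->srbm_mutex);

            return val;
    }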