Commit 9ecefb19 authored by Christian König, committed by Alex Deucher

drm/amdgpu: cleanup IB pool handling a bit

Fix the coding style, move and rename the definitions to
better match what they are supposed to be doing.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e2085864
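
This patch renames the IB (indirect buffer) pools and moves their definitions: AMDGPU_IB_POOL_NORMAL becomes AMDGPU_IB_POOL_DELAYED, AMDGPU_IB_POOL_VM becomes AMDGPU_IB_POOL_IMMEDIATE, AMDGPU_IB_POOL_DIRECT keeps its name, and the backing sa_manager array moves from adev->ring_tmp_bo to adev->ib_pools, with the enum relocated from amdgpu.h to amdgpu_ring.h. As a hedged sketch of the caller-side pattern the diff converges on (example_submit() is hypothetical; amdgpu_job_alloc_with_ib() and the pool names are taken from the diff below):

static int example_submit(struct amdgpu_device *adev, bool direct_submit)
{
	/* Hoist the pool choice into a local, as amdgpu_copy_buffer() and
	 * amdgpu_vm_sdma_prepare() now do, then pass it to the allocator. */
	enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
		AMDGPU_IB_POOL_DELAYED;
	struct amdgpu_job *job;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, pool, &job);
	if (r)
		return r;

	/* ... fill job->ibs[0] with packets and submit the job ... */
	return 0;
}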
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -204,8 +204,6 @@ extern int amdgpu_cik_support;
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
-/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
-#define AMDGPU_IB_POOL_SIZE			16
 #define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
 #define AMDGPUFB_CONN_LIMIT			4
 #define AMDGPU_BIOS_NUM_SCRATCH			16
@@ -402,13 +400,6 @@ struct amdgpu_sa_bo {
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
-enum amdgpu_ib_pool_type {
-	AMDGPU_IB_POOL_NORMAL = 0,
-	AMDGPU_IB_POOL_VM,
-	AMDGPU_IB_POOL_DIRECT,
-	AMDGPU_IB_POOL_MAX
-};
-
 /*
  * IRQS.
  */
@@ -866,7 +857,7 @@ struct amdgpu_device {
 	unsigned			num_rings;
 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
 	bool				ib_pool_ready;
-	struct amdgpu_sa_manager	ring_tmp_bo[AMDGPU_IB_POOL_MAX];
+	struct amdgpu_sa_manager	ib_pools[AMDGPU_IB_POOL_MAX];
 	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
 
 	/* interrupts */
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -924,7 +924,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 		ring = to_amdgpu_ring(entity->rq->sched);
 		r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
-				   chunk_ib->ib_bytes : 0, AMDGPU_IB_POOL_NORMAL, ib);
+				   chunk_ib->ib_bytes : 0,
+				   AMDGPU_IB_POOL_DELAYED, ib);
 		if (r) {
 			DRM_ERROR("Failed to get ib !\n");
 			return r;
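
For raw IB allocation (as opposed to job allocation), amdgpu_ib_get() now takes the pool type between the size and the ib pointer, as the hunk above shows. A hedged usage sketch; the helper is hypothetical, the values illustrative, and amdgpu_ib_free() with a NULL fence is assumed unchanged by this patch:

static int example_get_small_ib(struct amdgpu_device *adev)
{
	struct amdgpu_ib ib = {};
	int r;

	/* 256 bytes from the delayed pool; the vm argument may be NULL
	 * for kernel-internal IBs. */
	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DELAYED, &ib);
	if (r)
		return r;

	/* ... write packets into ib.ptr ... */

	amdgpu_ib_free(adev, &ib, NULL);
	return 0;
}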
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -61,14 +61,13 @@
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-		  unsigned size,
-		  enum amdgpu_ib_pool_type pool_type,
+		  unsigned size, enum amdgpu_ib_pool_type pool_type,
 		  struct amdgpu_ib *ib)
 {
 	int r;
 
 	if (size) {
-		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo[pool_type],
+		r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
 				      &ib->sa_bo, size, 256);
 		if (r) {
 			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -305,30 +304,32 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
  */
 int amdgpu_ib_pool_init(struct amdgpu_device *adev)
 {
-	int r, i;
 	unsigned size;
+	int r, i;
 
-	if (adev->ib_pool_ready) {
+	if (adev->ib_pool_ready)
 		return 0;
-	}
+
 	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
 		if (i == AMDGPU_IB_POOL_DIRECT)
 			size = PAGE_SIZE * 2;
 		else
-			size = AMDGPU_IB_POOL_SIZE*64*1024;
-		r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo[i],
-					      size,
-					      AMDGPU_GPU_PAGE_SIZE,
+			size = AMDGPU_IB_POOL_SIZE;
+
+		r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
+					      size, AMDGPU_GPU_PAGE_SIZE,
 					      AMDGPU_GEM_DOMAIN_GTT);
-		if (r) {
-			for (i--; i >= 0; i--)
-				amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
-			return r;
-		}
+		if (r)
+			goto error;
 	}
+
 	adev->ib_pool_ready = true;
+
 	return 0;
+
+error:
+	while (i--)
+		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+	return r;
 }
 
 /**
@@ -343,11 +344,12 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 {
 	int i;
 
-	if (adev->ib_pool_ready) {
-		for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
-			amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
-		adev->ib_pool_ready = false;
-	}
+	if (!adev->ib_pool_ready)
+		return;
+
+	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+	adev->ib_pool_ready = false;
 }
 
 /**
@@ -362,9 +364,9 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
  */
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
-	unsigned i;
-	int r, ret = 0;
 	long tmo_gfx, tmo_mm;
+	int r, ret = 0;
+	unsigned i;
 
 	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
 	if (amdgpu_sriov_vf(adev)) {
@@ -442,15 +444,16 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 
-	seq_printf(m, "-------------------- NORMAL -------------------- \n");
-	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_NORMAL], m);
-	seq_printf(m, "---------------------- VM ---------------------- \n");
-	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_VM], m);
-	seq_printf(m, "-------------------- DIRECT--------------------- \n");
-	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_DIRECT], m);
+	seq_printf(m, "--------------------- DELAYED --------------------- \n");
+	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+				     m);
+	seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+				     m);
+	seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
 
 	return 0;
 }
 
 static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
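
The reworked amdgpu_ib_pool_init() above swaps the open-coded backward for-loop for a goto-based unwind: on failure at index i, `while (i--)` tears down exactly the managers [0, i) that were already initialized. A minimal self-contained sketch of the idiom, with hypothetical item_init()/item_fini() helpers standing in for amdgpu_sa_bo_manager_init()/_fini():

int init_all(struct item *items, int n)
{
	int r, i;

	for (i = 0; i < n; i++) {
		r = item_init(&items[i]);
		if (r)
			goto error;	/* items[i] itself is not initialized */
	}
	return 0;

error:
	while (i--)	/* visits i-1, i-2, ..., 0 */
		item_fini(&items[i]);
	return r;
}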
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -50,6 +50,8 @@
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
+#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)
+
 enum amdgpu_ring_type {
 	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
 	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
@@ -63,6 +65,17 @@ enum amdgpu_ring_type {
 	AMDGPU_RING_TYPE_KIQ
 };
 
+enum amdgpu_ib_pool_type {
+	/* Normal submissions to the top of the pipeline. */
+	AMDGPU_IB_POOL_DELAYED,
+	/* Immediate submissions to the bottom of the pipeline. */
+	AMDGPU_IB_POOL_IMMEDIATE,
+	/* Direct submission to the ring buffer during init and reset. */
+	AMDGPU_IB_POOL_DIRECT,
+
+	AMDGPU_IB_POOL_MAX
+};
+
 struct amdgpu_device;
 struct amdgpu_ring;
 struct amdgpu_ib;
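
Worth noting: the pool size is unchanged by the move. The old amdgpu.h defined AMDGPU_IB_POOL_SIZE as 16 and every user multiplied it by 64*1024, i.e. 16 * 64 * 1024 = 1048576 bytes; the new define in amdgpu_ring.h is (1024 * 1024), the same 1 MiB expressed directly in bytes. An illustrative compile-time check (not part of the patch):

_Static_assert(16 * 64 * 1024 == 1024 * 1024,
	       "IB pool size is 1 MiB both before and after the cleanup");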
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -44,7 +44,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 	/* Number of tests =
 	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
 	 */
-	n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+	n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE;
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		if (adev->rings[i])
 			n -= adev->rings[i]->ring_size;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -333,7 +333,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	num_bytes = num_pages * 8;
 
 	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
-				     AMDGPU_IB_POOL_NORMAL, &job);
+				     AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
 		return r;
@@ -2122,6 +2122,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       struct dma_fence **fence, bool direct_submit,
 		       bool vm_needs_flush, bool tmz)
 {
+	enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
+		AMDGPU_IB_POOL_DELAYED;
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
@@ -2139,8 +2141,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
 	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
 
-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4,
-		direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
 	if (r)
 		return r;
@@ -2229,7 +2230,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 	/* for IB padding */
 	num_dw += 64;
 
-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job);
+	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+				     &job);
 	if (r)
 		return r;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1056,8 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		goto err;
 	}
 
-	r = amdgpu_job_alloc_with_ib(adev, 64,
-			direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+	r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+				     AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
 		goto err;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -447,7 +447,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	int i, r;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-					AMDGPU_IB_POOL_DIRECT, &job);
+				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
@@ -526,7 +526,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	int i, r;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-			direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+				     direct ? AMDGPU_IB_POOL_DIRECT :
+				     AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
 		return r;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -61,11 +61,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 			  struct dma_resv *resv,
 			  enum amdgpu_sync_mode sync_mode)
 {
+	enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
+		AMDGPU_IB_POOL_DELAYED;
 	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
 	int r;
 
-	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
-		p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
+	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
 	if (r)
 		return r;
@@ -199,6 +200,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 			       uint64_t addr, unsigned count, uint32_t incr,
 			       uint64_t flags)
 {
+	enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
+		AMDGPU_IB_POOL_DELAYED;
 	unsigned int i, ndw, nptes;
 	uint64_t *pte;
 	int r;
@@ -224,8 +227,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 		ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
 		ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
 
-		r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
-			p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
+		r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
+					     &p->job);
 		if (r)
 			return r;
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -372,7 +372,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 * translation. Avoid this by doing the invalidation from the SDMA
 	 * itself.
 	 */
-	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_VM, &job);
+	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+				     &job);
 	if (r)
 		goto error_alloc;