Commit 00f06b24 authored by John Brooks, committed by Alex Deucher

drm/amdgpu: Throttle visible VRAM moves separately

The BO move throttling code is designed to allow VRAM to fill quickly if it
is relatively empty. However, this does not take into account situations
where the visible VRAM is smaller than total VRAM, and total VRAM may not
be close to full but the visible VRAM segment is under pressure. In such
situations, visible VRAM would experience unrestricted swapping and
performance would drop.

Add a separate counter specifically for moves involving visible VRAM, and
check it before moving BOs there.
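
For orientation, here is a minimal standalone sketch of that idea (illustration only, not the kernel code; the field names merely mirror the ones introduced by the patch below): keep a second allowance for CPU-visible VRAM and consult it only when a move would land there.

	/* Illustration only: dual-allowance throttle in plain C. */
	#include <stdbool.h>
	#include <stdint.h>

	struct move_budget {
		uint64_t bytes_moved;               /* bytes moved so far in this CS */
		uint64_t bytes_moved_vis;           /* subset that touched visible VRAM */
		uint64_t bytes_moved_threshold;     /* allowance for all moves */
		uint64_t bytes_moved_vis_threshold; /* separate allowance for visible VRAM */
	};

	/* May this buffer still be moved to its preferred placement? */
	bool may_move(const struct move_budget *b, bool dest_is_visible_vram)
	{
		if (b->bytes_moved >= b->bytes_moved_threshold)
			return false;   /* overall move budget spent */
		if (dest_is_visible_vram &&
		    b->bytes_moved_vis >= b->bytes_moved_vis_threshold)
			return false;   /* visible-VRAM budget spent */
		return true;
	}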

v2: Only perform calculations for separate counter if visible VRAM is
    smaller than total VRAM. (Michel Dänzer)
v3: [Michel Dänzer]
* Use BO's location rather than the AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
  flag to determine whether to account a move for visible VRAM in most
  cases.
* Use a single

	if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {

  block in amdgpu_cs_get_threshold_for_moves.

Fixes: 95844d20 ("drm/amdgpu: throttle buffer migrations at CS using a fixed MBps limit (v2)")
Signed-off-by: John Brooks <john@fastquake.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 218b5dcd
@@ -1117,7 +1117,9 @@ struct amdgpu_cs_parser {
 	struct list_head validated;
 	struct dma_fence *fence;
 	uint64_t bytes_moved_threshold;
+	uint64_t bytes_moved_vis_threshold;
 	uint64_t bytes_moved;
+	uint64_t bytes_moved_vis;
 	struct amdgpu_bo_list_entry *evictable;
 
 	/* user fence */
@@ -1555,6 +1557,7 @@ struct amdgpu_device {
 		spinlock_t lock;
 		s64 last_update_us;
 		s64 accum_us; /* accumulated microseconds */
+		s64 accum_us_vis; /* for visible VRAM */
 		u32 log2_max_MBps;
 	} mm_stats;
@@ -1846,7 +1849,8 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_need_post(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+				  u64 num_vis_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
...
@@ -223,10 +223,11 @@ static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
  * ticks. The accumulated microseconds (us) are converted to bytes and
  * returned.
  */
-static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
+static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
+					      u64 *max_bytes,
+					      u64 *max_vis_bytes)
 {
 	s64 time_us, increment_us;
-	u64 max_bytes;
 	u64 free_vram, total_vram, used_vram;
 
 	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
@@ -238,8 +239,11 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 	 */
 	const s64 us_upper_bound = 200000;
 
-	if (!adev->mm_stats.log2_max_MBps)
-		return 0;
+	if (!adev->mm_stats.log2_max_MBps) {
+		*max_bytes = 0;
+		*max_vis_bytes = 0;
+		return;
+	}
 
 	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
 	used_vram = atomic64_read(&adev->vram_usage);
@@ -280,23 +284,45 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 	}
 
-	/* This returns 0 if the driver is in debt to disallow (optional)
+	/* This is set to 0 if the driver is in debt to disallow (optional)
 	 * buffer moves.
 	 */
-	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+	/* Do the same for visible VRAM if half of it is free */
+	if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+		u64 total_vis_vram = adev->mc.visible_vram_size;
+		u64 used_vis_vram = atomic64_read(&adev->vram_vis_usage);
+
+		if (used_vis_vram < total_vis_vram) {
+			u64 free_vis_vram = total_vis_vram - used_vis_vram;
+			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
+							  increment_us, us_upper_bound);
+
+			if (free_vis_vram >= total_vis_vram / 2)
+				adev->mm_stats.accum_us_vis =
+					max(bytes_to_us(adev, free_vis_vram / 2),
+					    adev->mm_stats.accum_us_vis);
+		}
+
+		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
+	} else {
+		*max_vis_bytes = 0;
+	}
 
 	spin_unlock(&adev->mm_stats.lock);
-	return max_bytes;
 }
 
 /* Report how many bytes have really been moved for the last command
  * submission. This can result in a debt that can stop buffer migrations
  * temporarily.
  */
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+				  u64 num_vis_bytes)
 {
 	spin_lock(&adev->mm_stats.lock);
 	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 	spin_unlock(&adev->mm_stats.lock);
 }
@@ -304,7 +330,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 				 struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	u64 initial_bytes_moved;
+	u64 initial_bytes_moved, bytes_moved;
 	uint32_t domain;
 	int r;
@@ -314,17 +340,35 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	/* Don't move this buffer if we have depleted our allowance
 	 * to move it. Don't move anything if the threshold is zero.
 	 */
-	if (p->bytes_moved < p->bytes_moved_threshold)
-		domain = bo->prefered_domains;
-	else
+	if (p->bytes_moved < p->bytes_moved_threshold) {
+		if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
+			 * visible VRAM if we've depleted our allowance to do
+			 * that.
+			 */
+			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
+				domain = bo->prefered_domains;
+			else
+				domain = bo->allowed_domains;
+		} else {
+			domain = bo->prefered_domains;
+		}
+	} else {
 		domain = bo->allowed_domains;
+	}
 
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
 	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
-		initial_bytes_moved;
+	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+		      initial_bytes_moved;
+	p->bytes_moved += bytes_moved;
+	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+		p->bytes_moved_vis += bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 		domain = bo->allowed_domains;
@@ -350,7 +394,8 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 		struct amdgpu_bo_list_entry *candidate = p->evictable;
 		struct amdgpu_bo *bo = candidate->robj;
 		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-		u64 initial_bytes_moved;
+		u64 initial_bytes_moved, bytes_moved;
+		bool update_bytes_moved_vis;
 		uint32_t other;
 
 		/* If we reached our current BO we can forget it */
@@ -370,10 +415,17 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 		/* Good we can try to move this BO somewhere else */
 		amdgpu_ttm_placement_from_domain(bo, other);
+		update_bytes_moved_vis =
+			adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+			bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
 		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
-			initial_bytes_moved;
+		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+			      initial_bytes_moved;
+		p->bytes_moved += bytes_moved;
+		if (update_bytes_moved_vis)
+			p->bytes_moved_vis += bytes_moved;
 
 		if (unlikely(r))
 			break;
@@ -554,8 +606,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		list_splice(&need_pages, &p->validated);
 	}
 
-	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
+	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
+					  &p->bytes_moved_vis_threshold);
 	p->bytes_moved = 0;
+	p->bytes_moved_vis = 0;
 	p->evictable = list_last_entry(&p->validated,
 				       struct amdgpu_bo_list_entry,
 				       tv.head);
@@ -579,8 +633,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		goto error_validate;
 	}
 
-	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
+	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+				     p->bytes_moved_vis);
 
 	fpriv->vm.last_eviction_counter =
 		atomic64_read(&p->adev->num_evictions);
...
@@ -322,7 +322,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	struct amdgpu_bo *bo;
 	enum ttm_bo_type type;
 	unsigned long page_align;
-	u64 initial_bytes_moved;
+	u64 initial_bytes_moved, bytes_moved;
 	size_t acc_size;
 	int r;
@@ -398,8 +398,14 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
 				 &bo->placement, page_align, !kernel, NULL,
 				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
-	amdgpu_cs_report_moved_bytes(adev,
-		atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+		      initial_bytes_moved;
+	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+		amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+	else
+		amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
 
 	if (unlikely(r != 0))
 		return r;
...
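
For a rough sense of the scale of the visible-VRAM allowance computed in amdgpu_cs_get_threshold_for_moves above, here is a worked example with assumed numbers (the sizes and the ~8 GB/s rate are illustrative, not taken from the patch): with 256 MiB of visible VRAM of which 200 MiB is free, free_vis_vram >= total_vis_vram / 2 holds, so accum_us_vis is raised to at least the time needed to move free_vis_vram / 2 = 100 MiB.

	/* Illustrative calculation only; sizes and rate are made-up numbers. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t mib = 1024ull * 1024ull;
		uint64_t total_vis_vram = 256 * mib; /* assumed visible VRAM size */
		uint64_t used_vis_vram = 56 * mib;   /* assumed current usage */
		uint64_t free_vis_vram = total_vis_vram - used_vis_vram;
		uint64_t bytes_per_us = 8192;        /* ~8 GB/s as bytes per microsecond */

		if (free_vis_vram >= total_vis_vram / 2) {
			/* Mirrors the "raise the allowance" branch in the patch. */
			uint64_t min_budget_us = (free_vis_vram / 2) / bytes_per_us;
			printf("visible VRAM move budget >= %llu us\n",
			       (unsigned long long)min_budget_us); /* prints 12800 */
		}
		return 0;
	}

In other words, as long as at least half of the visible segment is free, moves into it remain effectively unthrottled; once it fills past that point, the separate counter starts limiting them just like the existing total-VRAM counter.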