Commit dc947770 authored by Roger He's avatar Roger He Committed by Alex Deucher

drm/ttm: enable swapout for reserved BOs during allocation

If the BO shares the same reservation object, do not lock it again
at swapout time, so that it can still be swapped out.

v2: refine the commit message
Reviewed-by: Thomas Hellström <thellstrom@vmware.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d5769ba3
...@@ -1699,19 +1699,21 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_release); ...@@ -1699,19 +1699,21 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
* A buffer object shrink method that tries to swap out the first * A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list. * buffer object on the bo_global::swap_lru list.
*/ */
int ttm_bo_swapout(struct ttm_bo_global *glob) int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
{ {
struct ttm_buffer_object *bo; struct ttm_buffer_object *bo;
int ret = -EBUSY; int ret = -EBUSY;
bool locked;
unsigned i; unsigned i;
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) { list_for_each_entry(bo, &glob->swap_lru[i], swap) {
ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY; if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
if (!ret) ret = 0;
break; break;
} }
}
if (!ret) if (!ret)
break; break;
} }
...@@ -1786,7 +1788,12 @@ EXPORT_SYMBOL(ttm_bo_swapout); ...@@ -1786,7 +1788,12 @@ EXPORT_SYMBOL(ttm_bo_swapout);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev) void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{ {
while (ttm_bo_swapout(bdev->glob) == 0) struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
; ;
} }
EXPORT_SYMBOL(ttm_bo_swapout_all); EXPORT_SYMBOL(ttm_bo_swapout_all);
......
...@@ -211,7 +211,7 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, ...@@ -211,7 +211,7 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
*/ */
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
uint64_t extra) uint64_t extra, struct ttm_operation_ctx *ctx)
{ {
int ret; int ret;
...@@ -219,7 +219,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, ...@@ -219,7 +219,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) { while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock); spin_unlock(&glob->lock);
ret = ttm_bo_swapout(glob->bo_glob); ret = ttm_bo_swapout(glob->bo_glob, ctx);
spin_lock(&glob->lock); spin_lock(&glob->lock);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
break; break;
...@@ -230,10 +230,14 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, ...@@ -230,10 +230,14 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
static void ttm_shrink_work(struct work_struct *work) static void ttm_shrink_work(struct work_struct *work)
{ {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
struct ttm_mem_global *glob = struct ttm_mem_global *glob =
container_of(work, struct ttm_mem_global, work); container_of(work, struct ttm_mem_global, work);
ttm_shrink(glob, true, 0ULL); ttm_shrink(glob, true, 0ULL, &ctx);
} }
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
...@@ -520,7 +524,7 @@ static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, ...@@ -520,7 +524,7 @@ static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
return -ENOMEM; return -ENOMEM;
if (unlikely(count-- == 0)) if (unlikely(count-- == 0))
return -ENOMEM; return -ENOMEM;
ttm_shrink(glob, false, memory + (memory >> 2) + 16); ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
} }
return 0; return 0;
......
...@@ -752,7 +752,8 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, ...@@ -752,7 +752,8 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf, const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write); size_t count, loff_t *f_pos, bool write);
int ttm_bo_swapout(struct ttm_bo_global *glob); int ttm_bo_swapout(struct ttm_bo_global *glob,
struct ttm_operation_ctx *ctx);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev); void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment