Commit 38392633 authored by Christian König, committed by Alex Deucher

drm/ttm: drop bo->glob

The pointer is available as bo->bdev->glob as well.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 886a16b3
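
The idea behind the change, as a minimal sketch (trimmed-down types, and the helper below is illustrative, not part of the patch): every buffer object already carries a pointer to its device, and the device carries the pointer to the shared global state, so the per-BO copy of the glob pointer is redundant and every user can reach it with one extra dereference.

/* Sketch of the pointer layout the patch relies on; these are cut-down
 * stand-ins, not the full TTM definitions. */
struct ttm_bo_global;				/* shared LRU/swap bookkeeping */

struct ttm_bo_device {
	struct ttm_bo_global *glob;		/* device -> global */
	/* ... */
};

struct ttm_buffer_object {
	struct ttm_bo_device *bdev;		/* bo -> device */
	/* the former "struct ttm_bo_global *glob;" member goes away */
	/* ... */
};

/* Hypothetical helper showing the replacement access path: code that
 * previously did spin_lock(&bo->glob->lru_lock) now goes through the
 * device, i.e. spin_lock(&bo->bdev->glob->lru_lock). */
static inline struct ttm_bo_global *bo_to_glob(struct ttm_buffer_object *bo)
{
	return bo->bdev->glob;
}

The hunks below apply exactly this substitution at every bo->glob user and then drop the member from struct ttm_buffer_object.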
@@ -458,7 +458,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 	trace_dma_fence_emit(&release->base);
 	driver = bdev->driver;
-	glob = bo->glob;
+	glob = bdev->glob;
 	spin_lock(&glob->lru_lock);
@@ -149,7 +149,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	BUG_ON(!list_empty(&bo->lru));
 	BUG_ON(!list_empty(&bo->ddestroy));
 	ttm_tt_destroy(bo->ttm);
-	atomic_dec(&bo->glob->bo_count);
+	atomic_dec(&bo->bdev->glob->bo_count);
 	dma_fence_put(bo->moving);
 	reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
@@ -174,7 +174,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		if (bo->ttm && !(bo->ttm->page_flags &
 				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
 			list_add_tail(&bo->swap,
-				      &bo->glob->swap_lru[bo->priority]);
+				      &bdev->glob->swap_lru[bo->priority]);
 			kref_get(&bo->list_kref);
 		}
 	}
@@ -205,9 +205,11 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 {
-	spin_lock(&bo->glob->lru_lock);
+	struct ttm_bo_global *glob = bo->bdev->glob;
+
+	spin_lock(&glob->lru_lock);
 	ttm_bo_del_from_lru(bo);
-	spin_unlock(&bo->glob->lru_lock);
+	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
@@ -226,7 +228,7 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_bo_global *glob = bdev->glob;
 	int ret = 0;
 	uint32_t page_flags = 0;
@@ -429,7 +431,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_bo_global *glob = bdev->glob;
 	int ret;
 	ret = ttm_bo_individualize_resv(bo);
@@ -500,7 +502,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool interruptible, bool no_wait_gpu,
 			       bool unlock_resv)
 {
-	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_bo_global *glob = bo->bdev->glob;
 	struct reservation_object *resv;
 	int ret;
@@ -1191,7 +1193,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
-	bo->glob = bdev->glob;
 	bo->type = type;
 	bo->num_pages = num_pages;
 	bo->mem.size = num_pages << PAGE_SHIFT;
@@ -1213,7 +1214,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		bo->resv = &bo->ttm_resv;
 	}
 	reservation_object_init(&bo->ttm_resv);
-	atomic_inc(&bo->glob->bo_count);
+	atomic_inc(&bo->bdev->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 	bo->priority = 0;
@@ -1246,9 +1247,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	}

 	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		spin_lock(&bo->glob->lru_lock);
+		spin_lock(&bdev->glob->lru_lock);
 		ttm_bo_add_to_lru(bo);
-		spin_unlock(&bo->glob->lru_lock);
+		spin_unlock(&bdev->glob->lru_lock);
 	}

 	return ret;
@@ -470,7 +470,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */

-	atomic_inc(&bo->glob->bo_count);
+	atomic_inc(&bo->bdev->glob->bo_count);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
@@ -62,7 +62,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 		return;

 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
-	glob = entry->bo->glob;
+	glob = entry->bo->bdev->glob;
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(entry, list, head) {
@@ -102,7 +102,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 		return 0;

 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
-	glob = entry->bo->glob;
+	glob = entry->bo->bdev->glob;

 	if (ticket)
 		ww_acquire_init(ticket, &reservation_ww_class);
@@ -194,7 +194,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
 	bdev = bo->bdev;
 	driver = bdev->driver;
-	glob = bo->glob;
+	glob = bo->bdev->glob;

 	spin_lock(&glob->lru_lock);
@@ -41,6 +41,8 @@
 #include <linux/bitmap.h>
 #include <linux/reservation.h>

+struct ttm_bo_global;
+
 struct ttm_bo_device;

 struct drm_mm_node;
@@ -169,7 +171,6 @@ struct ttm_buffer_object {
 	 * Members constant at init.
 	 */

-	struct ttm_bo_global *glob;
 	struct ttm_bo_device *bdev;
 	enum ttm_bo_type type;
 	void (*destroy) (struct ttm_buffer_object *);
@@ -956,9 +956,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		spin_lock(&bo->glob->lru_lock);
+		spin_lock(&bo->bdev->glob->lru_lock);
 		ttm_bo_add_to_lru(bo);
-		spin_unlock(&bo->glob->lru_lock);
+		spin_unlock(&bo->bdev->glob->lru_lock);
 	}
 	reservation_object_unlock(bo->resv);
 }