Commit 02376d82 authored by Michel Dänzer, committed by Alex Deucher

drm/radeon: Allow write-combined CPU mappings of BOs in GTT (v2)

v2: fix rebase onto drm-fixes
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 77497f27
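
For illustration, a minimal sketch (not part of the patch) of how a driver-internal caller could use the new radeon_bo_create() flags argument to request a write-combined GTT mapping; the helper name and the fixed 4096-byte size are invented for the example, and passing 0 for flags keeps the previous cached/snooped behaviour:

/* Hypothetical helper, assuming the updated radeon_object.h prototype below. */
static int example_alloc_wc_gtt_bo(struct radeon_device *rdev,
                                   struct radeon_bo **bo)
{
    /* New argument order: ..., domain, flags, sg, bo_ptr */
    return radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
                            RADEON_GEM_DOMAIN_GTT, RADEON_GEM_GTT_WC,
                            NULL, bo);
}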
@@ -4676,7 +4676,7 @@ static int cik_mec_init(struct radeon_device *rdev)
         r = radeon_bo_create(rdev,
                 rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
                 PAGE_SIZE, true,
-                RADEON_GEM_DOMAIN_GTT, NULL,
+                RADEON_GEM_DOMAIN_GTT, 0, NULL,
                 &rdev->mec.hpd_eop_obj);
         if (r) {
             dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -4846,7 +4846,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
         r = radeon_bo_create(rdev,
                 sizeof(struct bonaire_mqd),
                 PAGE_SIZE, true,
-                RADEON_GEM_DOMAIN_GTT, NULL,
+                RADEON_GEM_DOMAIN_GTT, 0, NULL,
                 &rdev->ring[idx].mqd_obj);
         if (r) {
             dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
...
@@ -771,7 +771,8 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
     trace_radeon_vm_set_page(pe, addr, count, incr, flags);
 
-    if (flags == R600_PTE_GART) {
+    /* XXX: How to distinguish between GART and other system memory pages? */
+    if (flags & R600_PTE_SYSTEM) {
         uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
         while (count) {
             unsigned bytes = count * 8;
...
@@ -4022,7 +4022,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
     /* save restore block */
     if (rdev->rlc.save_restore_obj == NULL) {
         r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
-                RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+                RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+                &rdev->rlc.save_restore_obj);
         if (r) {
             dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
             return r;
@@ -4100,7 +4101,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
     if (rdev->rlc.clear_state_obj == NULL) {
         r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
-                RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+                RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+                &rdev->rlc.clear_state_obj);
         if (r) {
             dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
             sumo_rlc_fini(rdev);
@@ -4174,8 +4176,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
     if (rdev->rlc.cp_table_size) {
         if (rdev->rlc.cp_table_obj == NULL) {
-            r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
-                    RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+            r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
+                    PAGE_SIZE, true,
+                    RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+                    &rdev->rlc.cp_table_obj);
             if (r) {
                 dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
                 sumo_rlc_fini(rdev);
...
@@ -1338,7 +1338,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
     if (rdev->vram_scratch.robj == NULL) {
         r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
                 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-                NULL, &rdev->vram_scratch.robj);
+                0, NULL, &rdev->vram_scratch.robj);
         if (r) {
             return r;
         }
@@ -3226,7 +3226,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
     if (rdev->ih.ring_obj == NULL) {
         r = radeon_bo_create(rdev, rdev->ih.ring_size,
                 PAGE_SIZE, true,
-                RADEON_GEM_DOMAIN_GTT,
+                RADEON_GEM_DOMAIN_GTT, 0,
                 NULL, &rdev->ih.ring_obj);
         if (r) {
             DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
...
@@ -468,6 +468,7 @@ struct radeon_bo {
     struct ttm_placement        placement;
     struct ttm_buffer_object    tbo;
     struct ttm_bo_kmap_obj      kmap;
+    u32                         flags;
     unsigned                    pin_count;
     void                        *kptr;
     u32                         tiling_flags;
@@ -548,7 +549,7 @@ int radeon_gem_init(struct radeon_device *rdev);
 void radeon_gem_fini(struct radeon_device *rdev);
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
                              int alignment, int initial_domain,
-                             bool discardable, bool kernel,
+                             u32 flags, bool discardable, bool kernel,
                              struct drm_gem_object **obj);
 int radeon_mode_dumb_create(struct drm_file *file_priv,
...
@@ -97,7 +97,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
     int time;
 
     n = RADEON_BENCHMARK_ITERATIONS;
-    r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
+    r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
     if (r) {
         goto out_cleanup;
     }
@@ -109,7 +109,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
     if (r) {
         goto out_cleanup;
     }
-    r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
+    r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
     if (r) {
         goto out_cleanup;
     }
...
@@ -385,7 +385,8 @@ int radeon_wb_init(struct radeon_device *rdev)
     if (rdev->wb.wb_obj == NULL) {
         r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-                RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
+                RADEON_GEM_DOMAIN_GTT, 0, NULL,
+                &rdev->wb.wb_obj);
         if (r) {
             dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
             return r;
...
@@ -127,7 +127,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
     aligned_size = ALIGN(size, PAGE_SIZE);
     ret = radeon_gem_object_create(rdev, aligned_size, 0,
             RADEON_GEM_DOMAIN_VRAM,
-            false, true,
+            0, false, true,
             &gobj);
     if (ret) {
         printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
...
@@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
     if (rdev->gart.robj == NULL) {
         r = radeon_bo_create(rdev, rdev->gart.table_size,
                 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-                NULL, &rdev->gart.robj);
+                0, NULL, &rdev->gart.robj);
         if (r) {
             return r;
         }
...
@@ -42,7 +42,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
                              int alignment, int initial_domain,
-                             bool discardable, bool kernel,
+                             u32 flags, bool discardable, bool kernel,
                              struct drm_gem_object **obj)
 {
     struct radeon_bo *robj;
@@ -64,7 +64,8 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
     }
 retry:
-    r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+    r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
+            flags, NULL, &robj);
     if (r) {
         if (r != -ERESTARTSYS) {
             if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
@@ -252,8 +253,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
     /* create a gem object to contain this object in */
     args->size = roundup(args->size, PAGE_SIZE);
     r = radeon_gem_object_create(rdev, args->size, args->alignment,
-            args->initial_domain, false,
-            false, &gobj);
+            args->initial_domain, args->flags,
+            false, false, &gobj);
     if (r) {
         up_read(&rdev->exclusive_lock);
         r = radeon_gem_handle_lockup(rdev, r);
@@ -461,11 +462,6 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
         args->operation = RADEON_VA_RESULT_ERROR;
         return -EINVAL;
     }
-    if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
-        dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
-        args->operation = RADEON_VA_RESULT_ERROR;
-        return -EINVAL;
-    }
 
     switch (args->operation) {
     case RADEON_VA_MAP:
@@ -572,7 +568,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
     args->size = ALIGN(args->size, PAGE_SIZE);
 
     r = radeon_gem_object_create(rdev, args->size, 0,
-            RADEON_GEM_DOMAIN_VRAM,
+            RADEON_GEM_DOMAIN_VRAM, 0,
             false, ttm_bo_type_device,
             &gobj);
     if (r)
...
@@ -114,15 +114,23 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
         rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                 TTM_PL_FLAG_VRAM;
     if (domain & RADEON_GEM_DOMAIN_GTT) {
-        if (rbo->rdev->flags & RADEON_IS_AGP) {
-            rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+        if (rbo->flags & RADEON_GEM_GTT_UC) {
+            rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+        } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+                   (rbo->rdev->flags & RADEON_IS_AGP)) {
+            rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+                    TTM_PL_FLAG_TT;
         } else {
             rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
         }
     }
     if (domain & RADEON_GEM_DOMAIN_CPU) {
-        if (rbo->rdev->flags & RADEON_IS_AGP) {
-            rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+        if (rbo->flags & RADEON_GEM_GTT_UC) {
+            rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+        } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+                   rbo->rdev->flags & RADEON_IS_AGP) {
+            rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+                    TTM_PL_FLAG_SYSTEM;
         } else {
             rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
         }
@@ -146,7 +154,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 int radeon_bo_create(struct radeon_device *rdev,
         unsigned long size, int byte_align, bool kernel, u32 domain,
-        struct sg_table *sg, struct radeon_bo **bo_ptr)
+        u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
 {
     struct radeon_bo *bo;
     enum ttm_bo_type type;
@@ -183,6 +191,12 @@ int radeon_bo_create(struct radeon_device *rdev,
     bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
             RADEON_GEM_DOMAIN_GTT |
             RADEON_GEM_DOMAIN_CPU);
+    bo->flags = flags;
+    /* PCI GART is always snooped */
+    if (!(rdev->flags & RADEON_IS_PCIE))
+        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
     radeon_ttm_placement_from_domain(bo, domain);
     /* Kernel allocation are uninterruptible */
     down_read(&rdev->pm.mclk_lock);
...
@@ -124,7 +124,7 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
 extern int radeon_bo_create(struct radeon_device *rdev,
                             unsigned long size, int byte_align,
-                            bool kernel, u32 domain,
+                            bool kernel, u32 domain, u32 flags,
                             struct sg_table *sg,
                             struct radeon_bo **bo_ptr);
 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
@@ -170,7 +170,8 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
 extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
                                      struct radeon_sa_manager *sa_manager,
-                                     unsigned size, u32 align, u32 domain);
+                                     unsigned size, u32 align, u32 domain,
+                                     u32 flags);
 extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
                                       struct radeon_sa_manager *sa_manager);
 extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
...
@@ -65,7 +65,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
     int ret;
 
     ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
-            RADEON_GEM_DOMAIN_GTT, sg, &bo);
+            RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
     if (ret)
         return ERR_PTR(ret);
...
@@ -204,7 +204,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
     r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
             RADEON_IB_POOL_SIZE*64*1024,
             RADEON_GPU_PAGE_SIZE,
-            RADEON_GEM_DOMAIN_GTT);
+            RADEON_GEM_DOMAIN_GTT, 0);
     if (r) {
         return r;
     }
@@ -640,7 +640,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
     /* Allocate ring buffer */
     if (ring->ring_obj == NULL) {
         r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
-                RADEON_GEM_DOMAIN_GTT,
+                RADEON_GEM_DOMAIN_GTT, 0,
                 NULL, &ring->ring_obj);
         if (r) {
             dev_err(rdev->dev, "(%d) ring create failed\n", r);
...
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
 int radeon_sa_bo_manager_init(struct radeon_device *rdev,
                               struct radeon_sa_manager *sa_manager,
-                              unsigned size, u32 align, u32 domain)
+                              unsigned size, u32 align, u32 domain, u32 flags)
 {
     int i, r;
@@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
     }
     r = radeon_bo_create(rdev, size, align, true,
-            domain, NULL, &sa_manager->bo);
+            domain, flags, NULL, &sa_manager->bo);
     if (r) {
         dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
         return r;
...
@@ -73,7 +73,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
     }
 
     r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-            NULL, &vram_obj);
+            0, NULL, &vram_obj);
     if (r) {
         DRM_ERROR("Failed to create VRAM object\n");
         goto out_cleanup;
@@ -93,7 +93,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
         struct radeon_fence *fence = NULL;
 
         r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
-                RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
+                RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
         if (r) {
             DRM_ERROR("Failed to create GTT object %d\n", i);
             goto out_lclean;
...
@@ -730,7 +730,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
     radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
     r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
-            RADEON_GEM_DOMAIN_VRAM,
+            RADEON_GEM_DOMAIN_VRAM, 0,
             NULL, &rdev->stollen_vga_memory);
     if (r) {
         return r;
...
@@ -117,7 +117,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
     bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
               RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
     r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
-            RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+            RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
     if (r) {
         dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
         return r;
@@ -674,7 +674,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
     int r, i;
 
     r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
-            RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+            RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
     if (r)
         return r;
@@ -720,7 +720,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
     int r, i;
 
     r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
-            RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+            RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
     if (r)
         return r;
...
@@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
     size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
            RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
     r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
-            RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
+            RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
     if (r) {
         dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
         return r;
...
@@ -510,7 +510,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
         r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
                 RADEON_GPU_PAGE_SIZE, true,
-                RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
+                RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
         if (r)
             return r;
@@ -858,6 +858,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
     bo_va->flags &= ~RADEON_VM_PAGE_VALID;
     bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+    bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
     if (mem) {
         addr = mem->start << PAGE_SHIFT;
         if (mem->mem_type != TTM_PL_SYSTEM) {
@@ -866,6 +867,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
         }
         if (mem->mem_type == TTM_PL_TT) {
             bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+            if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
+                bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
         } else {
             addr += rdev->vm_manager.vram_base_offset;
         }
@@ -1031,7 +1035,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
     }
 
     r = radeon_bo_create(rdev, pd_size, align, true,
-            RADEON_GEM_DOMAIN_VRAM, NULL,
+            RADEON_GEM_DOMAIN_VRAM, 0, NULL,
             &vm->page_directory);
     if (r)
         return r;
...
@@ -79,7 +79,8 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
     trace_radeon_vm_set_page(pe, addr, count, incr, flags);
 
-    if (flags == R600_PTE_GART) {
+    /* XXX: How to distinguish between GART and other system memory pages? */
+    if (flags & R600_PTE_SYSTEM) {
         uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
         while (count) {
             unsigned bytes = count * 8;
...
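
For completeness, an illustrative userspace sketch (not part of the patch): since radeon_gem_create_ioctl() now forwards args->flags into radeon_gem_object_create(), a process can ask for a write-combined GTT buffer at GEM create time. The helper name is invented, the RADEON_GEM_GTT_WC define is assumed to come from the radeon_drm.h UAPI header updated elsewhere in this series, and error handling is omitted:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

/* Hypothetical helper: create a GEM BO in GTT with a WC CPU mapping. */
static uint32_t example_create_wc_gtt_bo(int drm_fd, uint64_t size)
{
    struct drm_radeon_gem_create req;

    memset(&req, 0, sizeof(req));
    req.size = size;
    req.alignment = 4096;
    req.initial_domain = RADEON_GEM_DOMAIN_GTT;
    req.flags = RADEON_GEM_GTT_WC;  /* request a write-combined CPU mapping */

    if (ioctl(drm_fd, DRM_IOCTL_RADEON_GEM_CREATE, &req) != 0)
        return 0;                   /* 0 is never a valid GEM handle */

    return req.handle;
}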