Commit e11bfb99 authored by Christian König

drm/ttm: cleanup BO size handling v3

Based on an idea from Dave, but cleaned up a bit.

We had multiple fields for essentially the same thing.

Now bo->base.size is the original size of the BO in
arbitrary units, usually bytes.

bo->mem.num_pages is the size in number of pages in the
resource domain of bo->mem.mem_type.

v2: use the GEM object size instead of the BO size
v3: fix printks in some places
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/406831/
parent dc379303
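For readers skimming the diff, a minimal sketch of the convention this patch settles on: the byte size lives in the embedded GEM object (bo->base.size) and the page count of a resource is derived from it by rounding up to whole pages, as in PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT in the hunks below. The struct and function names in this sketch are illustrative stand-ins, not the real TTM definitions, and 4 KiB pages are assumed purely for the example.

/*
 * Standalone sketch, not kernel code: models bo->base.size (bytes) and
 * bo->mem.num_pages (pages) with stand-in types and a page round-up.
 */
#include <stddef.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12                                /* assumed 4 KiB pages */
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_ALIGN(x) \
        (((x) + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1))

struct sketch_gem_object { size_t size; };                  /* like bo->base.size */
struct sketch_resource { unsigned long num_pages; };        /* like bo->mem.num_pages */

static unsigned long sketch_bytes_to_pages(size_t size)
{
        /* Round the byte size up to whole pages, then convert to a count. */
        return SKETCH_PAGE_ALIGN(size) >> SKETCH_PAGE_SHIFT;
}

int main(void)
{
        struct sketch_gem_object base = { .size = 6000 };   /* not page aligned */
        struct sketch_resource mem = {
                .num_pages = sketch_bytes_to_pages(base.size),
        };

        /* 6000 bytes round up to 2 pages with 4 KiB pages. */
        printf("size=%zu bytes -> num_pages=%lu\n", base.size, mem.num_pages);
        return 0;
}

The patch below applies the same rule in the TTM core (ttm_bo_init_reserved() and ttm_tt_init_fields()) and converts the drivers to read whichever of the two remaining fields they actually need.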
@@ -269,7 +269,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
     case TTM_PL_TT:
         sgt = drm_prime_pages_to_sg(obj->dev,
                                     bo->tbo.ttm->pages,
-                                    bo->tbo.num_pages);
+                                    bo->tbo.ttm->num_pages);
         if (IS_ERR(sgt))
             return sgt;
...
@@ -121,7 +121,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
     struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-    if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
+    if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
         return AMDGPU_BO_INVALID_OFFSET;
     if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
...
@@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
     if (r < 0)
         return r;
-    r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+    r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
     if (r)
         return r;
...
@@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 {
-    return bo->tbo.num_pages << PAGE_SHIFT;
+    return bo->tbo.base.size;
 }
 static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
 {
-    return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+    return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
 }
 static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
...
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
     TP_fast_assign(
         __entry->bo = bo;
-        __entry->pages = bo->tbo.num_pages;
+        __entry->pages = bo->tbo.mem.num_pages;
         __entry->type = bo->tbo.mem.mem_type;
         __entry->prefer = bo->preferred_domains;
         __entry->allow = bo->allowed_domains;
...
@@ -636,7 +636,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 out:
     /* update statistics */
-    atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+    atomic64_add(bo->base.size, &adev->num_bytes_moved);
     amdgpu_bo_move_notify(bo, evict, new_mem);
     return 0;
 }
@@ -2131,7 +2131,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
         return r;
     }
-    num_pages = bo->tbo.num_pages;
+    num_pages = bo->tbo.mem.num_pages;
     mm_node = bo->tbo.mem.mm_node;
     num_loops = 0;
     while (num_pages) {
@@ -2161,7 +2161,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
         }
     }
-    num_pages = bo->tbo.num_pages;
+    num_pages = bo->tbo.mem.num_pages;
     mm_node = bo->tbo.mem.mm_node;
     while (num_pages) {
...
@@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev)
         return r;
     }
-    memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size);
+    memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size);
     amdgpu_bo_kunmap(adev->mes.eop_gpu_obj);
     amdgpu_bo_unreserve(adev->mes.eop_gpu_obj);
...
@@ -473,10 +473,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
     switch (bo->mem.mem_type) {
     case TTM_PL_VRAM:
-        drm->gem.vram_available -= bo->mem.size;
+        drm->gem.vram_available -= bo->base.size;
         break;
     case TTM_PL_TT:
-        drm->gem.gart_available -= bo->mem.size;
+        drm->gem.gart_available -= bo->base.size;
         break;
     default:
         break;
@@ -504,10 +504,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
     if (!nvbo->bo.pin_count) {
         switch (bo->mem.mem_type) {
         case TTM_PL_VRAM:
-            drm->gem.vram_available += bo->mem.size;
+            drm->gem.vram_available += bo->base.size;
             break;
         case TTM_PL_TT:
-            drm->gem.gart_available += bo->mem.size;
+            drm->gem.gart_available += bo->base.size;
             break;
         default:
             break;
@@ -913,7 +913,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
         return 0;
     if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
-        *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
+        *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
                                        nvbo->mode, nvbo->zeta);
     }
...
@@ -286,11 +286,11 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
     bl_size = bw * bh * (1 << tile_mode) * gob_size;
-    DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+    DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
                   offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
-                  nvbo->bo.mem.size);
+                  nvbo->bo.base.size);
-    if (bl_size + offset > nvbo->bo.mem.size)
+    if (bl_size + offset > nvbo->bo.base.size)
         return -ERANGE;
     return 0;
@@ -363,7 +363,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
     } else {
         uint32_t size = mode_cmd->pitches[i] * height;
-        if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+        if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
             return -ERANGE;
     }
 }
...
@@ -30,9 +30,9 @@
 struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
     struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-    int npages = nvbo->bo.num_pages;
-    return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
+    return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
+                                 nvbo->bo.ttm->num_pages);
 }
 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
...
@@ -80,7 +80,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
     struct nv10_fence_chan *fctx;
     struct ttm_resource *reg = &priv->bo->bo.mem;
     u32 start = reg->start * PAGE_SIZE;
-    u32 limit = start + reg->size - 1;
+    u32 limit = start + priv->bo->bo.base.size - 1;
     int ret = 0;
     fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
...
@@ -39,7 +39,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
     struct nv10_fence_chan *fctx;
     struct ttm_resource *reg = &priv->bo->bo.mem;
     u32 start = reg->start * PAGE_SIZE;
-    u32 limit = start + reg->size - 1;
+    u32 limit = start + priv->bo->bo.base.size - 1;
     int ret;
     fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
...
@@ -50,7 +50,7 @@ static inline void qxl_bo_unreserve(struct qxl_bo *bo)
 static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 {
-    return bo->tbo.num_pages << PAGE_SHIFT;
+    return bo->tbo.base.size;
 }
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
...
@@ -401,7 +401,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
     struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
     /* Sort A before B if A is smaller. */
-    return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+    return (int)la->robj->tbo.mem.num_pages -
+           (int)lb->robj->tbo.mem.num_pages;
 }
 /**
...
@@ -54,20 +54,19 @@ static void radeon_update_memory_usage(struct radeon_bo *bo,
                                        unsigned mem_type, int sign)
 {
     struct radeon_device *rdev = bo->rdev;
-    u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
     switch (mem_type) {
     case TTM_PL_TT:
         if (sign > 0)
-            atomic64_add(size, &rdev->gtt_usage);
+            atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
         else
-            atomic64_sub(size, &rdev->gtt_usage);
+            atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
         break;
     case TTM_PL_VRAM:
         if (sign > 0)
-            atomic64_add(size, &rdev->vram_usage);
+            atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
         else
-            atomic64_sub(size, &rdev->vram_usage);
+            atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
         break;
     }
 }
@@ -256,7 +255,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
         }
         return 0;
     }
-    r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+    r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
     if (r) {
         return r;
     }
@@ -610,7 +609,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 out:
     radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                            bo->tbo.mem.start << PAGE_SHIFT,
-                           bo->tbo.num_pages << PAGE_SHIFT);
+                           bo->tbo.base.size);
     return 0;
 }
...
@@ -109,12 +109,12 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
 static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
 {
-    return bo->tbo.num_pages << PAGE_SHIFT;
+    return bo->tbo.base.size;
 }
 static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
 {
-    return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+    return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE;
 }
 static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
...
@@ -34,9 +34,9 @@
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
     struct radeon_bo *bo = gem_to_radeon_bo(obj);
-    int npages = bo->tbo.num_pages;
-    return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
+    return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,
+                                 bo->tbo.ttm->num_pages);
 }
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
...
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
     TP_fast_assign(
         __entry->bo = bo;
-        __entry->pages = bo->tbo.num_pages;
+        __entry->pages = bo->tbo.mem.num_pages;
     ),
     TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
...
@@ -274,7 +274,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 out:
     /* update statistics */
-    atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+    atomic64_add(bo->base.size, &rdev->num_bytes_moved);
     radeon_bo_move_notify(bo, evict, new_mem);
     return 0;
 }
...
@@ -72,9 +72,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
     struct ttm_resource_manager *man;
     int i, mem_type;
-    drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
-               bo, bo->mem.num_pages, bo->mem.size >> 10,
-               bo->mem.size >> 20);
+    drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
+               bo, bo->mem.num_pages, bo->base.size >> 10,
+               bo->base.size >> 20);
     for (i = 0; i < placement->num_placement; i++) {
         mem_type = placement->placement[i].mem_type;
         drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
@@ -268,7 +268,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
         goto out_err;
     }
-    ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
+    ctx->bytes_moved += bo->base.size;
     return 0;
 out_err:
@@ -985,8 +985,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
     memset(&hop, 0, sizeof(hop));
-    mem.num_pages = bo->num_pages;
-    mem.size = mem.num_pages << PAGE_SHIFT;
+    mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
     mem.page_alignment = bo->mem.page_alignment;
     mem.bus.offset = 0;
     mem.bus.addr = NULL;
@@ -1102,7 +1101,7 @@ EXPORT_SYMBOL(ttm_bo_validate);
 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                          struct ttm_buffer_object *bo,
-                         unsigned long size,
+                         size_t size,
                          enum ttm_bo_type type,
                          struct ttm_placement *placement,
                          uint32_t page_alignment,
@@ -1113,9 +1112,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                          void (*destroy) (struct ttm_buffer_object *))
 {
     struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-    int ret = 0;
-    unsigned long num_pages;
     bool locked;
+    int ret = 0;
     ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
     if (ret) {
@@ -1127,16 +1125,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
         return -ENOMEM;
     }
-    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-    if (num_pages == 0) {
-        pr_err("Illegal buffer object size\n");
-        if (destroy)
-            (*destroy)(bo);
-        else
-            kfree(bo);
-        ttm_mem_global_free(mem_glob, acc_size);
-        return -EINVAL;
-    }
     bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
     kref_init(&bo->kref);
@@ -1145,10 +1133,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
     INIT_LIST_HEAD(&bo->swap);
     bo->bdev = bdev;
     bo->type = type;
-    bo->num_pages = num_pages;
-    bo->mem.size = num_pages << PAGE_SHIFT;
     bo->mem.mem_type = TTM_PL_SYSTEM;
-    bo->mem.num_pages = bo->num_pages;
+    bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
     bo->mem.mm_node = NULL;
     bo->mem.page_alignment = page_alignment;
     bo->mem.bus.offset = 0;
@@ -1166,9 +1152,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
     }
     if (!ttm_bo_uses_embedded_gem_object(bo)) {
         /*
-         * bo.gem is not initialized, so we have to setup the
+         * bo.base is not initialized, so we have to setup the
          * struct elements we want use regardless.
         */
+        bo->base.size = size;
         dma_resv_init(&bo->base._resv);
         drm_vma_node_reset(&bo->base.vma_node);
     }
@@ -1210,7 +1197,7 @@ EXPORT_SYMBOL(ttm_bo_init_reserved);
 int ttm_bo_init(struct ttm_bo_device *bdev,
                 struct ttm_buffer_object *bo,
-                unsigned long size,
+                size_t size,
                 enum ttm_bo_type type,
                 struct ttm_placement *placement,
                 uint32_t page_alignment,
...
@@ -431,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
     map->virtual = NULL;
     map->bo = bo;
-    if (num_pages > bo->num_pages)
+    if (num_pages > bo->mem.num_pages)
         return -EINVAL;
-    if (start_page > bo->num_pages)
+    if ((start_page + num_pages) > bo->mem.num_pages)
         return -EINVAL;
     ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
@@ -485,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
     if (mem->bus.is_iomem) {
         void __iomem *vaddr_iomem;
-        size_t size = bo->num_pages << PAGE_SHIFT;
         if (mem->bus.addr)
             vaddr_iomem = (void __iomem *)mem->bus.addr;
         else if (mem->bus.caching == ttm_write_combined)
-            vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+            vaddr_iomem = ioremap_wc(mem->bus.offset,
+                                     bo->base.size);
         else
-            vaddr_iomem = ioremap(mem->bus.offset, size);
+            vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
         if (!vaddr_iomem)
             return -ENOMEM;
@@ -517,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
          * or to make the buffer object look contiguous.
          */
         prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
-        vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+        vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
         if (!vaddr)
             return -ENOMEM;
...
@@ -198,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
     /* Fault should not cross bo boundary. */
     page_offset &= ~(fault_page_size - 1);
-    if (page_offset + fault_page_size > bo->num_pages)
+    if (page_offset + fault_page_size > bo->mem.num_pages)
         goto out_fallback;
     if (bo->mem.bus.is_iomem)
@@ -306,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
     page_last = vma_pages(vma) + vma->vm_pgoff -
         drm_vma_node_start(&bo->base.vma_node);
-    if (unlikely(page_offset >= bo->num_pages))
+    if (unlikely(page_offset >= bo->mem.num_pages))
         return VM_FAULT_SIGBUS;
     prot = ttm_io_prot(bo, &bo->mem, prot);
@@ -469,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
             << PAGE_SHIFT);
     int ret;
-    if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+    if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
         return -EIO;
     ret = ttm_bo_reserve(bo, true, false, NULL);
...
@@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
                                uint32_t page_flags,
                                enum ttm_caching caching)
 {
-    ttm->num_pages = bo->num_pages;
+    ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
     ttm->caching = ttm_cached;
     ttm->page_flags = page_flags;
     ttm->dma_address = NULL;
...
@@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
     d.src_addr = NULL;
     d.dst_pages = dst->ttm->pages;
     d.src_pages = src->ttm->pages;
-    d.dst_num_pages = dst->num_pages;
-    d.src_num_pages = src->num_pages;
+    d.dst_num_pages = dst->mem.num_pages;
+    d.src_num_pages = src->mem.num_pages;
     d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
     d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
     d.diff = diff;
...
@@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
     uint32_t new_flags;
     place = vmw_vram_placement.placement[0];
-    place.lpfn = bo->num_pages;
+    place.lpfn = bo->mem.num_pages;
     placement.num_placement = 1;
     placement.placement = &place;
     placement.num_busy_placement = 1;
@@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
      * that situation.
      */
     if (bo->mem.mem_type == TTM_PL_VRAM &&
-        bo->mem.start < bo->num_pages &&
+        bo->mem.start < bo->mem.num_pages &&
         bo->mem.start > 0 &&
         buf->base.pin_count == 0) {
         ctx.interruptible = false;
@@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
     if (virtual)
         return virtual;
-    ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+    ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
     if (ret)
         DRM_ERROR("Buffer object map failed: %d.\n", ret);
...
@@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
      * Do a page by page copy of COTables. This eliminates slow vmap()s.
      * This should really be a TTM utility.
      */
-    for (i = 0; i < old_bo->num_pages; ++i) {
+    for (i = 0; i < old_bo->mem.num_pages; ++i) {
         bool dummy;
         ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
...
@@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
     if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
-        if (unlikely(new_query_bo->base.num_pages > 4)) {
+        if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
             VMW_DEBUG_USER("Query buffer too large.\n");
             return -EINVAL;
         }
@@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
         return ret;
     /* Make sure DMA doesn't cross BO boundaries. */
-    bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+    bo_size = vmw_bo->base.base.size;
     if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
         VMW_DEBUG_USER("Invalid DMA offset.\n");
         return -EINVAL;
...
@@ -64,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
     spin_lock(&gman->lock);
     if (gman->max_gmr_pages > 0) {
-        gman->used_gmr_pages += bo->num_pages;
+        gman->used_gmr_pages += mem->num_pages;
         if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
             goto nospace;
     }
     mem->mm_node = gman;
     mem->start = id;
-    mem->num_pages = bo->num_pages;
     spin_unlock(&gman->lock);
     return 0;
 nospace:
-    gman->used_gmr_pages -= bo->num_pages;
+    gman->used_gmr_pages -= mem->num_pages;
     spin_unlock(&gman->lock);
     ida_free(&gman->gmr_ida, id);
     return -ENOSPC;
...
@@ -1220,7 +1220,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
     int ret;
     requested_size = mode_cmd->height * mode_cmd->pitches[0];
-    if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
+    if (unlikely(requested_size > bo->base.base.size)) {
         DRM_ERROR("Screen buffer object size is too small "
                   "for requested mode.\n");
         return -EINVAL;
...
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
     struct vmw_bo_dirty *dirty = vbo->dirty;
-    pgoff_t num_pages = vbo->base.num_pages;
+    pgoff_t num_pages = vbo->base.mem.num_pages;
     size_t size, acc_size;
     int ret;
     static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
         return ret;
     page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
-    if (unlikely(page_offset >= bo->num_pages)) {
+    if (unlikely(page_offset >= bo->mem.num_pages)) {
         ret = VM_FAULT_SIGBUS;
         goto out_unlock;
     }
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
     page_offset = vmf->pgoff -
         drm_vma_node_start(&bo->base.vma_node);
-    if (page_offset >= bo->num_pages ||
+    if (page_offset >= bo->mem.num_pages ||
         vmw_resources_clean(vbo, page_offset,
                             page_offset + PAGE_SIZE,
                             &allowed_prefault)) {
@@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
     page_offset = vmf->pgoff -
         drm_vma_node_start(&bo->base.vma_node);
-    if (page_offset >= bo->num_pages ||
+    if (page_offset >= bo->mem.num_pages ||
         vmw_resources_clean(vbo, page_offset,
                             page_offset + PAGE_SIZE,
                             &allowed_prefault)) {
...
@@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
     int ret;
     if (likely(res->backup)) {
-        BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+        BUG_ON(res->backup->base.base.size < size);
         return 0;
     }
...
@@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
         return ret;
     }
-    if ((u64)buffer->base.num_pages * PAGE_SIZE <
-        (u64)size + (u64)offset) {
+    if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
         VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
         ret = -EINVAL;
         goto out_bad_arg;
...
@@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
     cmd->body.host.mipmap = 0;
     cmd->body.transfer = ddirty->transfer;
     suffix->suffixSize = sizeof(*suffix);
-    suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+    suffix->maximumOffset = ddirty->buf->base.base.size;
     if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
         blit_size += sizeof(struct vmw_stdu_update);
@@ -1238,7 +1238,7 @@ static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
     vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
     suffix->suffixSize = sizeof(*suffix);
-    suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+    suffix->maximumOffset = vfbbo->buffer->base.base.size;
     vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
                              bb->y1, bb->y2);
...
@@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                                &res->backup,
                                &user_srf->backup_base);
     if (ret == 0) {
-        if (res->backup->base.num_pages * PAGE_SIZE <
-            res->backup_size) {
+        if (res->backup->base.base.size < res->backup_size) {
             VMW_DEBUG_USER("Surface backup buffer too small.\n");
             vmw_bo_unreference(&res->backup);
             ret = -EINVAL;
@@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
     if (res->backup) {
         rep->buffer_map_handle =
             drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
-        rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+        rep->buffer_size = res->backup->base.base.size;
         rep->buffer_handle = backup_handle;
     } else {
         rep->buffer_map_handle = 0;
@@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
     rep->crep.buffer_handle = backup_handle;
     rep->crep.buffer_map_handle =
         drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
-    rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+    rep->crep.buffer_size = srf->res.backup->base.base.size;
     rep->creq.version = drm_vmw_gb_surface_v1;
     rep->creq.svga3d_flags_upper_32_bits =
...
@@ -125,7 +125,6 @@ struct ttm_buffer_object {
     struct ttm_bo_device *bdev;
     enum ttm_bo_type type;
     void (*destroy) (struct ttm_buffer_object *);
-    unsigned long num_pages;
     size_t acc_size;
     /**
@@ -397,13 +396,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                          struct ttm_buffer_object *bo,
-                         unsigned long size,
-                         enum ttm_bo_type type,
+                         size_t size, enum ttm_bo_type type,
                          struct ttm_placement *placement,
                          uint32_t page_alignment,
                          struct ttm_operation_ctx *ctx,
-                         size_t acc_size,
-                         struct sg_table *sg,
+                         size_t acc_size, struct sg_table *sg,
                          struct dma_resv *resv,
                          void (*destroy) (struct ttm_buffer_object *));
@@ -445,7 +442,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
  * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
  */
 int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
-                unsigned long size, enum ttm_bo_type type,
+                size_t size, enum ttm_bo_type type,
                 struct ttm_placement *placement,
                 uint32_t page_alignment, bool interrubtible, size_t acc_size,
                 struct sg_table *sg, struct dma_resv *resv,
...
@@ -171,7 +171,6 @@ struct ttm_bus_placement {
 struct ttm_resource {
     void *mm_node;
     unsigned long start;
-    unsigned long size;
     unsigned long num_pages;
     uint32_t page_alignment;
     uint32_t mem_type;
...