Commit 82ad6443 authored by Chris Wilson

drm/i915/gtt: Rename i915_hw_ppgtt base member

In the near future, I want to subclass gen6_hw_ppgtt as it contains a
few specialised members and I wish to add more. To avoid the ugliness of
using ppgtt->base.base, rename the i915_hw_ppgtt base member
(i915_address_space) as vm, which is our common shorthand for an
i915_address_space local.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180605153758.18422-1-chris@chris-wilson.co.uk
parent cd68e04c
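
As a rough sketch of what the rename means for the code below (structure contents abbreviated; only the vm member and the container_of() helper are taken from the diff, everything else here is illustrative):

/*
 * Minimal sketch of the rename. The real structures in i915_gem_gtt.h carry
 * many more fields, and container_of() comes from <linux/kernel.h>; it is
 * expanded here so the snippet stands alone.
 */
#include <stddef.h>

struct i915_address_space {
	unsigned long long total;	/* size of the address space */
};

struct i915_hw_ppgtt {
	struct i915_address_space vm;	/* was "base" before this commit */
	/* ... ppgtt-specific members ... */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	/* the container lookup now names the renamed member */
	return container_of(vm, struct i915_hw_ppgtt, vm);
}

Callers then write ppgtt->vm.total or ggtt->vm.insert_page() rather than ppgtt->base.total, and the planned gen6 subclass can embed struct i915_hw_ppgtt as its own base member without producing base.base chains.
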
......@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&dev_priv->drm.struct_mutex);
ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
......
......@@ -361,9 +361,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
#define gvt_ggtt_sz(gvt) \
((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
......
......@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
if (ppgtt->base.file != stats->file_priv)
if (ppgtt->vm.file != stats->file_priv)
continue;
}
......@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
dpy_count, dpy_size);
seq_printf(m, "%llu [%pa] gtt total\n",
ggtt->base.total, &ggtt->mappable_end);
ggtt->vm.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf)));
......
......@@ -3213,7 +3213,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
return container_of(vm, struct i915_hw_ppgtt, base);
return container_of(vm, struct i915_hw_ppgtt, vm);
}
/* i915_gem_fence_reg.c */
......
......@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
......@@ -249,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
pinned = ggtt->base.reserved;
pinned = ggtt->vm.reserved;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = ggtt->base.total;
args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
......@@ -1223,7 +1223,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb();
ggtt->base.insert_page(&ggtt->base,
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
wmb();
......@@ -1246,8 +1246,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
node.start, node.size);
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
......@@ -1426,7 +1425,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
ggtt->base.insert_page(&ggtt->base,
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
......@@ -1455,8 +1454,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
node.start, node.size);
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
......@@ -4374,7 +4372,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_address_space *vm = &dev_priv->ggtt.base;
struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct i915_vma *vma;
int ret;
......
......@@ -197,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
if (ctx->ppgtt)
i915_ppgtt_close(&ctx->ppgtt->base);
i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
......@@ -249,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
......@@ -810,11 +810,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
args->value = ctx->ppgtt->base.total;
args->value = ctx->ppgtt->vm.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else
args->value = to_i915(dev)->ggtt.base.total;
args->value = to_i915(dev)->ggtt.vm.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
args->value = i915_gem_context_no_error_capture(ctx);
......
......@@ -703,7 +703,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->ctx = ctx;
eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
eb->context_flags = 0;
if (ctx->flags & CONTEXT_NO_ZEROMAP)
......@@ -943,7 +943,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
if (cache->node.allocated) {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
ggtt->base.clear_range(&ggtt->base,
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
drm_mm_remove_node(&cache->node);
......@@ -1016,7 +1016,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
(&ggtt->base.mm, &cache->node,
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
......@@ -1037,7 +1037,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
wmb();
ggtt->base.insert_page(&ggtt->base,
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
} else {
......
(One file's diff is collapsed and not shown.)
......@@ -65,7 +65,7 @@ typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;
#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
......@@ -367,7 +367,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
* the spec.
*/
struct i915_ggtt {
struct i915_address_space base;
struct i915_address_space vm;
struct io_mapping iomap; /* Mapping to our CPU mappable region */
struct resource gmadr; /* GMADR resource */
......@@ -385,7 +385,7 @@ struct i915_ggtt {
};
struct i915_hw_ppgtt {
struct i915_address_space base;
struct i915_address_space vm;
struct kref ref;
struct drm_mm_node node;
unsigned long pd_dirty_rings;
......@@ -543,7 +543,7 @@ static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
GEM_BUG_ON(!i915_is_ggtt(vm));
return container_of(vm, struct i915_ggtt, base);
return container_of(vm, struct i915_ggtt, vm);
}
#define INTEL_MAX_PPAT_ENTRIES 8
......
......@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
......
......@@ -480,7 +480,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
&i915->ggtt.base.inactive_list, vm_link) {
&i915->ggtt.vm.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count;
......
......@@ -642,7 +642,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret)
goto err;
vma = i915_vma_instance(obj, &ggtt->base, NULL);
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_pages;
......@@ -653,7 +653,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
......@@ -666,7 +666,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
......
......@@ -973,8 +973,7 @@ i915_error_object_create(struct drm_i915_private *i915,
void __iomem *s;
int ret;
ggtt->base.insert_page(&ggtt->base, dma, slot,
I915_CACHE_NONE, 0);
ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
......@@ -993,7 +992,7 @@ i915_error_object_create(struct drm_i915_private *i915,
out:
compress_fini(&compress, dst);
ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
......@@ -1466,7 +1465,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
ee->vm = ctx->ppgtt ? &ctx->ppgtt->base : &ggtt->base;
ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
record_context(&ee->context, ctx);
......@@ -1564,7 +1563,7 @@ static void capture_active_buffers(struct i915_gpu_state *error)
static void capture_pinned_buffers(struct i915_gpu_state *error)
{
struct i915_address_space *vm = &error->i915->ggtt.base;
struct i915_address_space *vm = &error->i915->ggtt.vm;
struct drm_i915_error_buffer *bo;
struct i915_vma *vma;
int count_inactive, count_active;
......
......@@ -956,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context,
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
__entry->hw_id = ctx->hw_id;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
......@@ -997,7 +997,7 @@ TRACE_EVENT(switch_mm,
__entry->class = engine->uabi_class;
__entry->instance = engine->instance;
__entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
__entry->vm = to->ppgtt ? &to->ppgtt->vm : NULL;
__entry->dev = engine->i915->drm.primary->index;
),
......
......@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
node->start + node->size,
node->size / 1024);
ggtt->base.reserved -= node->size;
ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node);
}
......@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
ret = i915_gem_gtt_reserve(&ggtt->base, node,
ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
if (!ret)
ggtt->base.reserved += size;
ggtt->vm.reserved += size;
return ret;
}
......@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long ggtt_end = ggtt->base.total;
unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
......
......@@ -85,7 +85,7 @@ vma_create(struct drm_i915_gem_object *obj,
int i;
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
......
......@@ -515,7 +515,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj);
}
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
......@@ -585,7 +585,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
......
......@@ -570,7 +570,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj))
return ERR_CAST(obj);
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma))
goto err;
......
......@@ -536,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/
static void flush_ggtt_writes(struct i915_vma *vma)
{
struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
struct drm_i915_private *dev_priv = vma->vm->i915;
if (i915_vma_is_map_and_fenceable(vma))
POSTING_READ_FW(GUC_STATUS);
......
......@@ -431,7 +431,7 @@ static u64 execlists_update_context(struct i915_request *rq)
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
return ce->lrc_desc;
......@@ -1672,7 +1672,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
......@@ -2070,7 +2070,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* not needed in 48-bit.*/
if (rq->gem_context->ppgtt &&
(intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
!i915_vm_is_48bit(&rq->gem_context->ppgtt->base) &&
!i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
!intel_vgpu_active(rq->i915)) {
ret = intel_logical_ring_emit_pdps(rq);
if (ret)
......@@ -2668,7 +2668,7 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
......@@ -2774,7 +2774,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
goto error_deref_obj;
}
vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
......
......@@ -1121,7 +1121,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma))
goto err;
......@@ -1300,7 +1300,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
}
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
......
......@@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
static int igt_check_page_sizes(struct i915_vma *vma)
{
struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
int err = 0;
......@@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
static int igt_mock_exhaust_device_supported_pages(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->base.i915;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
......@@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
goto out_put;
}
vma = i915_vma_instance(obj, &ppgtt->base, NULL);
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
......@@ -458,7 +458,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->base.i915;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
int bit;
......@@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
/* Force the page size for this object */
obj->mm.page_sizes.sg = page_size;
vma = i915_vma_instance(obj, &ppgtt->base, NULL);
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_unpin;
......@@ -591,7 +591,7 @@ static void close_object_list(struct list_head *objects,
list_for_each_entry_safe(obj, on, objects, st_link) {
struct i915_vma *vma;
vma = i915_vma_instance(obj, &ppgtt->base, NULL);
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (!IS_ERR(vma))
i915_vma_close(vma);
......@@ -604,8 +604,8 @@ static void close_object_list(struct list_head *objects,
static int igt_mock_ppgtt_huge_fill(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->base.i915;
unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
unsigned long page_num;
bool single = false;
LIST_HEAD(objects);
......@@ -641,7 +641,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
list_add(&obj->st_link, &objects);
vma = i915_vma_instance(obj, &ppgtt->base, NULL);
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
break;
......@@ -725,7 +725,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
static int igt_mock_ppgtt_64K(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->base.i915;
struct drm_i915_private *i915 = ppgtt->vm.i915;
struct drm_i915_gem_object *obj;
const struct object_info {
unsigned int size;
......@@ -819,7 +819,7 @@ static int igt_mock_ppgtt_64K(void *arg)
*/
obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
vma = i915_vma_instance(obj, &ppgtt->base, NULL);
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_object_unpin;
......@@ -887,8 +887,8 @@ static int igt_mock_ppgtt_64K(void *arg)
static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{
struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
const int gen = INTEL_GEN(vma->vm->i915);
struct drm_i915_private *i915 = vma->vm->i915;
const int gen = INTEL_GEN(i915);
unsigned int count = vma->size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj;
struct i915_vma *batch;
......@@ -1047,7 +1047,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u32 dword, u32 val)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
......@@ -1100,7 +1101,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
I915_RND_STATE(prng);
......@@ -1439,7 +1441,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_vma_instance(obj, &ppgtt->base, NULL);
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
......@@ -1493,7 +1495,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_vma_instance(obj, &ppgtt->base, NULL);
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
......@@ -1531,7 +1533,8 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
......@@ -1587,7 +1590,8 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
......@@ -1696,14 +1700,14 @@ int i915_gem_huge_page_mock_selftests(void)
goto out_unlock;
}
if (!i915_vm_is_48bit(&ppgtt->base)) {
if (!i915_vm_is_48bit(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;
}
/* If we were ever hit this then it's time to mock the 64K scratch */
if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
pr_err("PPGTT missing 64K scratch page\n");
err = -EINVAL;
goto out_close;
......@@ -1712,7 +1716,7 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
i915_ppgtt_close(&ppgtt->base);
i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
......@@ -1758,7 +1762,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
}
if (ctx->ppgtt)
ctx->ppgtt->base.scrub_64K = true;
ctx->ppgtt->vm.scrub_64K = true;
err = i915_subtests(tests, ctx);
......
......@@ -115,7 +115,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
......@@ -290,7 +290,7 @@ create_test_object(struct i915_gem_context *ctx,
{
struct drm_i915_gem_object *obj;
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
u64 size;
int err;
......@@ -557,7 +557,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
continue;
......
......@@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
u64 size;
for (size = 0;
size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
size += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
......@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
return -EINVAL;
}
if (list_empty(&i915->ggtt.base.inactive_list)) {
if (list_empty(&i915->ggtt.vm.inactive_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
......@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
{
struct i915_vma *vma;
list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
i915_vma_unpin(vma);
}
......@@ -103,7 +103,7 @@ static int igt_evict_something(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
err = i915_gem_evict_something(&ggtt->base,
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
......@@ -116,7 +116,7 @@ static int igt_evict_something(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict something */
err = i915_gem_evict_something(&ggtt->base,
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
......@@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err);
......@@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict the node */
err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
err);
......@@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg)
* i915_gtt_color_adjust throughout our driver, so using a mock color
* adjust will work just fine for our purposes.
*/
ggtt->base.mm.color_adjust = mock_color_adjust;
ggtt->vm.mm.color_adjust = mock_color_adjust;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
......@@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma);
/* Remove just the second vma */
err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup;
......@@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg)
*/
target.color = I915_CACHE_L3_LLC;
err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL;
......@@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
cleanup:
unpin_ggtt(i915);
cleanup_objects(i915);
ggtt->base.mm.color_adjust = NULL;
ggtt->vm.mm.color_adjust = NULL;
return err;
}
......@@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
err = i915_gem_evict_vm(&ggtt->base);
err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
......@@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg)
unpin_ggtt(i915);
err = i915_gem_evict_vm(&ggtt->base);
err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
......@@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg)
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, i915->ggtt.base.total,
0, i915->ggtt.vm.total,
PIN_NOEVICT);
if (err)
goto out_locked;
......@@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg)
goto out_locked;
}
if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
0, i915->ggtt.base.total,
0, i915->ggtt.vm.total,
PIN_NOEVICT)) {
kfree(r);
break;
......
......@@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg)
obj = huge_gem_object(i915,
nreal * PAGE_SIZE,
i915->ggtt.base.total + PAGE_SIZE);
i915->ggtt.vm.total + PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
......@@ -311,7 +311,7 @@ static int igt_partial_tiling(void *arg)
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
(1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
(1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
......@@ -440,7 +440,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int err;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
......
......@@ -430,7 +430,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
......@@ -555,7 +555,8 @@ static int live_empty_request(void *arg)
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
......
......@@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
if (vma->vm != &ctx->ppgtt->base) {
if (vma->vm != &ctx->ppgtt->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
......@@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
struct i915_address_space *vm =
&ctx->ppgtt->base;
struct i915_address_space *vm = &ctx->ppgtt->vm;
struct i915_vma *vma;
int err;
......@@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg)
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
VALID(4096, PIN_GLOBAL),
......@@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg)
VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
VALID(i915->ggtt.base.total, PIN_GLOBAL),
NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
VALID(i915->ggtt.vm.total, PIN_GLOBAL),
NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
......@@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg)
* variable start, end and size.
*/
NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
#endif
{ },
#undef NOSPACE
......@@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg)
* focusing on error handling of boundary conditions.
*/
GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
goto out;
......@@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
static int igt_vma_rotate(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_address_space *vm = &i915->ggtt.base;
struct i915_address_space *vm = &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const struct intel_rotation_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 },
......@@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma,
static int igt_vma_partial(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_address_space *vm = &i915->ggtt.base;
struct i915_address_space *vm = &i915->ggtt.vm;
const unsigned int npages = 1021; /* prime! */
struct drm_i915_gem_object *obj;
const struct phase {
......
......@@ -107,8 +107,8 @@ static int emit_recurse_batch(struct hang *h,
struct drm_i915_private *i915 = h->i915;
struct i915_address_space *vm =
rq->gem_context->ppgtt ?
&rq->gem_context->ppgtt->base :
&i915->ggtt.base;
&rq->gem_context->ppgtt->vm :
&i915->ggtt.vm;
struct i915_vma *hws, *vma;
unsigned int flags;
u32 *batch;
......
......@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
struct i915_request *rq,
u32 arbitration_command)
{
struct i915_address_space *vm = &rq->gem_context->ppgtt->base;
struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
......
......@@ -33,7 +33,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
memset(cs, 0xc5, PAGE_SIZE);
i915_gem_object_unpin_map(result);
vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
......
......@@ -66,25 +66,25 @@ mock_ppgtt(struct drm_i915_private *i915,
return NULL;
kref_init(&ppgtt->ref);
ppgtt->base.i915 = i915;
ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->base.file = ERR_PTR(-ENODEV);
INIT_LIST_HEAD(&ppgtt->base.active_list);
INIT_LIST_HEAD(&ppgtt->base.inactive_list);
INIT_LIST_HEAD(&ppgtt->base.unbound_list);
INIT_LIST_HEAD(&ppgtt->base.global_link);
drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
ppgtt->base.clear_range = nop_clear_range;
ppgtt->base.insert_page = mock_insert_page;
ppgtt->base.insert_entries = mock_insert_entries;
ppgtt->base.bind_vma = mock_bind_ppgtt;
ppgtt->base.unbind_vma = mock_unbind_ppgtt;
ppgtt->base.set_pages = ppgtt_set_pages;
ppgtt->base.clear_pages = clear_pages;
ppgtt->base.cleanup = mock_cleanup;
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
INIT_LIST_HEAD(&ppgtt->vm.active_list);
INIT_LIST_HEAD(&ppgtt->vm.inactive_list);
INIT_LIST_HEAD(&ppgtt->vm.unbound_list);
INIT_LIST_HEAD(&ppgtt->vm.global_link);
drm_mm_init(&ppgtt->vm.mm, 0, ppgtt->vm.total);
ppgtt->vm.clear_range = nop_clear_range;
ppgtt->vm.insert_page = mock_insert_page;
ppgtt->vm.insert_entries = mock_insert_entries;
ppgtt->vm.bind_vma = mock_bind_ppgtt;
ppgtt->vm.unbind_vma = mock_unbind_ppgtt;
ppgtt->vm.set_pages = ppgtt_set_pages;
ppgtt->vm.clear_pages = clear_pages;
ppgtt->vm.cleanup = mock_cleanup;
return ppgtt;
}
......@@ -107,27 +107,27 @@ void mock_init_ggtt(struct drm_i915_private *i915)
INIT_LIST_HEAD(&i915->vm_list);
ggtt->base.i915 = i915;
ggtt->vm.i915 = i915;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->base.total = 4096 * PAGE_SIZE;
ggtt->base.clear_range = nop_clear_range;
ggtt->base.insert_page = mock_insert_page;
ggtt->base.insert_entries = mock_insert_entries;
ggtt->base.bind_vma = mock_bind_ggtt;
ggtt->base.unbind_vma = mock_unbind_ggtt;
ggtt->base.set_pages = ggtt_set_pages;
ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = mock_cleanup;
i915_address_space_init(&ggtt->base, i915, "global");
ggtt->vm.total = 4096 * PAGE_SIZE;
ggtt->vm.clear_range = nop_clear_range;
ggtt->vm.insert_page = mock_insert_page;
ggtt->vm.insert_entries = mock_insert_entries;
ggtt->vm.bind_vma = mock_bind_ggtt;
ggtt->vm.unbind_vma = mock_unbind_ggtt;
ggtt->vm.set_pages = ggtt_set_pages;
ggtt->vm.clear_pages = clear_pages;
ggtt->vm.cleanup = mock_cleanup;
i915_address_space_init(&ggtt->vm, i915, "global");
}
void mock_fini_ggtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
i915_address_space_fini(&ggtt->base);
i915_address_space_fini(&ggtt->vm);
}