Commit 1ec9e26d authored by Daniel Vetter

drm/i915: Consolidate binding parameters into flags

Anything more than one bool parameter is a pain to read; symbolic
constants are much better.

Split out from Chris' vma-binding rework patch.

v2: Undo the behaviour change in object_pin that Chris spotted.

v3: Split out misplaced hunk to handle set_cache_level errors,
spotted by Jani.

v4: Keep the current over-zealous binding logic in the execbuffer code
working with a quick hack while the overall binding code gets shuffled
around.

v5: Reorder the PIN_ flags for more natural patch splitup.

v6: Pull out the PIN_GLOBAL split-up again.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 931c1c26
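For readers skimming the diff below, here is a minimal, self-contained sketch of the bool-to-flags pattern this patch applies. It is illustrative only, not code from the patch: PIN_MAPPABLE/PIN_NONBLOCK mirror the new defines in i915_drv.h, while pin_old()/pin_new() are hypothetical stand-ins for the old and new i915_gem_object_pin() calling conventions.

#include <stdio.h>

#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2

/* Before: bare booleans are opaque at the call site. */
static int pin_old(unsigned int alignment, int map_and_fenceable, int nonblocking)
{
	printf("old: align=%u mappable=%d nonblock=%d\n",
	       alignment, map_and_fenceable, nonblocking);
	return 0;
}

/* After: a flags word documents itself at the call site. */
static int pin_new(unsigned int alignment, unsigned int flags)
{
	printf("new: align=%u mappable=%d nonblock=%d\n",
	       alignment, !!(flags & PIN_MAPPABLE), !!(flags & PIN_NONBLOCK));
	return 0;
}

int main(void)
{
	pin_old(4096, 1, 0);         /* unclear: which bool is which? */
	pin_new(4096, PIN_MAPPABLE); /* self-describing */
	return 0;
}

The win shows up at call sites in the diff: i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK) says what it does, where the old i915_gem_obj_ggtt_pin(obj, 0, true, true) forced a trip to the prototype.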
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2076,11 +2076,12 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
+#define PIN_MAPPABLE 0x1
+#define PIN_NONBLOCK 0x2
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
-				     bool map_and_fenceable,
-				     bool nonblocking);
+				     unsigned flags);
 void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
@@ -2283,11 +2284,9 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
-		      bool map_and_fenceable,
-		      bool nonblocking)
+		      unsigned flags)
 {
-	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
-				   map_and_fenceable, nonblocking);
+	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags);
 }
 
 /* i915_gem_context.c */
@@ -2331,8 +2330,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
-					  bool mappable,
-					  bool nonblock);
+					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,12 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
-static __must_check int
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-			   struct i915_address_space *vm,
-			   unsigned alignment,
-			   bool map_and_fenceable,
-			   bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
@@ -605,7 +599,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	char __user *user_data;
 	int page_offset, page_length, ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
 	if (ret)
		goto out;
 
@@ -1411,7 +1405,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* Now bind it into the GTT if needed */
-	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 	if (ret)
		goto unlock;
 
@@ -2721,7 +2715,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	if (!drm_mm_node_allocated(&vma->node)) {
		i915_gem_vma_destroy(vma);
-
		return 0;
 	}
@@ -3219,14 +3212,13 @@ static int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
-			   bool map_and_fenceable,
-			   bool nonblocking)
+			   unsigned flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	size_t gtt_max =
-		map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
 
@@ -3238,18 +3230,18 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
						   obj->tiling_mode, true);
 	unfenced_alignment =
		i915_gem_get_gtt_alignment(dev,
					   obj->base.size,
					   obj->tiling_mode, false);
 
 	if (alignment == 0)
-		alignment = map_and_fenceable ? fence_alignment :
+		alignment = flags & PIN_MAPPABLE ? fence_alignment :
						unfenced_alignment;
-	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
+	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
		DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
 	}
 
-	size = map_and_fenceable ? fence_size : obj->base.size;
+	size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
 
 	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
@@ -3257,7 +3249,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	if (obj->base.size > gtt_max) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
			  obj->base.size,
-			  map_and_fenceable ? "mappable" : "total",
+			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  gtt_max);
		return -E2BIG;
 	}
@@ -3281,9 +3273,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
					  DRM_MM_SEARCH_DEFAULT);
 	if (ret) {
		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level,
-					       map_and_fenceable,
-					       nonblocking);
+					       obj->cache_level, flags);
		if (ret == 0)
			goto search_free;
 
@@ -3314,9 +3304,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
		obj->map_and_fenceable = mappable && fenceable;
 	}
 
-	WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
+	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
-	trace_i915_vma_bind(vma, map_and_fenceable);
+	trace_i915_vma_bind(vma, flags);
 	i915_gem_verify_gtt(dev);
 	return 0;
 
@@ -3687,7 +3677,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
-	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
 	if (ret)
		goto err_unpin_display;
 
@@ -3843,30 +3833,28 @@ int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
-		    bool map_and_fenceable,
-		    bool nonblocking)
+		    unsigned flags)
 {
-	const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
 	struct i915_vma *vma;
 	int ret;
 
-	WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+	if (WARN_ON(flags & PIN_MAPPABLE && !i915_is_ggtt(vm)))
+		return -EINVAL;
 
 	vma = i915_gem_obj_to_vma(obj, vm);
-
 	if (vma) {
		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
			return -EBUSY;
 
		if ((alignment &&
		     vma->node.start & (alignment - 1)) ||
-		    (map_and_fenceable && !obj->map_and_fenceable)) {
+		    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
			WARN(vma->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     i915_gem_obj_offset(obj, vm), alignment,
-			     map_and_fenceable,
+			     flags & PIN_MAPPABLE,
			     obj->map_and_fenceable);
			ret = i915_vma_unbind(vma);
			if (ret)
@@ -3875,9 +3863,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (!i915_gem_obj_bound(obj, vm)) {
-		ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
-						 map_and_fenceable,
-						 nonblocking);
+		ret = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
		if (ret)
			return ret;
 
@@ -3885,10 +3871,12 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	vma = i915_gem_obj_to_vma(obj, vm);
-	vma->bind_vma(vma, obj->cache_level, flags);
+	vma->bind_vma(vma, obj->cache_level,
+		      flags & PIN_MAPPABLE ? GLOBAL_BIND : 0);
 
 	i915_gem_obj_to_vma(obj, vm)->pin_count++;
-	obj->pin_mappable |= map_and_fenceable;
+	if (flags & PIN_MAPPABLE)
+		obj->pin_mappable |= true;
 
 	return 0;
 }
@@ -3946,7 +3934,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (obj->user_pin_count == 0) {
-		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
		if (ret)
			goto out;
 	}
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -258,8 +258,7 @@ i915_gem_create_context(struct drm_device *dev,
	 * context.
	 */
 	ret = i915_gem_obj_ggtt_pin(ctx->obj,
-				    get_context_alignment(dev),
-				    false, false);
+				    get_context_alignment(dev), 0);
 	if (ret) {
		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
		goto err_destroy;
@@ -335,8 +334,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 		if (i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
-						      get_context_alignment(dev),
-						      false, false));
+						      get_context_alignment(dev), 0));
			/* Fake a finish/inactive */
			dctx->obj->base.write_domain = 0;
			dctx->obj->active = 0;
@@ -612,8 +610,7 @@ static int do_switch(struct intel_ring_buffer *ring,
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->obj,
-					    get_context_alignment(ring->dev),
-					    false, false);
+					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
 	}
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,7 +68,7 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
-			 bool mappable, bool nonblocking)
+			 unsigned flags)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
@@ -76,7 +76,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	int ret = 0;
 	int pass = 0;
 
-	trace_i915_gem_evict(dev, min_size, alignment, mappable);
+	trace_i915_gem_evict(dev, min_size, alignment, flags);
 
 	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
@@ -102,7 +102,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
	 */
 	INIT_LIST_HEAD(&unwind_list);
 
-	if (mappable) {
+	if (flags & PIN_MAPPABLE) {
		BUG_ON(!i915_is_ggtt(vm));
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level, 0,
@@ -117,7 +117,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 		goto found;
 	}
 
-	if (nonblocking)
+	if (flags & PIN_NONBLOCK)
		goto none;
 
 	/* Now merge in the soon-to-be-expired objects... */
@@ -141,7 +141,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	/* Can we unpin some objects such as idle hw contents,
	 * or pending flips?
	 */
-	if (nonblocking)
+	if (flags & PIN_NONBLOCK)
		return -ENOSPC;
 
 	/* Only idle the GPU and repeat the search once */
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -544,19 +544,23 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-	bool need_fence, need_mappable;
-	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
-		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
+	bool need_fence;
+	unsigned flags;
 	int ret;
 
+	flags = 0;
+
 	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(vma);
+	if (need_fence || need_reloc_mappable(vma))
+		flags |= PIN_MAPPABLE;
 
-	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
-				  false);
+	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+		flags |= PIN_MAPPABLE;
+
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
		return ret;
 
@@ -585,6 +589,9 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 	}
 
+	/* Temporary hack while we rework the binding logic. */
+	flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
 	vma->bind_vma(vma, obj->cache_level, flags);
 
 	return 0;
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -885,7 +885,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, false, true);
+					       I915_CACHE_NONE, PIN_NONBLOCK);
		if (ret)
			return ret;
 
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -34,15 +34,15 @@ TRACE_EVENT(i915_gem_object_create,
 );
 
 TRACE_EVENT(i915_vma_bind,
-	    TP_PROTO(struct i915_vma *vma, bool mappable),
-	    TP_ARGS(vma, mappable),
+	    TP_PROTO(struct i915_vma *vma, unsigned flags),
+	    TP_ARGS(vma, flags),
 
	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(struct i915_address_space *, vm)
			     __field(u32, offset)
			     __field(u32, size)
-			     __field(bool, mappable)
+			     __field(unsigned, flags)
			     ),
 
	    TP_fast_assign(
@@ -50,12 +50,12 @@ TRACE_EVENT(i915_vma_bind,
			   __entry->vm = vma->vm;
			   __entry->offset = vma->node.start;
			   __entry->size = vma->node.size;
-			   __entry->mappable = mappable;
+			   __entry->flags = flags;
			   ),
 
	    TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
		      __entry->obj, __entry->offset, __entry->size,
-		      __entry->mappable ? ", mappable" : "",
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
		      __entry->vm)
 );
@@ -196,26 +196,26 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );
 
 TRACE_EVENT(i915_gem_evict,
-	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
-	    TP_ARGS(dev, size, align, mappable),
+	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
+	    TP_ARGS(dev, size, align, flags),
 
	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, size)
			     __field(u32, align)
-			     __field(bool, mappable)
+			     __field(unsigned, flags)
			    ),
 
	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   __entry->size = size;
			   __entry->align = align;
-			   __entry->mappable = mappable;
+			   __entry->flags = flags;
			  ),
 
	    TP_printk("dev=%d, size=%d, align=%d %s",
		      __entry->dev, __entry->size, __entry->align,
-		      __entry->mappable ? ", mappable" : "")
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
 TRACE_EVENT(i915_gem_evict_everything,
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1349,7 +1349,7 @@ void intel_setup_overlay(struct drm_device *dev)
 		}
		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
+		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
		if (ret) {
			DRM_ERROR("failed to pin overlay register bo\n");
			goto out_free_bo;
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2741,7 +2741,7 @@ intel_alloc_context_page(struct drm_device *dev)
 		return NULL;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ctx, 4096, PIN_MAPPABLE);
 	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -533,7 +533,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 
-	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
 	if (ret)
		goto err_unref;
 
@@ -1273,10 +1273,9 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
-	if (ret != 0) {
+	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE);
+	if (ret)
		goto err_unref;
-	}
 
 	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
@@ -1356,7 +1355,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	ring->obj = obj;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
 	if (ret)
		goto err_unref;
 
@@ -1919,7 +1918,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 			return -ENOMEM;
		}
 
-		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to ping batch bo\n");