Commit 3e510a8e authored by Chris Wilson

drm/i915: Repack fence tiling mode and stride into a single integer

In the previous commit, we moved the obj->tiling_mode out of a bitfield
and into its own integer so that we could safely use READ_ONCE(). Let us
now repair some of that damage by sharing the tiling_mode with its
companion, the fence stride.

v2: New magic
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470388464-28458-18-git-send-email-chris@chris-wilson.co.uk
parent deeb1519
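Why the two fields can share one integer: a fence stride must be a multiple of FENCE_MINIMUM_STRIDE (128 bytes, see i915_tiling_ok()), so its low seven bits are always zero, while the tiling mode (I915_TILING_NONE/X/Y = 0/1/2) fits comfortably inside them. Below is a minimal, standalone C sketch of the same packing and unpacking; the constants mirror the macros added to i915_drv.h in this patch, and the sketch is an illustration only, not the kernel code itself.

#include <assert.h>
#include <stdio.h>

/* Constants as in the patch: uapi tiling values and fence stride granularity. */
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
#define FENCE_MINIMUM_STRIDE 128
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

int main(void)
{
	unsigned int stride = 4096;		/* always a multiple of 128 */
	unsigned int tiling = I915_TILING_Y;
	unsigned int tiling_and_stride = stride | tiling;

	/* The low bits carry only the tiling mode, the rest only the stride. */
	assert((stride & TILING_MASK) == 0);
	assert((tiling & STRIDE_MASK) == 0);

	printf("tiling=%u stride=%u\n",
	       tiling_and_stride & TILING_MASK,	/* i915_gem_object_get_tiling() */
	       tiling_and_stride & STRIDE_MASK);	/* i915_gem_object_get_stride() */
	return 0;
}

The BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK) added in i915_gem_set_tiling() asserts exactly this non-overlap at compile time.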
@@ -101,7 +101,7 @@ static char get_pin_flag(struct drm_i915_gem_object *obj)
 static char get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-	switch (obj->tiling_mode) {
+	switch (i915_gem_object_get_tiling(obj)) {
 	default:
 	case I915_TILING_NONE: return ' ';
 	case I915_TILING_X: return 'X';
...
@@ -2214,13 +2214,11 @@ struct drm_i915_gem_object {
 	atomic_t frontbuffer_bits;
 
-	/**
-	 * Current tiling mode for the object.
-	 */
-	unsigned int tiling_mode;
-
 	/** Current tiling stride for the object, if it's tiled. */
-	uint32_t stride;
+	unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
 
 	unsigned int has_wc_mmap;
 
 	/** Count of VMA actually bound by this object */
@@ -2359,6 +2357,24 @@ i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
 	return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
 }
 
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+	return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+	return obj->tiling_and_stride & STRIDE_MASK;
+}
+
 /*
  * Optimised SGL iterator for GEM objects
  */
@@ -3457,7 +3473,7 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-		obj->tiling_mode != I915_TILING_NONE;
+		i915_gem_object_is_tiled(obj);
 }
 
 /* i915_debugfs.c */
...
@@ -1042,7 +1042,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 	int ret;
 	bool hit_slow_path = false;
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		return -EFAULT;
 
 	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -1671,7 +1671,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Use a partial view if the object is bigger than the aperture. */
 	if (obj->base.size >= ggtt->mappable_end &&
-	    obj->tiling_mode == I915_TILING_NONE) {
+	    !i915_gem_object_is_tiled(obj)) {
 		static const unsigned int chunk_size = 256; // 1 MiB
 
 		memset(&view, 0, sizeof(view));
@@ -2189,7 +2189,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
-	if (obj->tiling_mode != I915_TILING_NONE &&
+	if (i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
 		i915_gem_object_pin_pages(obj);
 
@@ -2938,10 +2938,12 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 	size = max(size, vma->size);
 	if (flags & PIN_MAPPABLE)
-		size = i915_gem_get_ggtt_size(dev_priv, size, obj->tiling_mode);
+		size = i915_gem_get_ggtt_size(dev_priv, size,
+					      i915_gem_object_get_tiling(obj));
 
 	min_alignment =
-		i915_gem_get_ggtt_alignment(dev_priv, size, obj->tiling_mode,
+		i915_gem_get_ggtt_alignment(dev_priv, size,
+					    i915_gem_object_get_tiling(obj),
 					    flags & PIN_MAPPABLE);
 	if (alignment == 0)
 		alignment = min_alignment;
@@ -3637,10 +3639,10 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 
 	fence_size = i915_gem_get_ggtt_size(dev_priv,
 					    obj->base.size,
-					    obj->tiling_mode);
+					    i915_gem_object_get_tiling(obj));
 	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
 						      obj->base.size,
-						      obj->tiling_mode,
+						      i915_gem_object_get_tiling(obj),
 						      true);
 
 	fenceable = (vma->node.size == fence_size &&
@@ -3884,7 +3886,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (obj->pages &&
-	    obj->tiling_mode != I915_TILING_NONE &&
+	    i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (obj->madv == I915_MADV_WILLNEED)
 			i915_gem_object_unpin_pages(obj);
@@ -4054,7 +4056,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
-	    obj->tiling_mode != I915_TILING_NONE)
+	    i915_gem_object_is_tiled(obj))
 		i915_gem_object_unpin_pages(obj);
 
 	if (WARN_ON(obj->pages_pin_count))
...
@@ -803,7 +803,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-			obj->tiling_mode != I915_TILING_NONE;
+			i915_gem_object_is_tiled(obj);
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (entry->flags & EXEC_OBJECT_PINNED)
...
@@ -86,20 +86,22 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 
 	if (obj) {
 		u32 size = i915_gem_obj_ggtt_size(obj);
+		unsigned int tiling = i915_gem_object_get_tiling(obj);
+		unsigned int stride = i915_gem_object_get_stride(obj);
 		uint64_t val;
 
 		/* Adjust fence size to match tiled area */
-		if (obj->tiling_mode != I915_TILING_NONE) {
-			uint32_t row_size = obj->stride *
-				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+		if (tiling != I915_TILING_NONE) {
+			uint32_t row_size = stride *
+				(tiling == I915_TILING_Y ? 32 : 8);
 			size = (size / row_size) * row_size;
 		}
 
 		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
 				 0xfffff000) << 32;
 		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
-		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
-		if (obj->tiling_mode == I915_TILING_Y)
+		val |= (uint64_t)((stride / 128) - 1) << fence_pitch_shift;
+		if (tiling == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 		val |= I965_FENCE_REG_VALID;
 
@@ -122,6 +124,8 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 
 	if (obj) {
 		u32 size = i915_gem_obj_ggtt_size(obj);
+		unsigned int tiling = i915_gem_object_get_tiling(obj);
+		unsigned int stride = i915_gem_object_get_stride(obj);
 		int pitch_val;
 		int tile_width;
 
@@ -131,17 +135,17 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 		     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
 		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
 
-		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+		if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
 			tile_width = 128;
 		else
 			tile_width = 512;
 
 		/* Note: pitch better be a power of two tile widths */
-		pitch_val = obj->stride / tile_width;
+		pitch_val = stride / tile_width;
 		pitch_val = ffs(pitch_val) - 1;
 
 		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
+		if (tiling == I915_TILING_Y)
 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 		val |= I915_FENCE_SIZE_BITS(size);
 		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
@@ -161,6 +165,8 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
 
 	if (obj) {
 		u32 size = i915_gem_obj_ggtt_size(obj);
+		unsigned int tiling = i915_gem_object_get_tiling(obj);
+		unsigned int stride = i915_gem_object_get_stride(obj);
 		uint32_t pitch_val;
 
 		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
@@ -169,11 +175,11 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
 		     "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
 		     i915_gem_obj_ggtt_offset(obj), size);
 
-		pitch_val = obj->stride / 128;
+		pitch_val = stride / 128;
 		pitch_val = ffs(pitch_val) - 1;
 
 		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
+		if (tiling == I915_TILING_Y)
 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 		val |= I830_FENCE_SIZE_BITS(size);
 		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
@@ -201,9 +207,12 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
 	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
 		mb();
 
-	WARN(obj && (!obj->stride || !obj->tiling_mode),
+	WARN(obj &&
+	     (!i915_gem_object_get_stride(obj) ||
+	      !i915_gem_object_get_tiling(obj)),
 	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-	     obj->stride, obj->tiling_mode);
+	     i915_gem_object_get_stride(obj),
+	     i915_gem_object_get_tiling(obj));
 
 	if (IS_GEN2(dev))
 		i830_write_fence_reg(dev, reg, obj);
@@ -248,7 +257,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 
 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 {
-	if (obj->tiling_mode)
+	if (i915_gem_object_is_tiled(obj))
 		i915_gem_release_mmap(obj);
 
 	/* As we do not have an associated fence register, we will force
@@ -361,7 +370,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	bool enable = obj->tiling_mode != I915_TILING_NONE;
+	bool enable = i915_gem_object_is_tiled(obj);
 	struct drm_i915_fence_reg *reg;
 	int ret;
 
@@ -477,7 +486,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
 		 */
 		if (reg->obj) {
 			i915_gem_object_update_fence(reg->obj, reg,
						     i915_gem_object_get_tiling(reg->obj));
 		} else {
 			i915_gem_write_fence(dev, i, NULL);
 		}
...
@@ -170,6 +170,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
+	/* Make sure we don't cross-contaminate obj->tiling_and_stride */
+	BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
+
 	obj = i915_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
@@ -217,8 +220,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		}
 	}
 
-	if (args->tiling_mode != obj->tiling_mode ||
-	    args->stride != obj->stride) {
+	if (args->tiling_mode != i915_gem_object_get_tiling(obj) ||
+	    args->stride != i915_gem_object_get_stride(obj)) {
 		/* We need to rebind the object if its current allocation
 		 * no longer meets the alignment restrictions for its new
 		 * tiling mode. Otherwise we can just leave it alone, but
@@ -241,7 +244,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 				if (args->tiling_mode == I915_TILING_NONE)
 					i915_gem_object_unpin_pages(obj);
-				if (obj->tiling_mode == I915_TILING_NONE)
+				if (!i915_gem_object_is_tiled(obj))
 					i915_gem_object_pin_pages(obj);
 			}
 
@@ -250,16 +253,16 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
							    &dev->struct_mutex) ||
				obj->fence_reg != I915_FENCE_REG_NONE;
 
-			obj->tiling_mode = args->tiling_mode;
-			obj->stride = args->stride;
+			obj->tiling_and_stride =
+				args->stride | args->tiling_mode;
 
			/* Force the fence to be reacquired for GTT access */
			i915_gem_release_mmap(obj);
		}
	}
 	/* we have to maintain this existing ABI... */
-	args->stride = obj->stride;
-	args->tiling_mode = obj->tiling_mode;
+	args->stride = i915_gem_object_get_stride(obj);
+	args->tiling_mode = i915_gem_object_get_tiling(obj);
 
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -306,7 +309,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 	if (!obj)
 		return -ENOENT;
 
-	args->tiling_mode = READ_ONCE(obj->tiling_mode);
+	args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
 	switch (args->tiling_mode) {
 	case I915_TILING_X:
 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
...
@@ -781,7 +781,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->pinned = 0;
 	if (i915_gem_obj_is_pinned(obj))
 		err->pinned = 1;
-	err->tiling = obj->tiling_mode;
+	err->tiling = i915_gem_object_get_tiling(obj);
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
...
@@ -2466,9 +2466,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 		return false;
 	}
 
-	obj->tiling_mode = plane_config->tiling;
-	if (obj->tiling_mode == I915_TILING_X)
-		obj->stride = fb->pitches[0];
+	if (plane_config->tiling == I915_TILING_X)
+		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
 
 	mode_cmd.pixel_format = fb->pixel_format;
 	mode_cmd.width = fb->width;
@@ -2594,7 +2593,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
 
 	obj = intel_fb_obj(fb);
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		dev_priv->preserve_bios_swizzle = true;
 
 	drm_framebuffer_reference(fb);
@@ -2672,8 +2671,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 		BUG();
 	}
 
-	if (INTEL_INFO(dev)->gen >= 4 &&
-	    obj->tiling_mode != I915_TILING_NONE)
+	if (INTEL_INFO(dev)->gen >= 4 && i915_gem_object_is_tiled(obj))
 		dspcntr |= DISPPLANE_TILED;
 
 	if (IS_G4X(dev))
@@ -2782,7 +2780,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 		BUG();
 	}
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		dspcntr |= DISPPLANE_TILED;
 
 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
@@ -11200,7 +11198,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(ring, fb->pitches[0]);
 	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
-			obj->tiling_mode);
+			i915_gem_object_get_tiling(obj));
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -11232,7 +11230,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 
 	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, fb->pitches[0] | i915_gem_object_get_tiling(obj));
 	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
 
 	/* Contrary to the suggestions in the documentation,
@@ -11335,7 +11333,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	}
 
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
+	intel_ring_emit(ring, fb->pitches[0] | i915_gem_object_get_tiling(obj));
 	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
 	intel_ring_emit(ring, (MI_NOOP));
 
@@ -11442,7 +11440,7 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
 
 	dspcntr = I915_READ(reg);
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		dspcntr |= DISPPLANE_TILED;
 	else
 		dspcntr &= ~DISPPLANE_TILED;
@@ -11670,7 +11668,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
 		engine = &dev_priv->engine[BCS];
-		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
+		if (i915_gem_object_get_tiling(obj) !=
+		    i915_gem_object_get_tiling(intel_fb_obj(work->old_fb)))
 			/* vlv: DISPLAY_FLIP fails to change tiling */
 			engine = NULL;
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
@@ -14932,15 +14931,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
 		/* Enforce that fb modifier and tiling mode match, but only for
 		 * X-tiled. This is needed for FBC. */
-		if (!!(obj->tiling_mode == I915_TILING_X) !=
+		if (!!(i915_gem_object_get_tiling(obj) == I915_TILING_X) !=
 		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
 			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
 			return -EINVAL;
 		}
 	} else {
-		if (obj->tiling_mode == I915_TILING_X)
+		if (i915_gem_object_get_tiling(obj) == I915_TILING_X)
 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
-		else if (obj->tiling_mode == I915_TILING_Y) {
+		else if (i915_gem_object_get_tiling(obj) == I915_TILING_Y) {
 			DRM_DEBUG("No Y tiling for legacy addfb\n");
 			return -EINVAL;
 		}
@@ -14984,9 +14983,10 @@ static int intel_framebuffer_init(struct drm_device *dev,
 	}
 
 	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
-	    mode_cmd->pitches[0] != obj->stride) {
+	    mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
 		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
-			  mode_cmd->pitches[0], obj->stride);
+			  mode_cmd->pitches[0],
+			  i915_gem_object_get_stride(obj));
 		return -EINVAL;
 	}
 
...
@@ -741,7 +741,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	cache->fb.pixel_format = fb->pixel_format;
 	cache->fb.stride = fb->pitches[0];
 	cache->fb.fence_reg = obj->fence_reg;
-	cache->fb.tiling_mode = obj->tiling_mode;
+	cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
...
@@ -1129,7 +1129,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 	drm_modeset_lock_all(dev);
 	mutex_lock(&dev->struct_mutex);
 
-	if (new_bo->tiling_mode) {
+	if (i915_gem_object_is_tiled(new_bo)) {
 		DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
 		ret = -EINVAL;
 		goto out_unlock;
...
@@ -1585,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 		obj = intel_fb_obj(enabled->primary->state->fb);
 
 		/* self-refresh seems busted with untiled */
-		if (obj->tiling_mode == I915_TILING_NONE)
+		if (!i915_gem_object_is_tiled(obj))
 			enabled = NULL;
 	}
 
...
@@ -431,7 +431,7 @@ vlv_update_plane(struct drm_plane *dplane,
 	 */
 	sprctl |= SP_GAMMA_ENABLE;
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		sprctl |= SP_TILED;
 
 	/* Sizes are 0 based */
@@ -468,7 +468,7 @@ vlv_update_plane(struct drm_plane *dplane,
 	I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
 	I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
 	else
 		I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
@@ -553,7 +553,7 @@ ivb_update_plane(struct drm_plane *plane,
 	 */
 	sprctl |= SPRITE_GAMMA_ENABLE;
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		sprctl |= SPRITE_TILED;
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -607,7 +607,7 @@ ivb_update_plane(struct drm_plane *plane,
 	 * register */
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
-	else if (obj->tiling_mode != I915_TILING_NONE)
+	else if (i915_gem_object_is_tiled(obj))
 		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
 	else
 		I915_WRITE(SPRLINOFF(pipe), linear_offset);
@@ -694,7 +694,7 @@ ilk_update_plane(struct drm_plane *plane,
 	 */
 	dvscntr |= DVS_GAMMA_ENABLE;
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		dvscntr |= DVS_TILED;
 
 	if (IS_GEN6(dev))
@@ -737,7 +737,7 @@ ilk_update_plane(struct drm_plane *plane,
 	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
 	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
 	else
 		I915_WRITE(DVSLINOFF(pipe), linear_offset);
...