Commit f51455d4 authored by Chris Wilson

drm/i915: Replace 4096 with PAGE_SIZE or I915_GTT_PAGE_SIZE

Start converting from the raw byte count to its semantic macro: either we
want the size of a physical page in main memory, or we want the size of a
virtual page in the GTT. 4096 could mean either, but PAGE_SIZE and
I915_GTT_PAGE_SIZE are explicit and should improve code comprehension and
ease future changes. Once we want to use variable GTT page sizes, we would
otherwise face the challenge of working out which hardcoded value was meant
to represent a physical page and which a virtual page.

v2: Look for a few more 4096s to convert, discover IS_ALIGNED().
v3: 4096ul paranoia, make fence alignment a distinct value of 4096, keep
bdw stolen w/a as 4096 until we know better.
v4: Add asserts that i915_vma_insert() start/end are aligned to GTT page
sizes.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170110144734.26052-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 2a20d6f8
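
Before the diff: a minimal, illustrative sketch (not part of the patch) of the
distinction the commit message describes. The macro values mirror the ones this
patch adds to i915_gem_gtt.h; the two helpers below are hypothetical and exist
only to show where PAGE_SIZE versus I915_GTT_PAGE_SIZE would be checked with
IS_ALIGNED().

#include <linux/kernel.h>       /* IS_ALIGNED(), round_up() */
#include <linux/mm.h>           /* PAGE_SIZE: the CPU page size */

/* As introduced by this patch in i915_gem_gtt.h: */
#define I915_GTT_PAGE_SIZE 4096UL
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

/* Hypothetical helper: offsets and sizes handed to the GTT are checked
 * against the GTT page size, never against the CPU's PAGE_SIZE.
 */
static inline bool gtt_binding_ok(u64 gtt_offset, u64 size)
{
        return IS_ALIGNED(gtt_offset, I915_GTT_PAGE_SIZE) &&
               IS_ALIGNED(size, I915_GTT_PAGE_SIZE);
}

/* Hypothetical helper: backing storage, by contrast, is allocated and
 * DMA-mapped in whole CPU pages.
 */
static inline size_t backing_bytes(size_t bytes)
{
        return round_up(bytes, PAGE_SIZE);
}

On current hardware both constants are 4096, so the patch is functionally a
no-op; the point is that a later change to variable GTT page sizes only needs
to touch I915_GTT_PAGE_SIZE rather than audit every literal 4096.
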
@@ -3496,7 +3496,7 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
                 return;

         if (--vma->obj->pin_display == 0)
-                vma->display_alignment = 4096;
+                vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

         /* Bump the LRU to try and avoid premature eviction whilst flipping */
         if (!i915_vma_is_active(vma))
@@ -97,7 +97,7 @@
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
 #define GEN6_CONTEXT_ALIGN (64<<10)
-#define GEN7_CONTEXT_ALIGN 4096
+#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT

 static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
@@ -341,7 +341,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
         if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
                 ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
         else
-                ctx->ggtt_offset_bias = 4096;
+                ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

         return ctx;
@@ -456,7 +456,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
                 dev_priv->hw_context_size = 0;
         } else if (HAS_HW_CONTEXTS(dev_priv)) {
                 dev_priv->hw_context_size =
-                        round_up(get_context_size(dev_priv), 4096);
+                        round_up(get_context_size(dev_priv),
+                                 I915_GTT_PAGE_SIZE);
                 if (dev_priv->hw_context_size > (1<<20)) {
                         DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
                                          dev_priv->hw_context_size);
@@ -264,9 +264,9 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
         if (check_color) {
                 /* Expand search to cover neighbouring guard pages (or lack!) */
                 if (start > target->vm->start)
-                        start -= 4096;
+                        start -= I915_GTT_PAGE_SIZE;
                 if (end < target->vm->start + target->vm->total)
-                        end += 4096;
+                        end += I915_GTT_PAGE_SIZE;
         }

         drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
@@ -438,7 +438,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                 memset(&cache->node, 0, sizeof(cache->node));
                 ret = drm_mm_insert_node_in_range_generic
                         (&ggtt->base.mm, &cache->node,
-                         4096, 0, I915_COLOR_UNEVICTABLE,
+                         PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
                          0, ggtt->mappable_end,
                          DRM_MM_SEARCH_DEFAULT,
                          DRM_MM_CREATE_DEFAULT);
@@ -851,8 +851,7 @@ eb_vma_misplaced(struct i915_vma *vma)
         WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
                 !i915_vma_is_ggtt(vma));

-        if (entry->alignment &&
-            vma->node.start & (entry->alignment - 1))
+        if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
                 return true;

         if (vma->node.size < entry->pad_to_size)
@@ -80,11 +80,11 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
                 unsigned int stride = i915_gem_object_get_stride(vma->obj);

                 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-                GEM_BUG_ON(vma->node.start & 4095);
-                GEM_BUG_ON(vma->fence_size & 4095);
-                GEM_BUG_ON(stride & 127);
+                GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
+                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
+                GEM_BUG_ON(!IS_ALIGNED(stride, 128));

-                val = (vma->node.start + vma->fence_size - 4096) << 32;
+                val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
                 val |= vma->node.start;
                 val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
                 if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
@@ -127,7 +127,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
                 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
                 GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
                 GEM_BUG_ON(!is_power_of_2(vma->fence_size));
-                GEM_BUG_ON(vma->node.start & (vma->fence_size - 1));
+                GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

                 if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
                         stride /= 128;
@@ -166,7 +166,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
                 GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
                 GEM_BUG_ON(!is_power_of_2(vma->fence_size));
                 GEM_BUG_ON(!is_power_of_2(stride / 128));
-                GEM_BUG_ON(vma->node.start & (vma->fence_size - 1));
+                GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

                 val = vma->node.start;
                 if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
@@ -30,6 +30,8 @@
 struct drm_i915_private;
 struct i915_vma;

+#define I965_FENCE_PAGE 4096UL
+
 struct drm_i915_fence_reg {
         struct list_head link;
         struct drm_i915_private *i915;
@@ -329,7 +329,7 @@ static int __setup_page_dma(struct drm_i915_private *dev_priv,
                 return -ENOMEM;

         p->daddr = dma_map_page(kdev,
-                                p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+                                p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

         if (dma_mapping_error(kdev, p->daddr)) {
                 __free_page(p->page);
@@ -353,7 +353,7 @@ static void cleanup_page_dma(struct drm_i915_private *dev_priv,
         if (WARN_ON(!p->page))
                 return;

-        dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+        dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
         __free_page(p->page);
         memset(p, 0, sizeof(*p));
 }
@@ -2711,11 +2711,11 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
                                   u64 *end)
 {
         if (node->color != color)
-                *start += 4096;
+                *start += I915_GTT_PAGE_SIZE;

         node = list_next_entry(node, node_list);
         if (node->allocated && node->color != color)
-                *end -= 4096;
+                *end -= I915_GTT_PAGE_SIZE;
 }

 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2742,7 +2742,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
         /* Reserve a mappable slot for our lockless error capture */
         ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
                                                   &ggtt->error_capture,
-                                                  4096, 0,
+                                                  PAGE_SIZE, 0,
                                                   I915_COLOR_UNEVICTABLE,
                                                   0, ggtt->mappable_end,
                                                   0, 0);
@@ -40,6 +40,9 @@
 #include "i915_gem_timeline.h"
 #include "i915_gem_request.h"

+#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
 #define I915_FENCE_REG_NONE -1
 #define I915_MAX_NUM_FENCES 32
 /* 32 fences + sign bit for FENCE_REG_NONE */
@@ -543,6 +546,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 #define PIN_HIGH BIT(9)
 #define PIN_OFFSET_BIAS BIT(10)
 #define PIN_OFFSET_FIXED BIT(11)
-#define PIN_OFFSET_MASK (~4095)
+#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)

 #endif
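
A note on the PIN_OFFSET_MASK hunk above: for a power-of-two page size,
arithmetic negation and the complement of (size - 1) yield the same bit
pattern, so (-I915_GTT_PAGE_SIZE) keeps the old (~4095) semantics while
scaling with the macro. A hypothetical compile-time check (not in the patch)
illustrating the identity:

#include <linux/kernel.h>       /* u64 via linux/types.h */
#include <linux/bug.h>          /* BUILD_BUG_ON() */

static inline void pin_offset_mask_example(void)
{
        /* -PAGE == ~(PAGE - 1): all bits above the page offset are set. */
        BUILD_BUG_ON(-I915_GTT_PAGE_SIZE != ~(I915_GTT_PAGE_SIZE - 1));
        BUILD_BUG_ON((u64)(-I915_GTT_PAGE_SIZE) != (u64)~4095);
}
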
@@ -187,14 +187,14 @@ int i915_gem_render_state_init(struct intel_engine_cs *engine)
         if (!rodata)
                 return 0;

-        if (rodata->batch_items * 4 > 4096)
+        if (rodata->batch_items * 4 > PAGE_SIZE)
                 return -EINVAL;

         so = kmalloc(sizeof(*so), GFP_KERNEL);
         if (!so)
                 return -ENOMEM;

-        obj = i915_gem_object_create_internal(engine->i915, 4096);
+        obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
         if (IS_ERR(obj)) {
                 ret = PTR_ERR(obj);
                 goto err_free;
@@ -647,8 +647,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
                         stolen_offset, gtt_offset, size);

         /* KISS and expect everything to be page-aligned */
-        if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
-            WARN_ON(stolen_offset & 4095))
+        if (WARN_ON(size == 0) ||
+            WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+            WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
                 return NULL;

         stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
@@ -82,7 +82,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,

         if (INTEL_GEN(i915) >= 4) {
                 stride *= i915_gem_tile_height(tiling);
-                GEM_BUG_ON(stride & 4095);
+                GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
                 return roundup(size, stride);
         }
@@ -117,8 +117,11 @@ u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
          * Minimum alignment is 4k (GTT page size), but might be greater
          * if a fence register is needed for the object.
          */
-        if (INTEL_GEN(i915) >= 4 || tiling == I915_TILING_NONE)
-                return 4096;
+        if (tiling == I915_TILING_NONE)
+                return I915_GTT_MIN_ALIGNMENT;
+
+        if (INTEL_GEN(i915) >= 4)
+                return I965_FENCE_PAGE;

         /*
          * Previous chips need to be aligned to the size of the smallest
@@ -170,7 +173,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
         else
                 tile_width = 512;

-        if (stride & (tile_width - 1))
+        if (!IS_ALIGNED(stride, tile_width))
                 return false;

         /* 965+ just needs multiples of tile width */
@@ -195,7 +198,7 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma,
                 return false;

         alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
-        if (vma->node.start & (alignment - 1))
+        if (!IS_ALIGNED(vma->node.start, alignment))
                 return false;

         return true;
@@ -91,7 +91,7 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
         vma->vm = vm;
         vma->obj = obj;
         vma->size = obj->base.size;
-        vma->display_alignment = 4096;
+        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

         if (view) {
                 vma->ggtt_view = *view;
@@ -115,7 +115,7 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
                 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                       i915_gem_object_get_tiling(obj),
                                                       i915_gem_object_get_stride(obj));
-                GEM_BUG_ON(vma->fence_size & 4095);
+                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                 i915_gem_object_get_tiling(obj),
@@ -270,7 +270,8 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
         if (vma->node.size < size)
                 return true;

-        if (alignment && vma->node.start & (alignment - 1))
+        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
+        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                 return true;

         if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
@@ -302,7 +303,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
                 return;

         fenceable = (vma->node.size >= vma->fence_size &&
-                     (vma->node.start & (vma->fence_alignment - 1)) == 0);
+                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

         mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
@@ -380,13 +381,19 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                                   alignment, vma->fence_alignment);
         }

+        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
+        GEM_BUG_ON(!is_power_of_2(alignment));
+
         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

         end = vma->vm->total;
         if (flags & PIN_MAPPABLE)
                 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
         if (flags & PIN_ZONE_4G)
-                end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
+        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

         /* If binding the object/GGTT view requires more space than the entire
          * aperture has, reject it early before evicting everything in a vain
@@ -406,7 +413,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)

         if (flags & PIN_OFFSET_FIXED) {
                 u64 offset = flags & PIN_OFFSET_MASK;
-                if (offset & (alignment - 1) ||
+                if (!IS_ALIGNED(offset, alignment) ||
                     range_overflows(offset, size, end)) {
                         ret = -EINVAL;
                         goto err_unpin;
@@ -440,7 +447,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                  * with zero alignment, so where possible use the optimal
                  * path.
                  */
-                if (alignment <= 4096)
+                if (alignment <= I915_GTT_MIN_ALIGNMENT)
                         alignment = 0;

 search_free:
@@ -1927,7 +1927,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
         engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
         engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;

-        ret = intel_engine_create_scratch(engine, 4096);
+        ret = intel_engine_create_scratch(engine, PAGE_SIZE);
         if (ret)
                 return ret;
@@ -2209,7 +2209,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
         WARN_ON(ce->state);

-        context_size = round_up(intel_lr_context_size(engine), 4096);
+        context_size = round_up(intel_lr_context_size(engine),
+                                I915_GTT_PAGE_SIZE);

         /* One extra page as the sharing data between driver and GuC */
         context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -26,7 +26,7 @@
 #include "intel_ringbuffer.h"

-#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT

 /* Execlists regs */
 #define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
@@ -1736,7 +1736,7 @@ static int init_status_page(struct intel_engine_cs *engine)
         void *vaddr;
         int ret;

-        obj = i915_gem_object_create_internal(engine->i915, 4096);
+        obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
         if (IS_ERR(obj)) {
                 DRM_ERROR("Failed to allocate status page\n");
                 return PTR_ERR(obj);
@@ -1777,7 +1777,7 @@ static int init_status_page(struct intel_engine_cs *engine)
         engine->status_page.vma = vma;
         engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
-        engine->status_page.page_addr = memset(vaddr, 0, 4096);
+        engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                          engine->name, i915_ggtt_offset(vma));
@@ -2049,7 +2049,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
         }

         /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-        ret = intel_ring_pin(ring, 4096);
+        ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
         if (ret) {
                 intel_ring_free(ring);
                 goto error;
@@ -2466,7 +2466,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
         if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
                 struct i915_vma *vma;

-                obj = i915_gem_object_create(dev_priv, 4096);
+                obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
                 if (IS_ERR(obj))
                         goto err;
@@ -2683,7 +2683,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
                 return ret;

         if (INTEL_GEN(dev_priv) >= 6) {
-                ret = intel_engine_create_scratch(engine, 4096);
+                ret = intel_engine_create_scratch(engine, PAGE_SIZE);
                 if (ret)
                         return ret;
         } else if (HAS_BROKEN_CS_TLB(dev_priv)) {