Commit 250f8c81 authored by Jon Bloomfield, committed by Chris Wilson

drm/i915/gtt: Read-only pages for insert_entries on bdw+

Hook up the flags to allow read-only ppGTT mappings for gen8+

v2: Include a selftest to check that writes to a readonly PTE are
dropped
v3: Don't duplicate cpu_check() as we can just reuse it, and even worse
don't wholesale copy the theory-of-operation comment from igt_ctx_exec
without changing it to explain the intention behind the new test!
v4: Joonas really likes magic mystery values
Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180712185315.3288-2-chris@chris-wilson.co.uk
parent 25dda4da
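Context for the diff below: gen8_pte_encode() gained its flags argument in the preceding patch of this series, and this patch threads the flags through the gen8 insert_entries paths. As a rough idea of what the encoder does with PTE_READ_ONLY on bdw+, here is a simplified sketch (cache-level/PPAT handling omitted; illustrative, not the verbatim upstream function):

        /*
         * Simplified sketch: a gen8 PPGTT PTE is present and writable by
         * default; PTE_READ_ONLY clears the RW bit so GPU writes through
         * this mapping are dropped. PPAT/cache bits are omitted here.
         */
        static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
                                          enum i915_cache_level level,
                                          u32 flags)
        {
                gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

                if (unlikely(flags & PTE_READ_ONLY))
                        pte &= ~_PAGE_RW;

                return pte;
        }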
@@ -204,7 +204,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 		return err;
 	}
 
-	/* Currently applicable only to VLV */
+	/* Applicable to VLV, and gen8+ */
 	pte_flags = 0;
 	if (vma->obj->gt_ro)
 		pte_flags |= PTE_READ_ONLY;
@@ -1044,10 +1044,11 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 			      struct i915_page_directory_pointer *pdp,
 			      struct sgt_dma *iter,
 			      struct gen8_insert_pte *idx,
-			      enum i915_cache_level cache_level)
+			      enum i915_cache_level cache_level,
+			      u32 flags)
 {
 	struct i915_page_directory *pd;
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, 0);
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
 	gen8_pte_t *vaddr;
 	bool ret;
 
@@ -1098,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 				   struct i915_vma *vma,
 				   enum i915_cache_level cache_level,
-				   u32 unused)
+				   u32 flags)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct sgt_dma iter = sgt_dma(vma);
 	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
 	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
-				      cache_level);
+				      cache_level, flags);
 
 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 }
@@ -1113,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 					   struct i915_page_directory_pointer **pdps,
 					   struct sgt_dma *iter,
-					   enum i915_cache_level cache_level)
+					   enum i915_cache_level cache_level,
+					   u32 flags)
 {
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, 0);
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
 	u64 start = vma->node.start;
 	dma_addr_t rem = iter->sg->length;
 
@@ -1231,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 				   struct i915_vma *vma,
 				   enum i915_cache_level cache_level,
-				   u32 unused)
+				   u32 flags)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct sgt_dma iter = sgt_dma(vma);
 	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
 
 	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
-		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+					       flags);
 	} else {
 		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
 		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
-						     &iter, &idx, cache_level))
+						     &iter, &idx, cache_level,
+						     flags))
 			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
 
 		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -1658,6 +1662,9 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 			1ULL << 48 :
 			1ULL << 32;
 
+	/* From bdw, there is support for read-only pages in the PPGTT */
+	ppgtt->vm.has_read_only = true;
+
 	i915_address_space_init(&ppgtt->vm, i915);
 
 	/* There are only few exceptions for gen >=6. chv and bxt.
@@ -2472,7 +2479,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct i915_vma *vma,
 				     enum i915_cache_level level,
-				     u32 unused)
+				     u32 flags)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	struct sgt_iter sgt_iter;
@@ -2480,6 +2487,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
 	dma_addr_t addr;
 
+	/* The GTT does not support read-only mappings */
+	GEM_BUG_ON(flags & PTE_READ_ONLY);
+
 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
 	gtt_entries += vma->node.start >> PAGE_SHIFT;
 	for_each_sgt_dma(addr, sgt_iter, vma->pages)
@@ -2606,13 +2616,14 @@ struct insert_entries {
 	struct i915_address_space *vm;
 	struct i915_vma *vma;
 	enum i915_cache_level level;
+	u32 flags;
 };
 
 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 {
 	struct insert_entries *arg = _arg;
 
-	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
+	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
 	bxt_vtd_ggtt_wa(arg->vm);
 
 	return 0;
@@ -2621,9 +2632,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
 					     struct i915_vma *vma,
 					     enum i915_cache_level level,
-					     u32 unused)
+					     u32 flags)
 {
-	struct insert_entries arg = { vm, vma, level };
+	struct insert_entries arg = { vm, vma, level, flags };
 
 	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
 }
@@ -2714,7 +2725,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	struct drm_i915_gem_object *obj = vma->obj;
 	u32 pte_flags;
 
-	/* Currently applicable only to VLV */
+	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
 	pte_flags = 0;
 	if (obj->gt_ro)
 		pte_flags |= PTE_READ_ONLY;
@@ -3594,6 +3605,10 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	 */
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	i915_address_space_init(&ggtt->vm, dev_priv);
+
+	/* Only VLV supports read-only GGTT mappings */
+	ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+
 	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
 		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
 	mutex_unlock(&dev_priv->drm.struct_mutex);
...
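With the changes above in place, the read-only request travels from the object to the PTE: gt_ro on the object becomes PTE_READ_ONLY at bind time, which gen8_pte_encode() turns into a PTE without the RW bit. A hypothetical caller (not part of this patch; the only in-tree user is the ring buffer change further down) might look like:

        /* Hypothetical helper: request a GPU-read-only binding of vma */
        static int bind_read_only(struct i915_vma *vma)
        {
                /* Rejected pre-bdw for the PPGTT, and on gen8+ for the GGTT */
                if (!vma->vm->has_read_only)
                        return -ENODEV;

                vma->obj->gt_ro = 1;    /* ppgtt_bind_vma() adds PTE_READ_ONLY */
                return i915_vma_pin(vma, 0, 0, PIN_USER);
        }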
@@ -331,7 +331,12 @@ struct i915_address_space {
 	struct list_head unbound_list;
 
 	struct pagestash free_pages;
-	bool pt_kmap_wc;
+
+	/* Some systems require uncached updates of the page directories */
+	bool pt_kmap_wc:1;
+
+	/* Some systems support read-only mappings for GGTT and/or PPGTT */
+	bool has_read_only:1;
 
 	/* FIXME: Need a more generic return type */
 	gen6_pte_t (*pte_encode)(dma_addr_t addr,
...
@@ -1085,6 +1085,7 @@ void intel_ring_unpin(struct intel_ring *ring)
 static struct i915_vma *
 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 {
+	struct i915_address_space *vm = &dev_priv->ggtt.vm;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 
@@ -1094,10 +1095,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	/* mark ring buffers as read-only from GPU side by default */
-	obj->gt_ro = 1;
+	/*
+	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+	 * if supported by the platform's GGTT.
+	 */
+	if (vm->has_read_only)
+		obj->gt_ro = 1;
 
-	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
+	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma))
 		goto err;
...
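Worth noting the interlock with the GGTT path above: ring buffers are bound into the GGTT, where gen8+ has no read-only support, so leaving obj->gt_ro set unconditionally would now feed PTE_READ_ONLY into gen8_ggtt_insert_entries() and trip its new GEM_BUG_ON(). Hence the gt_ro assignment is gated on vm->has_read_only, which for the GGTT is true only on VLV.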
@@ -23,6 +23,7 @@
  */
 
 #include "../i915_selftest.h"
+#include "i915_random.h"
 
 #include "igt_flush_test.h"
 #include "mock_drm.h"
@@ -252,9 +253,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
 		}
 
 		for (; m < DW_PER_PAGE; m++) {
-			if (map[m] != 0xdeadbeef) {
+			if (map[m] != STACK_MAGIC) {
 				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
-				       n, m, map[m], 0xdeadbeef);
+				       n, m, map[m], STACK_MAGIC);
 				err = -EINVAL;
 				goto out_unmap;
 			}
@@ -310,7 +311,7 @@ create_test_object(struct i915_gem_context *ctx,
 	if (err)
 		return ERR_PTR(err);
 
-	err = cpu_fill(obj, 0xdeadbeef);
+	err = cpu_fill(obj, STACK_MAGIC);
 	if (err) {
 		pr_err("Failed to fill object with cpu, err=%d\n",
 		       err);
@@ -432,6 +433,110 @@ static int igt_ctx_exec(void *arg)
 	return err;
 }
 
+static int igt_ctx_readonly(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj = NULL;
+	struct drm_file *file;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
+	LIST_HEAD(objects);
+	struct i915_gem_context *ctx;
+	struct i915_hw_ppgtt *ppgtt;
+	unsigned long ndwords, dw;
+	int err = -ENODEV;
+
+	/*
+	 * Create a few read-only objects (with the occasional writable object)
+	 * and try to write into these object checking that the GPU discards
+	 * any write to a read-only object.
+	 */
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	ctx = i915_gem_create_context(i915, file->driver_priv);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out_unlock;
+	}
+
+	ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
+	if (!ppgtt || !ppgtt->vm.has_read_only) {
+		err = 0;
+		goto out_unlock;
+	}
+
+	ndwords = 0;
+	dw = 0;
+	while (!time_after(jiffies, end_time)) {
+		struct intel_engine_cs *engine;
+		unsigned int id;
+
+		for_each_engine(engine, i915, id) {
+			if (!intel_engine_can_store_dword(engine))
+				continue;
+
+			if (!obj) {
+				obj = create_test_object(ctx, file, &objects);
+				if (IS_ERR(obj)) {
+					err = PTR_ERR(obj);
+					goto out_unlock;
+				}
+
+				obj->gt_ro = prandom_u32_state(&prng);
+			}
+
+			intel_runtime_pm_get(i915);
+			err = gpu_fill(obj, ctx, engine, dw);
+			intel_runtime_pm_put(i915);
+			if (err) {
+				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+				       ndwords, dw, max_dwords(obj),
+				       engine->name, ctx->hw_id,
+				       yesno(!!ctx->ppgtt), err);
+				goto out_unlock;
+			}
+
+			if (++dw == max_dwords(obj)) {
+				obj = NULL;
+				dw = 0;
+			}
+			ndwords++;
+		}
+	}
+	pr_info("Submitted %lu dwords (across %u engines)\n",
+		ndwords, INTEL_INFO(i915)->num_rings);
+
+	dw = 0;
+	list_for_each_entry(obj, &objects, st_link) {
+		unsigned int rem =
+			min_t(unsigned int, ndwords - dw, max_dwords(obj));
+		unsigned int num_writes;
+
+		num_writes = rem;
+		if (obj->gt_ro)
+			num_writes = 0;
+
+		err = cpu_check(obj, num_writes);
+		if (err)
+			break;
+
+		dw += rem;
+	}
+
+out_unlock:
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	mock_file_free(i915, file);
+	return err;
+}
+
 static __maybe_unused const char *
 __engine_name(struct drm_i915_private *i915, unsigned int engines)
 {
@@ -608,6 +713,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_switch_to_kernel_context),
 		SUBTEST(igt_ctx_exec),
+		SUBTEST(igt_ctx_readonly),
 	};
 	bool fake_alias = false;
 	int err;
...
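To exercise the new subtest, the live selftests can be run at module load when the driver is built with CONFIG_DRM_I915_SELFTEST (parameter name and semantics as of this era of the driver; verify against your kernel):

        # Run the live selftests, including i915_gem_context_live_selftests
        # (and hence igt_ctx_readonly), then leave the module unloaded:
        modprobe i915 live_selftests=-1

On platforms without read-only PPGTT support, igt_ctx_readonly exits early with success, as the has_read_only check at its start intends.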