Commit 01377a0d authored by Abdiel Janulgue, committed by Chris Wilson

drm/i915/lmem: support kernel mapping

We can create LMEM objects, but we also need to support mapping them
into kernel space for internal use.
Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Steve Hampson <steven.t.hampson@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191025153728.23689-3-chris@chris-wilson.co.uk
parent cb6d2467
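
Usage note (a hypothetical sketch, not part of the patch): with this change, driver-internal code can CPU-map an LMEM object through the regular i915_gem_object_pin_map() interface, which now also accepts iomem-backed objects. Since LMEM is mapped via io_mapping_map_wc(), only I915_MAP_WC is supported, and the kernel-mapping path currently requires a contiguous allocation (see the GEM_BUG_ON in i915_gem_object_lmem_io_map() below). The example is modelled on the igt_lmem_write_cpu() selftest added by this patch; lmem_cpu_write_example() is an invented name.

/*
 * Hypothetical usage sketch (not part of this patch): CPU-map an LMEM
 * object into kernel space and write to it from the CPU. Only
 * I915_MAP_WC works for iomem-backed objects; other map types fail.
 */
static int lmem_cpu_write_example(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	u32 *vaddr;

	/* One contiguous 64K chunk of device-local memory */
	obj = i915_gem_object_create_lmem(i915, SZ_64K,
					  I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Pins the backing store and returns a WC kernel mapping */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		i915_gem_object_put(obj);
		return PTR_ERR(vaddr);
	}

	memset32(vaddr, 0xdeadbeaf, SZ_64K / sizeof(u32));

	i915_gem_object_unpin_map(obj);
	i915_gem_object_put(obj);
	return 0;
}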
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -9,11 +9,50 @@
 #include "i915_drv.h"

 const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+	.flags = I915_GEM_OBJECT_HAS_IOMEM,
+
 	.get_pages = i915_gem_object_get_pages_buddy,
 	.put_pages = i915_gem_object_put_pages_buddy,
 	.release = i915_gem_object_release_memory_region,
 };

+/* XXX: Time to vfunc your life up? */
+void __iomem *
+i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+				 unsigned long n)
+{
+	resource_size_t offset;
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+
+	return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+					unsigned long n)
+{
+	resource_size_t offset;
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+
+	return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+			    unsigned long n,
+			    unsigned long size)
+{
+	resource_size_t offset;
+
+	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+
+	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
 	return obj->ops == &i915_gem_lmem_obj_ops;
drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,6 +14,14 @@ struct intel_memory_region;

 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;

+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+					  unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+					       unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+					unsigned long n);
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);

 struct drm_i915_gem_object *
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -31,10 +31,11 @@ struct i915_lut_handle {
 struct drm_i915_gem_object_ops {
 	unsigned int flags;
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE	BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY	BIT(2)
-#define I915_GEM_OBJECT_NO_GGTT		BIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY	BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT		BIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(5)

 	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"

 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 				 struct sg_table *pages,
@@ -154,6 +155,16 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 	rcu_read_unlock();
 }

+static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
+{
+	if (i915_gem_object_is_lmem(obj))
+		io_mapping_unmap((void __force __iomem *)ptr);
+	else if (is_vmalloc_addr(ptr))
+		vunmap(ptr);
+	else
+		kunmap(kmap_to_page(ptr));
+}
+
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
@@ -169,14 +180,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	i915_gem_object_make_unshrinkable(obj);

 	if (obj->mm.mapping) {
-		void *ptr;
-
-		ptr = page_mask_bits(obj->mm.mapping);
-		if (is_vmalloc_addr(ptr))
-			vunmap(ptr);
-		else
-			kunmap(kmap_to_page(ptr));
-
+		unmap_object(obj, page_mask_bits(obj->mm.mapping));
 		obj->mm.mapping = NULL;
 	}

@@ -231,7 +235,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 }

 /* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 				 enum i915_map_type type)
 {
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
@@ -244,6 +248,16 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 	pgprot_t pgprot;
 	void *addr;

+	if (i915_gem_object_is_lmem(obj)) {
+		void __iomem *io;
+
+		if (type != I915_MAP_WC)
+			return NULL;
+
+		io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+		return (void __force *)io;
+	}
+
 	/* A single page can always be kmapped */
 	if (n_pages == 1 && type == I915_MAP_WB)
 		return kmap(sg_page(sgt->sgl));
@@ -285,11 +299,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			      enum i915_map_type type)
 {
 	enum i915_map_type has_type;
+	unsigned int flags;
 	bool pinned;
 	void *ptr;
 	int err;

-	if (unlikely(!i915_gem_object_has_struct_page(obj)))
+	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+	if (!i915_gem_object_type_has(obj, flags))
 		return ERR_PTR(-ENXIO);

 	err = mutex_lock_interruptible(&obj->mm.lock);
@@ -321,10 +337,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			goto err_unpin;
 		}

-		if (is_vmalloc_addr(ptr))
-			vunmap(ptr);
-		else
-			kunmap(kmap_to_page(ptr));
-
+		unmap_object(obj, ptr);
 		ptr = obj->mm.mapping = NULL;
 	}
drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -13,8 +13,10 @@

 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
+#include "selftests/igt_flush_test.h"
 #include "selftests/i915_random.h"

 static void close_objects(struct intel_memory_region *mem,
@@ -275,6 +277,116 @@ static int igt_lmem_create(void *arg)
 	return err;
 }

+static int igt_lmem_write_cpu(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
+	u32 bytes[] = {
+		0, /* rng placeholder */
+		sizeof(u32),
+		sizeof(u64),
+		64, /* cl */
+		PAGE_SIZE,
+		PAGE_SIZE - sizeof(u32),
+		PAGE_SIZE - sizeof(u64),
+		PAGE_SIZE - 64,
+	};
+	u32 *vaddr;
+	u32 sz;
+	u32 i;
+	int *order;
+	int count;
+	int err;
+
+	if (!HAS_ENGINE(i915, BCS0))
+		return 0;
+
+	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+	sz = max_t(u32, 2 * PAGE_SIZE, sz);
+
+	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto out_put;
+	}
+
+	/* Put the pages into a known state -- from the gpu for added fun */
+	err = i915_gem_object_fill_blt(obj, i915->engine[BCS0]->kernel_context,
+				       0xdeadbeaf);
+	if (err)
+		goto out_unpin;
+
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, true);
+	i915_gem_object_unlock(obj);
+	if (err)
+		goto out_unpin;
+
+	count = ARRAY_SIZE(bytes);
+	order = i915_random_order(count * count, &prng);
+	if (!order) {
+		err = -ENOMEM;
+		goto out_unpin;
+	}
+
+	/* We want to throw in a random width/align */
+	bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
+				     sizeof(u32));
+
+	i = 0;
+	do {
+		u32 offset;
+		u32 align;
+		u32 dword;
+		u32 size;
+		u32 val;
+
+		size = bytes[order[i] % count];
+		i = (i + 1) % (count * count);
+
+		align = bytes[order[i] % count];
+		i = (i + 1) % (count * count);
+
+		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));
+
+		offset = igt_random_offset(&prng, 0, obj->base.size,
+					   size, align);
+
+		val = prandom_u32_state(&prng);
+		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
+			 size / sizeof(u32));
+
+		/*
+		 * Sample random dw -- don't waste precious time reading every
+		 * single dw.
+		 */
+		dword = igt_random_offset(&prng, offset,
+					  offset + size,
+					  sizeof(u32), sizeof(u32));
+		dword /= sizeof(u32);
+		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
+			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
+			       size, align, offset);
+			err = -EINVAL;
+			break;
+		}
+	} while (!__igt_timeout(end_time, NULL));

+out_unpin:
+	i915_gem_object_unpin_map(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -308,6 +420,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_lmem_create),
+		SUBTEST(igt_lmem_write_cpu),
 	};

 	if (!HAS_LMEM(i915)) {