Commit b5293714 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2020-01-23' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Avoid overflow with huge userptr objects
- uAPI fix to correctly handle negative values in
  engine->uabi_class/instance (cc: stable)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200123135045.GA12584@jlahtine-desk.ger.corp.intel.com
parents a48d4a33 5eec7182
@@ -9,16 +9,16 @@
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
 
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
 {
-	if (id == (u8)I915_ENGINE_CLASS_INVALID)
+	if (id == (u16)I915_ENGINE_CLASS_INVALID)
 		return 0xffff0000u;
 
 	GEM_BUG_ON(id >= 16);
 	return 0x10000u << id;
 }
 
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
 {
 	/*
 	 * The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
 	 * last_read - hence we always set both read and write busy for
 	 * last_write.
 	 */
-	if (id == (u8)I915_ENGINE_CLASS_INVALID)
+	if (id == (u16)I915_ENGINE_CLASS_INVALID)
 		return 0xffffffffu;
 
 	return (id + 1) | __busy_read_flag(id);
 }
 
 static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
 {
 	const struct i915_request *rq;
@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
 		return 0;
 
 	/* Beware type-expansion follies! */
-	BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+	BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
 	return flag(rq->engine->uabi_class);
 }
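These helpers changed because I915_ENGINE_CLASS_INVALID is -1: with engine->uabi_class widened to u16 (see the intel_engine_cs hunk below), a leftover (u8) cast would truncate the sentinel to 0xff and never match the 0xffff actually stored. A minimal user-space sketch of the truncation hazard, using stdint typedefs as stand-ins for the kernel's u8/u16:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t  u8;
typedef uint16_t u16;

#define I915_ENGINE_CLASS_INVALID -1	/* uAPI sentinel */

int main(void)
{
	u16 uabi_class = (u16)I915_ENGINE_CLASS_INVALID;	/* stored as 0xffff */

	/* Old comparison: (u8)-1 is 0xff, which can never equal 0xffff. */
	printf("u8 cast matches:  %d\n", uabi_class == (u8)I915_ENGINE_CLASS_INVALID);
	/* Fixed comparison at the field's real width. */
	printf("u16 cast matches: %d\n", uabi_class == (u16)I915_ENGINE_CLASS_INVALID);
	return 0;
}

This is also why the BUILD_BUG_ON(!typecheck(u16, ...)) guard exists: if the field width ever changes again, the flag callbacks fail at compile time rather than silently mis-comparing.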
...
@@ -402,7 +402,7 @@ struct get_pages_work {
 
 static struct sg_table *
 __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
-			       struct page **pvec, int num_pages)
+			       struct page **pvec, unsigned long num_pages)
 {
 	unsigned int max_segment = i915_sg_segment_size();
 	struct sg_table *st;
@@ -448,9 +448,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
 	struct get_pages_work *work = container_of(_work, typeof(*work), work);
 	struct drm_i915_gem_object *obj = work->obj;
-	const int npages = obj->base.size >> PAGE_SHIFT;
+	const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+	unsigned long pinned;
 	struct page **pvec;
-	int pinned, ret;
+	int ret;
 
 	ret = -ENOMEM;
 	pinned = 0;
@@ -553,7 +554,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 
 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
-	const int num_pages = obj->base.size >> PAGE_SHIFT;
+	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
 	struct mm_struct *mm = obj->userptr.mm->mm;
 	struct page **pvec;
 	struct sg_table *pages;
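The int → unsigned long changes above address the "huge userptr objects" overflow: with 4 KiB pages, any object of 8 TiB or more yields a page count of 2^31, which no longer fits in a signed int. A standalone sketch of the wrap, assuming 4 KiB pages and a 64-bit unsigned long:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	unsigned long long size = 8ULL << 40;	/* hypothetical 8 TiB userptr object */

	/* Converting 2^31 to int is implementation-defined and typically
	 * wraps to a negative value, corrupting every later size check. */
	int bad_num_pages = size >> PAGE_SHIFT;
	unsigned long num_pages = size >> PAGE_SHIFT;

	printf("int:           %d\n", bad_num_pages);
	printf("unsigned long: %lu\n", num_pages);
	return 0;
}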
...
@@ -274,8 +274,8 @@ struct intel_engine_cs {
 	u8 class;
 	u8 instance;
 
-	u8 uabi_class;
-	u8 uabi_instance;
+	u16 uabi_class;
+	u16 uabi_instance;
 
 	u32 uabi_capabilities;
 	u32 context_size;
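u16 here matches the width these values have on the uAPI side, where engine class and instance are carried as 16-bit fields and -1 serves as the invalid sentinel; at u8, the sentinel (or any class value above 255) would be truncated on the way through. A simplified round-trip sketch (the struct below is an illustrative stand-in, not the exact uAPI layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the 16-bit class/instance pair exposed
 * through the i915 uAPI. */
struct engine_class_instance {
	uint16_t engine_class;
	uint16_t engine_instance;
};

#define ENGINE_CLASS_INVALID -1	/* stand-in for I915_ENGINE_CLASS_INVALID */

int main(void)
{
	/* Kernel-side field, now u16: the -1 sentinel is stored as 0xffff. */
	uint16_t uabi_class = (uint16_t)ENGINE_CLASS_INVALID;

	/* Copy into the user-visible struct; equal widths mean no truncation. */
	struct engine_class_instance ci = { .engine_class = uabi_class };

	printf("sentinel survives round trip: %d\n",
	       ci.engine_class == (uint16_t)ENGINE_CLASS_INVALID);
	return 0;
}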
...
@@ -1177,6 +1177,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
 	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
 	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
 	do {
+		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
 		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
 
 		iter->dma += I915_GTT_PAGE_SIZE;
@@ -1660,6 +1661,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
 	do {
+		GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
 		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
 
 		iter.dma += I915_GTT_PAGE_SIZE;
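The new GEM_BUG_ON lines assert that each scatterlist segment is at least one GTT page long before PTEs are written from it; a segment shortened by the page-count overflow fixed above would otherwise let the loop emit PTEs past the segment's end. A user-space sketch of the same guard pattern (hypothetical segment walker, not the kernel's sgt_dma iterator):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096u	/* stand-in for I915_GTT_PAGE_SIZE */

struct segment {	/* stand-in for one scatterlist entry */
	size_t dma;	/* start address of the segment */
	size_t length;	/* bytes left in the segment */
};

/* Emit one page-sized PTE per iteration, moving to the next segment
 * when the current one is exhausted. The assert mirrors the new
 * GEM_BUG_ON: a segment shorter than one GTT page means the mapping
 * code would otherwise reference memory past the segment's end. */
static void insert_ptes(struct segment *segs, size_t nsegs)
{
	for (size_t i = 0; i < nsegs; i++) {
		struct segment s = segs[i];
		do {
			assert(s.length >= GTT_PAGE_SIZE);	/* ~ GEM_BUG_ON */
			printf("PTE -> %#zx\n", s.dma);
			s.dma += GTT_PAGE_SIZE;
			s.length -= GTT_PAGE_SIZE;
		} while (s.length);
	}
}

int main(void)
{
	struct segment segs[] = { { 0x10000, 2 * GTT_PAGE_SIZE } };
	insert_ptes(segs, 1);
	return 0;
}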
...