Commit 871dfbd6 authored by Chris Wilson

drm/i915: Allow compaction up to SWIOTLB max segment size

commit 1625e7e5 ("drm/i915: make compact dma scatter lists creation
work with SWIOTLB backend") took a heavy-handed approach to undo the
scatterlist compaction in the face of SWIOTLB. (The compaction hit a bug
whereby we tried to pass a segment larger than SWIOTLB could handle.) We
can be a little more intelligent and try compacting the scatterlist up
to the maximum SWIOTLB segment size (when using SWIOTLB).

v2: Tidy sg_mark_end() and cpp
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161011082021.14606-2-chris@chris-wilson.co.uk
parent 465350d0
@@ -2208,6 +2208,15 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+static unsigned long swiotlb_max_size(void)
+{
+#if IS_ENABLED(CONFIG_SWIOTLB)
+	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
+#else
+	return 0;
+#endif
+}
+
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
@@ -2219,6 +2228,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
+	unsigned long max_segment;
 	int ret;
 	gfp_t gfp;
 
@@ -2229,6 +2239,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
+	max_segment = swiotlb_max_size();
+	if (!max_segment)
+		max_segment = obj->base.size;
+
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
 		return -ENOMEM;
@@ -2270,15 +2284,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 				goto err_pages;
 			}
 		}
-#ifdef CONFIG_SWIOTLB
-		if (swiotlb_nr_tbl()) {
-			st->nents++;
-			sg_set_page(sg, page, PAGE_SIZE, 0);
-			sg = sg_next(sg);
-			continue;
-		}
-#endif
-		if (!i || page_to_pfn(page) != last_pfn + 1) {
+		if (!i ||
+		    sg->length >= max_segment ||
+		    page_to_pfn(page) != last_pfn + 1) {
 			if (i)
 				sg = sg_next(sg);
 			st->nents++;
@@ -2291,9 +2299,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 		/* Check that the i965g/gm workaround works. */
 		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
 	}
-#ifdef CONFIG_SWIOTLB
-	if (!swiotlb_nr_tbl())
-#endif
+	if (sg) /* loop terminated early; short sg table */
 		sg_mark_end(sg);
 
 	obj->pages = st;
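Editor's note: as an illustration of the compaction rule the patch introduces, here is a minimal userspace C sketch. It is not kernel code and none of the names in it (struct segment, build_segments, the pfn array) come from the patch; it only mimics the new test in the diff above, coalescing physically contiguous pages into one segment but starting a new segment once the current one reaches max_segment bytes.

/*
 * Illustrative sketch only -- not part of the patch. It mirrors the
 * patched condition: start a new segment on the first page, when the
 * current segment has reached max_segment, or when the next page is
 * not physically contiguous with the previous one.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct segment {
	unsigned long start_pfn;
	unsigned long length;		/* bytes */
};

static size_t build_segments(const unsigned long *pfns, size_t n,
			     unsigned long max_segment,
			     struct segment *out)
{
	size_t nsegs = 0;

	for (size_t i = 0; i < n; i++) {
		if (i == 0 ||
		    out[nsegs - 1].length >= max_segment ||
		    pfns[i] != pfns[i - 1] + 1) {
			out[nsegs].start_pfn = pfns[i];
			out[nsegs].length = PAGE_SIZE;
			nsegs++;
		} else {
			out[nsegs - 1].length += PAGE_SIZE;
		}
	}
	return nsegs;
}

int main(void)
{
	/* Six contiguous pages with a two-page cap: three 8192-byte
	 * segments instead of one 24576-byte segment. */
	unsigned long pfns[] = { 100, 101, 102, 103, 104, 105 };
	struct segment segs[6];
	size_t n = build_segments(pfns, 6, 2 * PAGE_SIZE, segs);

	for (size_t i = 0; i < n; i++)
		printf("segment %zu: pfn %lu, %lu bytes\n",
		       i, segs[i].start_pfn, segs[i].length);
	return 0;
}

In the patch itself the cap comes from swiotlb_max_size(): with SWIOTLB enabled it is swiotlb_nr_tbl() << IO_TLB_SHIFT rounded down to a page, so no segment exceeds what SWIOTLB can bounce; without SWIOTLB the cap falls back to the object size, i.e. compaction is effectively unrestricted.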