Commit abb0deac authored by Chris Wilson, committed by Jani Nikula

drm/i915: Fallback to single PAGE_SIZE segments for DMA remapping

If at first we do not succeed in remapping our physical pages using a
coalesced scattergather list, try again with one scattergather entry
per page. This should help with swiotlb, as it uses a limited buffer
size and only searches for contiguous chunks within its buffer, aligned
up to the next boundary - i.e. we may prematurely cause a failure
because we are unable to utilize the unused space between large chunks,
and trigger an error such as:

	 i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
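
The retry pattern is small enough to show in isolation. Below is a minimal
sketch of the fallback logic described above, assuming hypothetical
stand-ins build_sg(), dma_remap() and free_sg() in place of the driver's
real sg_alloc_table()/coalescing and i915_gem_gtt_prepare_pages() steps:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096u

/* Hypothetical stand-ins: they only model success/failure so the
 * control flow of the fallback can be followed end to end. */
static bool build_sg(unsigned int max_segment)
{
	(void)max_segment;
	return true;		/* building the table itself succeeds */
}

static bool dma_remap(unsigned int max_segment)
{
	/* Pretend the remapping path only copes with page-sized
	 * segments, as in the reported swiotlb failure. */
	return max_segment <= PAGE_SIZE;
}

static void free_sg(void)
{
}

static int get_pages(unsigned int max_segment)
{
rebuild_st:
	if (!build_sg(max_segment))
		return -1;

	if (!dma_remap(max_segment)) {
		/* Remapping the coalesced list failed; release the table
		 * and retry with one entry per page before giving up. */
		free_sg();
		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto rebuild_st;
		}
		fprintf(stderr, "Failed to DMA remap pages\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	/* First attempt uses large coalesced segments (1 MiB here). */
	return get_pages(1u << 20) ? 1 : 0;
}

Dropping back to PAGE_SIZE segments trades away coalescing (and the
shorter scatterlists it gives) for the guarantee that each mapping
request stays within what the bounce buffer can satisfy.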
Reported-by: Juergen Gross <jgross@suse.com>
Tested-by: Juergen Gross <jgross@suse.com>
Fixes: 871dfbd6 ("drm/i915: Allow compaction upto SWIOTLB max segment size")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: <drm-intel-fixes@lists.freedesktop.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20161219124346.550-1-chris@chris-wilson.co.uk
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
(cherry picked from commit d766ef53)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent d8953c83
@@ -2342,7 +2342,8 @@ static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	int page_count, i;
+	const unsigned long page_count = obj->base.size / PAGE_SIZE;
+	unsigned long i;
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
@@ -2368,7 +2369,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
 		kfree(st);
 		return ERR_PTR(-ENOMEM);
@@ -2427,8 +2428,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	i915_sg_trim(st);
 
 	ret = i915_gem_gtt_prepare_pages(obj, st);
-	if (ret)
-		goto err_pages;
+	if (ret) {
+		/* DMA remapping failed? One possible cause is that
+		 * it could not reserve enough large entries, asking
+		 * for PAGE_SIZE chunks instead may be helpful.
+		 */
+		if (max_segment > PAGE_SIZE) {
+			for_each_sgt_page(page, sgt_iter, st)
+				put_page(page);
+			sg_free_table(st);
+
+			max_segment = PAGE_SIZE;
+			goto rebuild_st;
+		} else {
+			dev_warn(&dev_priv->drm.pdev->dev,
+				 "Failed to DMA remap %lu pages\n",
+				 page_count);
+			goto err_pages;
+		}
+	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj, st);