Commit 5602452e authored by Tvrtko Ursulin

drm/i915: Use __sg_alloc_table_from_pages for userptr allocations

With the addition of __sg_alloc_table_from_pages we can control
the maximum coalescing size and eliminate a separate path for
allocating backing store here.

Similar to 871dfbd6 ("drm/i915: Allow compaction upto
SWIOTLB max segment size") this enables more compact sg lists to
be created and so has a beneficial effect on workloads with many
and/or large objects of this class.
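
As a reference for the mechanism, the newly added __sg_alloc_table_from_pages()
takes an explicit cap on segment size: runs of physically contiguous pages are
coalesced into single scatterlist entries of at most max_segment bytes. A
minimal sketch of the call shape (the locals pages and n_pages are
hypothetical, for illustration only):

	struct sg_table sgt;
	unsigned int max_segment = i915_sg_segment_size();
	int err;

	/*
	 * Coalesce contiguous pages into as few segments as possible,
	 * but keep each segment <= max_segment bytes so that an active
	 * swiotlb can still bounce every segment individually.
	 */
	err = __sg_alloc_table_from_pages(&sgt, pages, n_pages,
					  0, /* offset into the first page */
					  (unsigned long)n_pages << PAGE_SHIFT,
					  max_segment, GFP_KERNEL);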

v2:
 * Rename helper to i915_sg_segment_size and fix swiotlb override.
 * Commit message update.

v3:
 * Actually include the swiotlb override fix.

v4:
 * Regroup parameters a bit. (Chris Wilson)

v5:
 * Rebase for swiotlb_max_segment.
 * Add DMA map failure handling as in abb0deac
   ("drm/i915: Fallback to single PAGE_SIZE segments for DMA remapping");
   the pattern is sketched in isolation after this changelog.

v6: Handle swiotlb_max_segment() returning 1. (Joonas Lahtinen)

v7: Rebase.
v8: Commit spelling fix.
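
The v5 fallback, sketched in isolation (the helpers build_table() and
dma_map_table() are hypothetical stand-ins; the real logic lives in
__i915_gem_userptr_alloc_pages() in the diff below): if DMA mapping of a
coalesced table fails, for instance because DMA remapping cannot map a large
segment, the table is freed and rebuilt with single PAGE_SIZE segments, which
can always be mapped.

	unsigned int max_segment = i915_sg_segment_size();
	int ret;

alloc_table:
	ret = build_table(st, pvec, num_pages, max_segment); /* hypothetical */
	if (ret)
		return ret;

	ret = dma_map_table(obj, st); /* hypothetical */
	if (ret) {
		sg_free_table(st);
		if (max_segment > PAGE_SIZE) {
			/* Retry with single PAGE_SIZE segments. */
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}
		return ret;
	}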
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170803091417.23677-1-tvrtko.ursulin@linux.intel.com
parent 89d8589c
@@ -2828,6 +2828,21 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 	 (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
 	 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
 
+static inline unsigned int i915_sg_segment_size(void)
+{
+	unsigned int size = swiotlb_max_segment();
+
+	if (size == 0)
+		return SCATTERLIST_MAX_SEGMENT;
+
+	size = rounddown(size, PAGE_SIZE);
+	/* swiotlb_max_segment_size can return 1 byte when it means one page. */
+	if (size < PAGE_SIZE)
+		size = PAGE_SIZE;
+
+	return size;
+}
+
 static inline const struct intel_device_info *
 intel_info(const struct drm_i915_private *dev_priv)
 {
...
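
To make the helper's edge cases concrete, here are illustrative return values
assuming 4 KiB pages (not part of the patch):

	/*
	 * swiotlb_max_segment() == 0      -> SCATTERLIST_MAX_SEGMENT (swiotlb inactive)
	 * swiotlb_max_segment() == 1      -> PAGE_SIZE ("one page" convention; see v6)
	 * swiotlb_max_segment() == 262144 -> 262144 (already a PAGE_SIZE multiple)
	 * swiotlb_max_segment() == 262200 -> 262144 (rounded down to a PAGE_SIZE multiple)
	 */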
@@ -2303,7 +2303,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
-	unsigned int max_segment;
+	unsigned int max_segment = i915_sg_segment_size();
 	gfp_t noreclaim;
 	int ret;
 
@@ -2314,10 +2314,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	max_segment = swiotlb_max_segment();
-	if (!max_segment)
-		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
-
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
...
@@ -399,64 +399,42 @@ struct get_pages_work {
 	struct task_struct *task;
 };
 
-#if IS_ENABLED(CONFIG_SWIOTLB)
-#define swiotlb_active()	swiotlb_nr_tbl()
-#else
-#define swiotlb_active()	0
-#endif
-
-static int
-st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
-{
-	struct scatterlist *sg;
-	int ret, n;
-
-	*st = kmalloc(sizeof(**st), GFP_KERNEL);
-	if (*st == NULL)
-		return -ENOMEM;
-
-	if (swiotlb_active()) {
-		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
-		if (ret)
-			goto err;
-
-		for_each_sg((*st)->sgl, sg, num_pages, n)
-			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
-	} else {
-		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
-						0, num_pages << PAGE_SHIFT,
-						GFP_KERNEL);
-		if (ret)
-			goto err;
-	}
-
-	return 0;
-
-err:
-	kfree(*st);
-	*st = NULL;
-	return ret;
-}
-
 static struct sg_table *
-__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
-			     struct page **pvec, int num_pages)
+__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
+			       struct page **pvec, int num_pages)
 {
-	struct sg_table *pages;
+	unsigned int max_segment = i915_sg_segment_size();
+	struct sg_table *st;
 	int ret;
 
-	ret = st_set_pages(&pages, pvec, num_pages);
-	if (ret)
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+alloc_table:
+	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
+					  0, num_pages << PAGE_SHIFT,
+					  max_segment,
+					  GFP_KERNEL);
+	if (ret) {
+		kfree(st);
 		return ERR_PTR(ret);
+	}
 
-	ret = i915_gem_gtt_prepare_pages(obj, pages);
+	ret = i915_gem_gtt_prepare_pages(obj, st);
 	if (ret) {
-		sg_free_table(pages);
-		kfree(pages);
+		sg_free_table(st);
+
+		if (max_segment > PAGE_SIZE) {
+			max_segment = PAGE_SIZE;
+			goto alloc_table;
+		}
+
+		kfree(st);
 		return ERR_PTR(ret);
 	}
 
-	return pages;
+	return st;
 }
 
 static int
@@ -540,7 +518,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		struct sg_table *pages = ERR_PTR(ret);
 
 		if (pinned == npages) {
-			pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
+							       npages);
 			if (!IS_ERR(pages)) {
 				__i915_gem_object_set_pages(obj, pages);
 				pinned = 0;
@@ -661,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 		pages = __i915_gem_userptr_get_pages_schedule(obj);
 		active = pages == ERR_PTR(-EAGAIN);
 	} else {
-		pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
 		active = !IS_ERR(pages);
 	}
 	if (active)
...