Commit 7d6a276e authored by Jason Ekstrand, committed by Daniel Vetter

drm/i915: Remove allow_alloc from i915_gem_object_get_sg*

This reverts the rest of 0edbb9ba ("drm/i915: Move cmd parser
pinning to execbuffer").  Now that the only user of i915_gem_object_get_sg
without allow_alloc has been removed, we can drop the parameter.  This
portion of the revert was broken into its own patch to aid review.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210714193419.1459723-4-jason@jlekstrand.net
parent 93a2711c
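
For reviewers skimming the interface change below: the only effect on callers is the dropped trailing bool, since lookups may now always populate the sg iterator cache. A minimal before/after sketch of a call site (illustrative only, not a hunk from this patch):

	/* before this series: callers opted in to cache allocation */
	sg = i915_gem_object_get_sg(obj, n, &offset, true);

	/* after: allocation is unconditional and the flag is gone */
	sg = i915_gem_object_get_sg(obj, n, &offset);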
@@ -342,22 +342,22 @@ struct scatterlist *
 __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
-			 unsigned int *offset, bool allow_alloc, bool dma);
+			 unsigned int *offset, bool dma);

 static inline struct scatterlist *
 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
-		       unsigned int *offset, bool allow_alloc)
+		       unsigned int *offset)
 {
-	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc, false);
+	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
 }

 static inline struct scatterlist *
 i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
-			   unsigned int *offset, bool allow_alloc)
+			   unsigned int *offset)
 {
-	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc, true);
+	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
 }

 struct page *
@@ -494,7 +494,7 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
-			 bool allow_alloc, bool dma)
+			 bool dma)
 {
 	struct scatterlist *sg;
 	unsigned int idx, count;
@@ -516,9 +516,6 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 	if (n < READ_ONCE(iter->sg_idx))
 		goto lookup;

-	if (!allow_alloc)
-		goto manual_lookup;
-
 	mutex_lock(&iter->lock);

 	/* We prefer to reuse the last sg so that repeated lookup of this
@@ -568,16 +565,7 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 	if (unlikely(n < idx)) /* insertion completed by another thread */
 		goto lookup;

-	goto manual_walk;
-
-manual_lookup:
-	idx = 0;
-	sg = obj->mm.pages->sgl;
-	count = __sg_page_count(sg);
-
-manual_walk:
-	/*
-	 * In case we failed to insert the entry into the radixtree, we need
+	/* In case we failed to insert the entry into the radixtree, we need
 	 * to look beyond the current sg.
 	 */
 	while (idx + count <= n) {
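
The manual_lookup path deleted above was the allow_alloc=false fallback: it restarted at the head of the object's sg table and walked forward linearly, never taking iter->lock or populating the radix-tree cache. Reconstructed from the removed lines for illustration (a fragment, not meant to compile on its own):

	/* allow_alloc == false: bypass the cached lookup entirely */
	idx = 0;
	sg = obj->mm.pages->sgl;
	count = __sg_page_count(sg);
	/* ...then fall through to the same while (idx + count <= n) walk */

With every caller now passing allow_alloc=true, that slow path was dead code.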
@@ -624,7 +612,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)

 	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

-	sg = i915_gem_object_get_sg(obj, n, &offset, true);
+	sg = i915_gem_object_get_sg(obj, n, &offset);
 	return nth_page(sg_page(sg), offset);
 }
@@ -650,7 +638,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
 	struct scatterlist *sg;
 	unsigned int offset;

-	sg = i915_gem_object_get_sg_dma(obj, n, &offset, true);
+	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

 	if (len)
 		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
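
For context on the arithmetic above (a sketch assuming the surrounding function body, of which only the *len line is part of this hunk): *offset comes back in pages within the matched sg entry, so the remaining length and the DMA address of page n work out as:

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);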
@@ -589,7 +589,7 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,

 	GEM_WARN_ON(bo->ttm);

-	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true);
+	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);

 	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
 }
@@ -1494,7 +1494,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
 	if (ret)
 		goto err_sg_alloc;

-	iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset, true);
+	iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
 	GEM_BUG_ON(!iter);

 	sg = st->sgl;