Commit dd5ba4ff authored by Imre Deak

drm/i915/fb: Factor out functions to remap contiguous FB obj pages

Factor out functions needed to map contiguous FB obj pages to a GTT/DPT
VMA view in the next patch.

While at it s/4096/I915_GTT_PAGE_SIZE/ in add_padding_pages().

No functional changes.

v2: s/4096/I915_GTT_PAGE_SIZE/ (Matthew)
Signed-off-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211026225105.2783797-4-imre.deak@intel.com
parent 6b6636e1
...@@ -1388,18 +1388,9 @@ intel_rotate_pages(struct intel_rotation_info *rot_info, ...@@ -1388,18 +1388,9 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
} }
static struct scatterlist * static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj, add_padding_pages(unsigned int count,
unsigned int offset, unsigned int alignment_pad,
unsigned int width, unsigned int height,
unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg) struct sg_table *st, struct scatterlist *sg)
{ {
unsigned int row;
if (!width || !height)
return sg;
if (alignment_pad) {
st->nents++; st->nents++;
/* /*
...@@ -1407,11 +1398,28 @@ remap_pages(struct drm_i915_gem_object *obj, ...@@ -1407,11 +1398,28 @@ remap_pages(struct drm_i915_gem_object *obj,
* here is just a convenience to indicate how many padding PTEs * here is just a convenience to indicate how many padding PTEs
* to insert at this spot. * to insert at this spot.
*/ */
sg_set_page(sg, NULL, alignment_pad * 4096, 0); sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) = 0; sg_dma_address(sg) = 0;
sg_dma_len(sg) = alignment_pad * 4096; sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
sg = sg_next(sg); sg = sg_next(sg);
}
return sg;
}
static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj,
unsigned int offset, unsigned int alignment_pad,
unsigned int width, unsigned int height,
unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int row;
if (!width || !height)
return sg;
if (alignment_pad)
sg = add_padding_pages(alignment_pad, st, sg);
for (row = 0; row < height; row++) { for (row = 0; row < height; row++) {
unsigned int left = width * I915_GTT_PAGE_SIZE; unsigned int left = width * I915_GTT_PAGE_SIZE;
...@@ -1448,22 +1456,45 @@ remap_pages(struct drm_i915_gem_object *obj, ...@@ -1448,22 +1456,45 @@ remap_pages(struct drm_i915_gem_object *obj,
if (!left) if (!left)
continue; continue;
st->nents++; sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
/*
* The DE ignores the PTEs for the padding tiles, the sg entry
* here is just a conenience to indicate how many padding PTEs
* to insert at this spot.
*/
sg_set_page(sg, NULL, left, 0);
sg_dma_address(sg) = 0;
sg_dma_len(sg) = left;
sg = sg_next(sg);
} }
return sg; return sg;
} }
/*
 * Copy a run of @count contiguous (in the object's address space) pages,
 * starting at page @obj_offset of @obj, into the scatterlist at @sg.
 *
 * Each emitted entry is clipped to the length of the source sg entry it
 * was taken from, so one logical run may become several entries; @st->nents
 * is bumped for every entry written.  Returns the last entry written
 * (callers continue with its successor or mark it as the end).
 *
 * NOTE(review): assumes @count > 0 — a zero count would loop emitting
 * zero-length entries; confirm callers guarantee this.
 */
static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
		       unsigned int obj_offset,
		       unsigned int count,
		       struct sg_table *st, struct scatterlist *sg)
{
	struct scatterlist *iter;
	unsigned int offset;

	/* Locate the source sg entry covering obj_offset; offset is the
	 * page offset of obj_offset within that entry. */
	iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
	GEM_BUG_ON(!iter);

	do {
		unsigned int len;

		/* Clip to whatever remains of the current source entry,
		 * or to the pages still to be copied, whichever is less. */
		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0)
			return sg;

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		/* offset was only meaningful inside the first source entry;
		 * every later entry is consumed from its start. */
		offset = 0;
	} while (1);
}
static noinline struct sg_table * static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info, intel_remap_pages(struct intel_remapped_info *rem_info,
struct drm_i915_gem_object *obj) struct drm_i915_gem_object *obj)
...@@ -1524,9 +1555,8 @@ intel_partial_pages(const struct i915_ggtt_view *view, ...@@ -1524,9 +1555,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj) struct drm_i915_gem_object *obj)
{ {
struct sg_table *st; struct sg_table *st;
struct scatterlist *sg, *iter; struct scatterlist *sg;
unsigned int count = view->partial.size; unsigned int count = view->partial.size;
unsigned int offset;
int ret = -ENOMEM; int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL); st = kmalloc(sizeof(*st), GFP_KERNEL);
...@@ -1537,34 +1567,14 @@ intel_partial_pages(const struct i915_ggtt_view *view, ...@@ -1537,34 +1567,14 @@ intel_partial_pages(const struct i915_ggtt_view *view,
if (ret) if (ret)
goto err_sg_alloc; goto err_sg_alloc;
iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
GEM_BUG_ON(!iter);
sg = st->sgl;
st->nents = 0; st->nents = 0;
do {
unsigned int len;
len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
count << PAGE_SHIFT);
sg_set_page(sg, NULL, len, 0);
sg_dma_address(sg) =
sg_dma_address(iter) + (offset << PAGE_SHIFT);
sg_dma_len(sg) = len;
st->nents++;
count -= len >> PAGE_SHIFT;
if (count == 0) {
sg_mark_end(sg); sg_mark_end(sg);
i915_sg_trim(st); /* Drop any unused tail entries. */ i915_sg_trim(st); /* Drop any unused tail entries. */
return st; return st;
}
sg = __sg_next(sg);
iter = __sg_next(iter);
offset = 0;
} while (1);
err_sg_alloc: err_sg_alloc:
kfree(st); kfree(st);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment