Commit e559895a authored by Rob Clark, committed by Greg Kroah-Hartman

staging: drm/omap: mmap of tiled buffers with stride >4kb

Deal with the case of buffers with virtual stride larger than one
page in fault_2d().
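
As a worked example of the stride math (illustrative numbers, not taken
from the patch): the virtual stride in pages is computed as
m = 1 + ((width << fmt) / PAGE_SIZE), treating width << fmt as the row
size in bytes. Assuming 4KiB pages and a row of 5120 bytes,
m = 1 + (5120 / 4096) = 2, so each slot row spans two pages of the
virtual mapping, and both fault_2d() and evict_entry() must step through
the mapping two pages at a time instead of contiguously.
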
Signed-off-by: Rob Clark <rob@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5c137797
@@ -153,11 +153,24 @@ static void evict_entry(struct drm_gem_object *obj,
 		enum tiler_fmt fmt, struct usergart_entry *entry)
 {
 	if (obj->dev->dev_mapping) {
-		size_t size = PAGE_SIZE * usergart[fmt].height;
+		struct omap_gem_object *omap_obj = to_omap_bo(obj);
+		int n = usergart[fmt].height;
+		size_t size = PAGE_SIZE * n;
 		loff_t off = mmap_offset(obj) +
 				(entry->obj_pgoff << PAGE_SHIFT);
-		unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+		const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+		if (m > 1) {
+			int i;
+			/* if stride > than PAGE_SIZE then sparse mapping: */
+			for (i = n; i > 0; i--) {
+				unmap_mapping_range(obj->dev->dev_mapping,
+						off, PAGE_SIZE, 1);
+				off += PAGE_SIZE * m;
+			}
+		} else {
+			unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+		}
 	}

 	entry->obj = NULL;
 }
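
The eviction math above can be checked in isolation. A minimal userspace
sketch (illustration only, not part of the patch; all values are made
up): with a virtual stride of m pages, each fault inserts one page per
slot row, so eviction punches n single-page holes spaced m pages apart
rather than unmapping one contiguous n-page range.

	#include <stdio.h>

	#define PAGE_SIZE 4096ULL

	/* print the n single-page ranges that the sparse path unmaps */
	static void show_evicted_ranges(unsigned long long off, int n, int m)
	{
		int i;

		for (i = n; i > 0; i--) {
			printf("unmap [%#llx, %#llx)\n", off, off + PAGE_SIZE);
			off += PAGE_SIZE * m;
		}
	}

	int main(void)
	{
		show_evicted_ranges(0, 4, 3);	/* 4 slot rows, 3-page stride */
		return 0;
	}
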
@@ -342,26 +355,39 @@ static int fault_2d(struct drm_gem_object *obj,
 	void __user *vaddr;
 	int i, ret, slots;

-	if (!usergart)
-		return -EFAULT;
-
-	/* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers
-	 * that are wider than 4kb
+	/*
+	 * Note the height of the slot is also equal to the number of pages
+	 * that need to be mapped in to fill 4kb wide CPU page. If the slot
+	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
+	 */
+	const int n = usergart[fmt].height;
+	const int n_shift = usergart[fmt].height_shift;
+
+	/*
+	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
+	 * rounded up to next multiple of PAGE_SIZE.. this need to be taken
+	 * into account in some of the math, so figure out virtual stride
+	 * in pages
 	 */
+	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

 	/* We don't use vmf->pgoff since that has the fake offset: */
 	pgoff = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;

-	/* actual address we start mapping at is rounded down to previous slot
+	/*
+	 * Actual address we start mapping at is rounded down to previous slot
 	 * boundary in the y direction:
 	 */
-	base_pgoff = round_down(pgoff, usergart[fmt].height);
-	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
-	entry = &usergart[fmt].entry[usergart[fmt].last];
+	base_pgoff = round_down(pgoff, m << n_shift);

+	/* figure out buffer width in slots */
 	slots = omap_obj->width >> usergart[fmt].slot_shift;

+	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+
+	entry = &usergart[fmt].entry[usergart[fmt].last];
+
 	/* evict previous buffer using this usergart entry, if any: */
 	if (entry->obj)
 		evict_entry(entry->obj, fmt, entry);
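
To see what the new rounding buys, here is a small standalone sketch
(assumed values, not from the patch; round_down mirrors the kernel's
power-of-two macro, which is safe here because m << n_shift is a power
of two when m = 2):

	#include <stdio.h>

	/* power-of-two round_down, like the kernel macro */
	#define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))

	int main(void)
	{
		unsigned long pgoff = 203;	/* faulting page offset */
		int n_shift = 6;		/* 64-row slots */
		int m = 2;			/* 2-page virtual stride */

		/* one slot row covers m << n_shift pages of virtual space */
		unsigned long base_pgoff = round_down(pgoff, m << n_shift);

		printf("base_pgoff = %lu\n", base_pgoff);	/* prints 128 */
		return 0;
	}
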
@@ -369,23 +395,30 @@ static int fault_2d(struct drm_gem_object *obj,
 	entry->obj = obj;
 	entry->obj_pgoff = base_pgoff;

-	/* now convert base_pgoff to phys offset from virt offset:
-	 */
-	base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;
-
-	/* map in pages. Note the height of the slot is also equal to the
-	 * number of pages that need to be mapped in to fill 4kb wide CPU page.
-	 * If the height is 64, then 64 pages fill a 4kb wide by 64 row region.
-	 * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to
-	 * get a dummy page mapped in.. if someone reads/writes it they will get
-	 * random/undefined content, but at least it won't be corrupting
-	 * whatever other random page used to be mapped in, or other undefined
-	 * behavior.
+	/* now convert base_pgoff to phys offset from virt offset: */
+	base_pgoff = (base_pgoff >> n_shift) * slots;
+
+	/* for wider-than 4k.. figure out which part of the slot-row we want: */
+	if (m > 1) {
+		int off = pgoff % m;
+		entry->obj_pgoff += off;
+		base_pgoff /= m;
+		slots = min(slots - (off << n_shift), n);
+		base_pgoff += off << n_shift;
+		vaddr += off << PAGE_SHIFT;
+	}
+
+	/*
+	 * Map in pages. Beyond the valid pixel part of the buffer, we set
+	 * pages[i] to NULL to get a dummy page mapped in.. if someone
+	 * reads/writes it they will get random/undefined content, but at
+	 * least it won't be corrupting whatever other random page used to
+	 * be mapped in, or other undefined behavior.
 	 */
 	memcpy(pages, &omap_obj->pages[base_pgoff],
 			sizeof(struct page *) * slots);
 	memset(pages + slots, 0,
-			sizeof(struct page *) * (usergart[fmt].height - slots));
+			sizeof(struct page *) * (n - slots));

 	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
 	if (ret) {
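
The wider-than-4k fix-up is easiest to follow with numbers. A standalone
sketch, with illustrative geometry that does not come from the patch:
an 8-bit format with 64-byte by 64-row slots and a 5120-byte-wide
buffer, giving slots = 80 and m = 2. Here pgoff % m picks the column,
i.e. which page of the padded row faulted, and the physical offset and
slot count are then narrowed to just that column:

	#include <stdio.h>

	#define min(a, b) ((a) < (b) ? (a) : (b))
	#define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))

	int main(void)
	{
		int n = 64, n_shift = 6, m = 2, slots = 80;
		unsigned long pgoff = 203;	/* faults in the 2nd column */

		/* round down to the slot-row boundary, in virtual pages */
		unsigned long base_pgoff = round_down(pgoff, m << n_shift);

		/* convert to a physical page offset, as in the patch */
		base_pgoff = (base_pgoff >> n_shift) * slots;

		int off = pgoff % m;		/* column within padded row */
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;

		/* pages [base_pgoff, base_pgoff + slots) get mapped */
		printf("base_pgoff = %lu, slots = %d\n", base_pgoff, slots);
		/* prints: base_pgoff = 144, slots = 16 */
		return 0;
	}
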
@@ -393,16 +426,15 @@ static int fault_2d(struct drm_gem_object *obj,
 		return ret;
 	}

-	i = usergart[fmt].height;
 	pfn = entry->paddr >> PAGE_SHIFT;

 	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
 			pfn, pfn << PAGE_SHIFT);

-	while (i--) {
+	for (i = n; i > 0; i--) {
 		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
 		pfn += usergart[fmt].stride_pfn;
-		vaddr += PAGE_SIZE;
+		vaddr += PAGE_SIZE * m;
 	}

 	/* simple round-robin: */
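
Finally, the insert loop maps one page per slot row: the CPU address
advances by the full m-page virtual stride while the physical address
advances by the TILER row stride. A sketch of the resulting address
pattern (all values made up for illustration):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		int n = 4, m = 2, i;	/* shortened to 4 rows */
		unsigned long vaddr = 0x10000000UL;
		unsigned long pfn = 0x80000UL, stride_pfn = 16;

		for (i = n; i > 0; i--) {
			printf("insert va %#lx -> pfn %#lx\n", vaddr, pfn);
			pfn += stride_pfn;	/* next tiler row */
			vaddr += PAGE_SIZE * m;	/* skip the padding page(s) */
		}
		return 0;
	}
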