Commit cdb0381d authored by Laurent Pinchart, committed by Tomi Valkeinen

drm/omap: gem: Clean up GEM objects memory flags

The driver assumes that only objects backed by shmem need to be mapped
through DMM. While this is true with the current code, the assumption
won't hold with dma_buf import support.

Condition the mapping based on whether the buffer has been allocated
using the DMA mapping API instead and clean up the flags to avoid having
to check both flags and GEM object filp field to decide how to process
buffers. Flags are not the authoritative source of information regarding
where the buffer memory comes from, and are renamed to make that
clearer.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
parent 8d83bbd1
...@@ -31,9 +31,10 @@ ...@@ -31,9 +31,10 @@
*/ */
/* note: we use upper 8 bits of flags for driver-internal flags: */ /* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */ #define OMAP_BO_MEM_DMA_API 0x01000000 /* memory allocated with the dma_alloc_* API */
#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */ #define OMAP_BO_MEM_SHMEM 0x02000000 /* memory allocated through shmem backing */
#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */ #define OMAP_BO_MEM_EXT 0x04000000 /* memory allocated externally */
#define OMAP_BO_EXT_SYNC 0x10000000 /* externally allocated sync object */
struct omap_gem_object { struct omap_gem_object {
struct drm_gem_object base; struct drm_gem_object base;
...@@ -49,16 +50,16 @@ struct omap_gem_object { ...@@ -49,16 +50,16 @@ struct omap_gem_object {
uint32_t roll; uint32_t roll;
/** /**
* If buffer is allocated physically contiguous, the OMAP_BO_DMA flag * If buffer is allocated physically contiguous, the OMAP_BO_MEM_DMA_API
* is set and the paddr is valid. Also if the buffer is remapped in * flag is set and the paddr is valid. Also if the buffer is remapped
* TILER and paddr_cnt > 0, then paddr is valid. But if you are using * in TILER and paddr_cnt > 0, then paddr is valid. But if you are using
* the physical address and OMAP_BO_DMA is not set, then you should * the physical address and OMAP_BO_MEM_DMA_API is not set, then you
* be going thru omap_gem_{get,put}_paddr() to ensure the mapping is * should be going thru omap_gem_{get,put}_paddr() to ensure the mapping
* not removed from under your feet. * is not removed from under your feet.
* *
* Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
* buffer is requested, but doesn't mean that it is. Use the * buffer is requested, but doesn't mean that it is. Use the
* OMAP_BO_DMA flag to determine if the buffer has a DMA capable * OMAP_BO_MEM_DMA_API flag to determine if the buffer has a DMA capable
* physical address. * physical address.
*/ */
dma_addr_t paddr; dma_addr_t paddr;
...@@ -166,18 +167,6 @@ static uint64_t mmap_offset(struct drm_gem_object *obj) ...@@ -166,18 +167,6 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
return drm_vma_node_offset_addr(&obj->vma_node); return drm_vma_node_offset_addr(&obj->vma_node);
} }
/* GEM objects can either be allocated from contiguous memory (in which
* case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
* contiguous buffers can be remapped in TILER/DMM if they need to be
* contiguous... but we don't do this all the time to reduce pressure
* on TILER/DMM space when we know at allocation time that the buffer
* will need to be scanned out.
*/
static inline bool is_shmem(struct drm_gem_object *obj)
{
return obj->filp != NULL;
}
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* Eviction * Eviction
*/ */
...@@ -307,7 +296,7 @@ static int get_pages(struct drm_gem_object *obj, struct page ***pages) ...@@ -307,7 +296,7 @@ static int get_pages(struct drm_gem_object *obj, struct page ***pages)
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0; int ret = 0;
if (is_shmem(obj) && !omap_obj->pages) { if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
ret = omap_gem_attach_pages(obj); ret = omap_gem_attach_pages(obj);
if (ret) { if (ret) {
dev_err(obj->dev->dev, "could not attach pages\n"); dev_err(obj->dev->dev, "could not attach pages\n");
...@@ -411,7 +400,7 @@ static int fault_1d(struct drm_gem_object *obj, ...@@ -411,7 +400,7 @@ static int fault_1d(struct drm_gem_object *obj,
omap_gem_cpu_sync(obj, pgoff); omap_gem_cpu_sync(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]); pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else { } else {
BUG_ON(!(omap_obj->flags & OMAP_BO_DMA)); BUG_ON(!(omap_obj->flags & OMAP_BO_MEM_DMA_API));
pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff; pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
} }
...@@ -743,7 +732,8 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll) ...@@ -743,7 +732,8 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
static inline bool is_cached_coherent(struct drm_gem_object *obj) static inline bool is_cached_coherent(struct drm_gem_object *obj)
{ {
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
return is_shmem(obj) &&
return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED); ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
} }
...@@ -813,7 +803,8 @@ int omap_gem_get_paddr(struct drm_gem_object *obj, ...@@ -813,7 +803,8 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
mutex_lock(&obj->dev->struct_mutex); mutex_lock(&obj->dev->struct_mutex);
if (remap && is_shmem(obj) && priv->has_dmm) { if (!(omap_obj->flags & OMAP_BO_MEM_DMA_API) &&
remap && priv->has_dmm) {
if (omap_obj->paddr_cnt == 0) { if (omap_obj->paddr_cnt == 0) {
struct page **pages; struct page **pages;
uint32_t npages = obj->size >> PAGE_SHIFT; uint32_t npages = obj->size >> PAGE_SHIFT;
...@@ -860,7 +851,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj, ...@@ -860,7 +851,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
omap_obj->paddr_cnt++; omap_obj->paddr_cnt++;
*paddr = omap_obj->paddr; *paddr = omap_obj->paddr;
} else if (omap_obj->flags & OMAP_BO_DMA) { } else if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
*paddr = omap_obj->paddr; *paddr = omap_obj->paddr;
} else { } else {
ret = -EINVAL; ret = -EINVAL;
...@@ -1351,11 +1342,11 @@ void omap_gem_free_object(struct drm_gem_object *obj) ...@@ -1351,11 +1342,11 @@ void omap_gem_free_object(struct drm_gem_object *obj)
WARN_ON(omap_obj->paddr_cnt > 0); WARN_ON(omap_obj->paddr_cnt > 0);
/* don't free externally allocated backing memory */ /* don't free externally allocated backing memory */
if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) { if (!(omap_obj->flags & OMAP_BO_MEM_EXT)) {
if (omap_obj->pages) if (omap_obj->pages)
omap_gem_detach_pages(obj); omap_gem_detach_pages(obj);
if (!is_shmem(obj)) { if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
dma_free_writecombine(dev->dev, obj->size, dma_free_writecombine(dev->dev, obj->size,
omap_obj->vaddr, omap_obj->paddr); omap_obj->vaddr, omap_obj->paddr);
} else if (omap_obj->vaddr) { } else if (omap_obj->vaddr) {
...@@ -1429,7 +1420,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, ...@@ -1429,7 +1420,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
return NULL; return NULL;
} }
flags |= OMAP_BO_DMA; flags |= OMAP_BO_MEM_DMA_API;
} }
spin_lock(&priv->list_lock); spin_lock(&priv->list_lock);
...@@ -1443,7 +1434,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, ...@@ -1443,7 +1434,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->height = gsize.tiled.height; omap_obj->height = gsize.tiled.height;
} }
if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) { if (flags & (OMAP_BO_MEM_DMA_API | OMAP_BO_MEM_EXT)) {
drm_gem_private_object_init(dev, obj, size); drm_gem_private_object_init(dev, obj, size);
} else { } else {
ret = drm_gem_object_init(dev, obj, size); ret = drm_gem_object_init(dev, obj, size);
...@@ -1452,6 +1443,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, ...@@ -1452,6 +1443,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
mapping = file_inode(obj->filp)->i_mapping; mapping = file_inode(obj->filp)->i_mapping;
mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
omap_obj->flags |= OMAP_BO_MEM_SHMEM;
} }
return obj; return obj;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment