Commit f71c5d9d authored by Dave Airlie

Merge branch 'drm-next' of git://people.freedesktop.org/~dvdhrm/linux into drm-next

* 'drm-next' of git://people.freedesktop.org/~dvdhrm/linux:
  drm/omap: remove null test before kfree
  drm/bochs: replace ALIGN(PAGE_SIZE) by PAGE_ALIGN
  drm/ttm: recognize ARM arch in ioprot handler
  drm: enable render-nodes by default
  drm/ttm: remove declaration of ttm_tt_cache_flush
  drm/gem: remove misleading gfp parameter to get_pages()
  drm/omap: use __GFP_DMA32 for shmem-backed gem
  drm/i915: use shmem helpers if possible

Conflicts:
	drivers/gpu/drm/drm_stub.c
parents afa95e74 d2c87e2d

--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -387,7 +387,7 @@ int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
 	*obj = NULL;
 
-	size = ALIGN(size, PAGE_SIZE);
+	size = PAGE_ALIGN(size);
 	if (size == 0)
 		return -EINVAL;

--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 /**
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
  * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
+ *
+ * This reads the page-array of the shmem-backing storage of the given gem
+ * object. An array of pages is returned. If a page is not allocated or
+ * swapped-out, this will allocate/swap-in the required pages. Note that the
+ * whole object is covered by the page-array and pinned in memory.
+ *
+ * Use drm_gem_put_pages() to release the array and unpin all pages.
+ *
+ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
+ * If you require other GFP-masks, you have to do those allocations yourself.
+ *
+ * Note that you are not allowed to change gfp-zones during runtime. That is,
+ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
+ * set during initialization. If you have special zone constraints, set them
+ * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
+ * to keep pages in the required zone during swap-in.
  */
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct inode *inode;
 	struct address_space *mapping;
 	struct page *p, **pages;
 	int i, npages;
 
 	/* This is the shared memory object that backs the GEM resource */
-	inode = file_inode(obj->filp);
-	mapping = inode->i_mapping;
+	mapping = file_inode(obj->filp)->i_mapping;
 
 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless

@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	gfpmask |= mapping_gfp_mask(mapping);
-
 	for (i = 0; i < npages; i++) {
-		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
 			goto fail;
 		pages[i] = p;

@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
 		 * so shmem can relocate pages during swapin if required.
 		 */
-		BUG_ON((gfpmask & __GFP_DMA32) &&
+		BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
 				(page_to_pfn(p) >= 0x00100000UL));
 	}
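
For clarity, a minimal sketch of how a driver consumes the reworked helper. This is illustrative code, not part of the merge; `my_gem_object`, `my_pin_pages` and `my_unpin_pages` are hypothetical names, while drm_gem_get_pages()/drm_gem_put_pages() are the real API as declared in this series:

/* Illustrative sketch (hypothetical driver code, not part of this merge). */
#include <drm/drmP.h>		/* drm_gem_get_pages(), drm_gem_put_pages() */
#include <linux/err.h>		/* IS_ERR(), PTR_ERR() */

struct my_gem_object {
	struct drm_gem_object base;
	struct page **pages;	/* pinned shmem pages, NULL while unpinned */
};

static int my_pin_pages(struct my_gem_object *obj)
{
	struct page **pages;

	/* Pins the whole object. Allocation flags now come from the GFP
	 * mask stored on the shmem mapping, not from a parameter. */
	pages = drm_gem_get_pages(&obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;
	return 0;
}

static void my_unpin_pages(struct my_gem_object *obj)
{
	/* Release the array and unpin, marking pages dirty and accessed. */
	drm_gem_put_pages(&obj->base, obj->pages, true, true);
	obj->pages = NULL;
}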

--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -37,8 +37,6 @@
 unsigned int drm_debug = 0;	/* 1 to enable debug output */
 EXPORT_SYMBOL(drm_debug);
 
-unsigned int drm_rnodes = 0;	/* 1 to enable experimental render nodes API */
-EXPORT_SYMBOL(drm_rnodes);
 
 /* 1 to allow user space to request universal planes (experimental) */
 unsigned int drm_universal_planes = 0;

@@ -56,13 +54,11 @@ MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
-MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 
 module_param_named(debug, drm_debug, int, 0600);
-module_param_named(rnodes, drm_rnodes, int, 0600);
 module_param_named(universal_planes, drm_universal_planes, int, 0600);
 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);

@@ -584,7 +580,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 		goto err_minors;
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
 		if (ret)
 			goto err_minors;
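
The user-visible effect of dropping drm_rnodes: any driver that sets DRIVER_RENDER now always exposes a render node, with no drm.rnodes=1 on the kernel command line. A minimal userspace check, assuming the first render minor (render minors start at 128, so the first device is /dev/dri/renderD128):

/* Illustrative userspace sketch, not part of this merge. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Render nodes appear as /dev/dri/renderD<minor>. */
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0) {
		perror("no render node");
		return 1;
	}
	/* This fd accepts only render/compute ioctls: no modesetting
	 * and no DRM-master authentication required. */
	close(fd);
	return 0;
}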

--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
 	WARN_ON(gt->pages);
 
-	pages = drm_gem_get_pages(&gt->gem, 0);
+	pages = drm_gem_get_pages(&gt->gem);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2059,16 +2059,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
-			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
-			gfp |= __GFP_IO | __GFP_WAIT;
-
			i915_gem_shrink_all(dev_priv);
-			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto err_pages;
-
-			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
-			gfp &= ~(__GFP_IO | __GFP_WAIT);
		}
 #ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {

--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		int npages = obj->size >> PAGE_SHIFT;
 
 		if (iommu_present(&platform_bus_type))
-			p = drm_gem_get_pages(obj, 0);
+			p = drm_gem_get_pages(obj);
 		else
 			p = get_pages_vram(obj, npages);

--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -233,11 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 	WARN_ON(omap_obj->pages);
 
-	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
-	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
-	 * we actually want CMA memory for it all anyways..
-	 */
-	pages = drm_gem_get_pages(obj, GFP_KERNEL);
+	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
 		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 		return PTR_ERR(pages);

@@ -1183,8 +1179,6 @@ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
 			}
 		}
 		spin_unlock(&sync_lock);
-
-		if (waiter)
-			kfree(waiter);
+		kfree(waiter);
 	}
 
 	return ret;

@@ -1347,6 +1341,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 	struct omap_drm_private *priv = dev->dev_private;
 	struct omap_gem_object *omap_obj;
 	struct drm_gem_object *obj = NULL;
+	struct address_space *mapping;
 	size_t size;
 	int ret;

@@ -1404,15 +1399,17 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 		omap_obj->height = gsize.tiled.height;
 	}
 
-	ret = 0;
-	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
+	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
 		drm_gem_private_object_init(dev, obj, size);
-	else
+	} else {
 		ret = drm_gem_object_init(dev, obj, size);
+		if (ret)
+			goto fail;
 
-	if (ret)
-		goto fail;
+		mapping = file_inode(obj->filp)->i_mapping;
+		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
+	}
 
 	return obj;
 
 fail:
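
The omap change above is exactly the pattern the new drm_gem_get_pages() documentation asks for: pick the GFP zone once, right after object init, and never change it afterwards. A condensed sketch of that pattern (`my_create_shmem_obj` is a hypothetical name; the rationale in the comment follows the drm_gem.c hunks earlier in this diff):

/* Illustrative sketch (hypothetical driver code, not part of this merge). */
#include <drm/drmP.h>		/* drm_gem_object_init() */
#include <linux/fs.h>		/* file_inode() */
#include <linux/pagemap.h>	/* mapping_set_gfp_mask() */

static int my_create_shmem_obj(struct drm_device *dev,
			       struct drm_gem_object *obj, size_t size)
{
	int ret = drm_gem_object_init(dev, obj, size);

	if (ret)
		return ret;

	/* Device DMA is limited to the low 4 GiB, so constrain the shmem
	 * zone now. shmem keeps swapped-in pages in this zone, and
	 * drm_gem_get_pages() BUG()s on pages that land above it. */
	mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
			     GFP_USER | __GFP_DMA32);
	return 0;
}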

--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -500,7 +500,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 		pgprot_val(tmp) |= _PAGE_GUARDED;
 	}
 #endif
-#if defined(__ia64__)
+#if defined(__ia64__) || defined(__arm__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else
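
What the one-line TTM change buys on ARM: callers of ttm_io_prot() now get real write-combine or uncached protections instead of falling through to the cached default. A hypothetical fault-path fragment showing the call site shape (`my_set_vma_prot` is an assumption, not code from this merge):

/* Illustrative sketch (hypothetical driver code, not part of this merge). */
#include <drm/ttm/ttm_bo_api.h>	/* struct ttm_buffer_object */
#include <drm/ttm/ttm_bo_driver.h>	/* ttm_io_prot() */
#include <linux/mm.h>			/* struct vm_area_struct */

static void my_set_vma_prot(struct ttm_buffer_object *bo,
			    struct vm_area_struct *vma)
{
	/* On __arm__ (as on __ia64__), TTM_PL_FLAG_WC now yields
	 * pgprot_writecombine(); other caching flags take the uncached
	 * branch rather than keeping the plain cached protection. */
	vma->vm_page_prot = ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
}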

--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 }
 
-static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+static int udl_gem_get_pages(struct udl_gem_object *obj)
 {
 	struct page **pages;
 
 	if (obj->pages)
 		return 0;
 
-	pages = drm_gem_get_pages(&obj->base, gfpmask);
+	pages = drm_gem_get_pages(&obj->base);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);

@@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 		return 0;
 	}
 
-	ret = udl_gem_get_pages(obj, GFP_KERNEL);
+	ret = udl_gem_get_pages(obj);
 	if (ret)
 		return ret;

@@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 	}
 	gobj = to_udl_bo(obj);
 
-	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+	ret = udl_gem_get_pages(gobj);
 	if (ret)
 		goto out;
 	ret = drm_gem_create_mmap_offset(obj);

--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1395,7 +1395,6 @@ extern void drm_master_put(struct drm_master **master);
 extern void drm_put_dev(struct drm_device *dev);
 extern void drm_unplug_dev(struct drm_device *dev);
 extern unsigned int drm_debug;
-extern unsigned int drm_rnodes;
 extern unsigned int drm_universal_planes;
 extern unsigned int drm_vblank_offdelay;

@@ -1585,7 +1584,7 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
 
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+struct page **drm_gem_get_pages(struct drm_gem_object *obj);
 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		bool dirty, bool accessed);

--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -652,18 +652,6 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm);
  */
 extern int ttm_tt_swapin(struct ttm_tt *ttm);
 
-/**
- * ttm_tt_cache_flush:
- *
- * @pages: An array of pointers to struct page:s to flush.
- * @num_pages: Number of pages to flush.
- *
- * Flush the data of the indicated pages from the cpu caches.
- * This is used when changing caching attributes of the pages from
- * cache-coherent.
- */
-extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
-
 /**
  * ttm_tt_set_placement_caching:
  *