Commit 5fd5d2b7 authored by Daniel Vetter

Merge tag 'drm-misc-fixes-2019-08-02' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

drm-misc-fixes for v5.3-rc3:
- Fix some build errors in drm/bridge.
- Do not build i810 on CONFIG_PREEMPTION.
- Fix cache sync on arm in vgem.
- Allow mapping fb in drm_client only when required, and use it to fix bochs fbdev.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/af0dc371-16e0-cee8-0d71-4824d44aa973@linux.intel.com
parents ab35c8a5 58540594
@@ -394,7 +394,7 @@ config DRM_R128
 config DRM_I810
 	tristate "Intel I810"
 	# !PREEMPT because of missing ioctl locking
-	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+	depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
 	help
 	  Choose this option if you have an Intel I810 graphics card. If M is
 	  selected, the module will be called i810. AGP support is required
......
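The Kconfig comment hints at why: the i810 ioctls touch shared driver state without locking, which is only tolerable while the kernel cannot preempt between a check and the update that follows it, and CONFIG_PREEMPTION (which now also covers PREEMPT_RT) removes that guarantee. A hypothetical sketch of the pattern, not taken from the i810 driver:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical illustration of "missing ioctl locking"; not i810 code. */
struct example_dev {
	bool busy;		/* shared state, accessed with no lock */
};

static int example_unlocked_ioctl(struct example_dev *edev)
{
	if (edev->busy)		/* check ... */
		return -EBUSY;
	edev->busy = true;	/* ... then act: under CONFIG_PREEMPTION another
				 * task can run between the two steps, so two
				 * callers can both pass the check */
	/* program the hardware here */
	edev->busy = false;
	return 0;
}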
@@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs)
 	bochs->dev->mode_config.fb_base = bochs->fb_base;
 	bochs->dev->mode_config.preferred_depth = 24;
 	bochs->dev->mode_config.prefer_shadow = 0;
+	bochs->dev->mode_config.prefer_shadow_fbdev = 1;
 	bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
 	bochs->dev->mode_config.funcs = &bochs_mode_funcs;
......
@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
 config DRM_LVDS_ENCODER
 	tristate "Transparent parallel to LVDS encoder support"
 	depends on OF
+	select DRM_KMS_HELPER
 	select DRM_PANEL_BRIDGE
 	help
 	  Support for transparent parallel to LVDS encoders that don't require
@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
 config DRM_TOSHIBA_TC358764
 	tristate "TC358764 DSI/LVDS bridge"
-	depends on DRM && DRM_PANEL
 	depends on OF
 	select DRM_MIPI_DSI
+	select DRM_KMS_HELPER
+	select DRM_PANEL
 	help
 	  Toshiba TC358764 DSI/LVDS bridge driver.
......
@@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
 	struct drm_device *dev = client->dev;
 	struct drm_client_buffer *buffer;
 	struct drm_gem_object *obj;
-	void *vaddr;
 	int ret;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
 
 	buffer->gem = obj;
 
+	return buffer;
+
+err_delete:
+	drm_client_buffer_delete(buffer);
+
+	return ERR_PTR(ret);
+}
+
+/**
+ * drm_client_buffer_vmap - Map DRM client buffer into address space
+ * @buffer: DRM client buffer
+ *
+ * This function maps a client buffer into kernel address space. If the
+ * buffer is already mapped, it returns the mapping's address.
+ *
+ * Client buffer mappings are not ref'counted. Each call to
+ * drm_client_buffer_vmap() should be followed by a call to
+ * drm_client_buffer_vunmap(); or the client buffer should be mapped
+ * throughout its lifetime.
+ *
+ * Returns:
+ *	The mapped memory's address
+ */
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->vaddr)
+		return buffer->vaddr;
+
 	/*
 	 * FIXME: The dependency on GEM here isn't required, we could
 	 * convert the driver handle to a dma-buf instead and use the
@@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
 	 * fd_install step out of the driver backend hooks, to make that
 	 * final step optional for internal users.
 	 */
-	vaddr = drm_gem_vmap(obj);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto err_delete;
-	}
+	vaddr = drm_gem_vmap(buffer->gem);
+	if (IS_ERR(vaddr))
+		return vaddr;
 
 	buffer->vaddr = vaddr;
 
-	return buffer;
-
-err_delete:
-	drm_client_buffer_delete(buffer);
-
-	return ERR_PTR(ret);
+	return vaddr;
 }
+EXPORT_SYMBOL(drm_client_buffer_vmap);
+
+/**
+ * drm_client_buffer_vunmap - Unmap DRM client buffer
+ * @buffer: DRM client buffer
+ *
+ * This function removes a client buffer's memory mapping. Calling this
+ * function is only required by clients that manage their buffer mappings
+ * by themselves.
+ */
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
+{
+	drm_gem_vunmap(buffer->gem, buffer->vaddr);
+	buffer->vaddr = NULL;
+}
+EXPORT_SYMBOL(drm_client_buffer_vunmap);
 
 static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
 {
......
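The new kerneldoc spells out the contract: client-buffer mappings are not refcounted, so a short-lived access pairs each drm_client_buffer_vmap() with a drm_client_buffer_vunmap(). Below is a minimal usage sketch, not part of this commit; example_fill is a hypothetical helper and assumes a linear framebuffer layout:

#include <linux/err.h>
#include <linux/string.h>

#include <drm/drm_client.h>
#include <drm/drm_framebuffer.h>

/* Hypothetical helper: fill a client buffer with a byte pattern. */
static int example_fill(struct drm_client_buffer *buffer, u8 pattern)
{
	void *vaddr;

	vaddr = drm_client_buffer_vmap(buffer);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* assumes a linear layout: height * pitch bytes */
	memset(vaddr, pattern, buffer->fb->height * buffer->fb->pitches[0]);

	/* mappings are not refcounted: drop the mapping when done */
	drm_client_buffer_vunmap(buffer);

	return 0;
}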
@@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
 	struct drm_clip_rect *clip = &helper->dirty_clip;
 	struct drm_clip_rect clip_copy;
 	unsigned long flags;
+	void *vaddr;
 
 	spin_lock_irqsave(&helper->dirty_lock, flags);
 	clip_copy = *clip;
@@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
 	/* call dirty callback only when it has been really touched */
 	if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
 		/* Generic fbdev uses a shadow buffer */
-		if (helper->buffer)
+		if (helper->buffer) {
+			vaddr = drm_client_buffer_vmap(helper->buffer);
+			if (IS_ERR(vaddr))
+				return;
 			drm_fb_helper_dirty_blit_real(helper, &clip_copy);
-		helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+		}
+		if (helper->fb->funcs->dirty)
+			helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
+						 &clip_copy, 1);
+
+		if (helper->buffer)
+			drm_client_buffer_vunmap(helper->buffer);
 	}
 }
@@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
 
+static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_framebuffer *fb = fb_helper->fb;
+
+	return dev->mode_config.prefer_shadow_fbdev ||
+	       dev->mode_config.prefer_shadow ||
+	       fb->funcs->dirty;
+}
+
 static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
 				u32 width, u32 height)
 {
@@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
 	struct drm_clip_rect *clip = &helper->dirty_clip;
 	unsigned long flags;
 
-	if (!helper->fb->funcs->dirty)
+	if (!drm_fbdev_use_shadow_fb(helper))
 		return;
 
 	spin_lock_irqsave(&helper->dirty_lock, flags);
@@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 	struct drm_framebuffer *fb;
 	struct fb_info *fbi;
 	u32 format;
+	void *vaddr;
 
 	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
 		      sizes->surface_width, sizes->surface_height,
@@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 	fbi->fbops = &drm_fbdev_fb_ops;
 	fbi->screen_size = fb->height * fb->pitches[0];
 	fbi->fix.smem_len = fbi->screen_size;
-	fbi->screen_buffer = buffer->vaddr;
-
-	/* Shamelessly leak the physical address to user-space */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
-	if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
-		fbi->fix.smem_start =
-			page_to_phys(virt_to_page(fbi->screen_buffer));
-#endif
 
 	drm_fb_helper_fill_info(fbi, fb_helper, sizes);
 
-	if (fb->funcs->dirty) {
+	if (drm_fbdev_use_shadow_fb(fb_helper)) {
 		struct fb_ops *fbops;
 		void *shadow;
@@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 		fbi->fbdefio = &drm_fbdev_defio;
 
 		fb_deferred_io_init(fbi);
+	} else {
+		/* buffer is mapped for HW framebuffer */
+		vaddr = drm_client_buffer_vmap(fb_helper->buffer);
+		if (IS_ERR(vaddr))
+			return PTR_ERR(vaddr);
+
+		fbi->screen_buffer = vaddr;
+
+		/* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+		if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+			fbi->fix.smem_start =
+				page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
 	}
 
 	return 0;
......
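With drm_fbdev_use_shadow_fb() in place, a driver reaches the shadow-fb path in three ways: the new prefer_shadow_fbdev hint (as bochs sets above), the existing prefer_shadow flag, or implicitly by providing a dirty() callback. A hypothetical init sketch for the opt-in; example_mode_config_init is made up, and the generic fbdev itself is set up separately via drm_fbdev_generic_setup(), typically after drm_dev_register():

#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>

/*
 * Hypothetical driver init: opt the fbdev emulation into shadow-fb
 * rendering even though the framebuffer has no dirty() callback,
 * mirroring what bochs does in the hunk above. The hint only affects
 * the in-kernel fbdev client, not userspace mmap of the buffer.
 */
static void example_mode_config_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow_fbdev = true;
}

The fbdev helper then renders into the shadow buffer and only vmaps the real buffer around each dirty blit, instead of keeping it mapped for the whole lifetime of the console.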
@@ -47,10 +47,16 @@ static struct vgem_device {
 	struct platform_device *platform;
 } *vgem_device;
 
+static void sync_and_unpin(struct drm_vgem_gem_object *bo);
+static struct page **pin_and_sync(struct drm_vgem_gem_object *bo);
+
 static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
+	if (!obj->import_attach)
+		sync_and_unpin(vgem_obj);
+
 	kvfree(vgem_obj->pages);
 	mutex_destroy(&vgem_obj->pages_lock);
@@ -78,40 +84,15 @@ static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	mutex_lock(&obj->pages_lock);
+	if (!obj->pages)
+		pin_and_sync(obj);
 	if (obj->pages) {
 		get_page(obj->pages[page_offset]);
 		vmf->page = obj->pages[page_offset];
 		ret = 0;
 	}
 	mutex_unlock(&obj->pages_lock);
-	if (ret) {
-		struct page *page;
-
-		page = shmem_read_mapping_page(
-					file_inode(obj->base.filp)->i_mapping,
-					page_offset);
-		if (!IS_ERR(page)) {
-			vmf->page = page;
-			ret = 0;
-		} else switch (PTR_ERR(page)) {
-			case -ENOSPC:
-			case -ENOMEM:
-				ret = VM_FAULT_OOM;
-				break;
-			case -EBUSY:
-				ret = VM_FAULT_RETRY;
-				break;
-			case -EFAULT:
-			case -EINVAL:
-				ret = VM_FAULT_SIGBUS;
-				break;
-			default:
-				WARN_ON(PTR_ERR(page));
-				ret = VM_FAULT_SIGBUS;
-				break;
-		}
-	}
 	return ret;
 }
@@ -277,32 +258,93 @@ static const struct file_operations vgem_driver_fops = {
 	.release	= drm_release,
 };
 
-static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
+/* Called under pages_lock, except in free path (where it can't race): */
+static void sync_and_unpin(struct drm_vgem_gem_object *bo)
 {
-	mutex_lock(&bo->pages_lock);
-	if (bo->pages_pin_count++ == 0) {
-		struct page **pages;
-
-		pages = drm_gem_get_pages(&bo->base);
-		if (IS_ERR(pages)) {
-			bo->pages_pin_count--;
-			mutex_unlock(&bo->pages_lock);
-			return pages;
-		}
-
-		bo->pages = pages;
+	struct drm_device *dev = bo->base.dev;
+
+	if (bo->table) {
+		dma_sync_sg_for_cpu(dev->dev, bo->table->sgl,
+				    bo->table->nents, DMA_BIDIRECTIONAL);
+		sg_free_table(bo->table);
+		kfree(bo->table);
+		bo->table = NULL;
+	}
+
+	if (bo->pages) {
+		drm_gem_put_pages(&bo->base, bo->pages, true, true);
+		bo->pages = NULL;
+	}
+}
+
+static struct page **pin_and_sync(struct drm_vgem_gem_object *bo)
+{
+	struct drm_device *dev = bo->base.dev;
+	int npages = bo->base.size >> PAGE_SHIFT;
+	struct page **pages;
+	struct sg_table *sgt;
+
+	WARN_ON(!mutex_is_locked(&bo->pages_lock));
+
+	pages = drm_gem_get_pages(&bo->base);
+	if (IS_ERR(pages)) {
+		bo->pages_pin_count--;
+		mutex_unlock(&bo->pages_lock);
+		return pages;
+	}
+
+	sgt = drm_prime_pages_to_sg(pages, npages);
+	if (IS_ERR(sgt)) {
+		dev_err(dev->dev,
+			"failed to allocate sgt: %ld\n",
+			PTR_ERR(bo->table));
+		drm_gem_put_pages(&bo->base, pages, false, false);
+		mutex_unlock(&bo->pages_lock);
+		return ERR_CAST(bo->table);
+	}
+
+	/*
+	 * Flush the object from the CPU cache so that importers
+	 * can rely on coherent indirect access via the exported
+	 * dma-address.
+	 */
+	dma_sync_sg_for_device(dev->dev, sgt->sgl,
+			       sgt->nents, DMA_BIDIRECTIONAL);
+
+	bo->pages = pages;
+	bo->table = sgt;
+
+	return pages;
+}
+
+static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
+{
+	struct page **pages;
+
+	mutex_lock(&bo->pages_lock);
+	if (bo->pages_pin_count++ == 0 && !bo->pages) {
+		pages = pin_and_sync(bo);
+	} else {
+		WARN_ON(!bo->pages);
+		pages = bo->pages;
 	}
 	mutex_unlock(&bo->pages_lock);
 
-	return bo->pages;
+	return pages;
 }
 
 static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
 {
+	/*
+	 * We shouldn't hit this for imported bo's.. in the import
+	 * case we don't own the scatter-table
+	 */
+	WARN_ON(bo->base.import_attach);
+
 	mutex_lock(&bo->pages_lock);
 	if (--bo->pages_pin_count == 0) {
-		drm_gem_put_pages(&bo->base, bo->pages, true, true);
-		bo->pages = NULL;
+		WARN_ON(!bo->table);
+		sync_and_unpin(bo);
 	}
 	mutex_unlock(&bo->pages_lock);
 }
@@ -310,18 +352,12 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
 static int vgem_prime_pin(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-	long n_pages = obj->size >> PAGE_SHIFT;
 	struct page **pages;
 
 	pages = vgem_pin_pages(bo);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	/* Flush the object from the CPU cache so that importers can rely
-	 * on coherent indirect access via the exported dma-address.
-	 */
-	drm_clflush_pages(pages, n_pages);
-
 	return 0;
 }
......
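The vgem change drops drm_clflush_pages(), which is effectively x86-only, in favor of the streaming DMA API, so cache maintenance is expressed as portable ownership transfers that also work on arm. A minimal sketch of that protocol, not part of this commit; dev and sgt are assumed to be already set up, e.g. via drm_prime_pages_to_sg():

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Sketch of the ownership protocol used by pin_and_sync() and
 * sync_and_unpin() above: the device owns the pages while the
 * buffer is exported, the CPU takes them back before release.
 */
static void example_give_to_device(struct device *dev, struct sg_table *sgt)
{
	/* CPU -> device: write dirty CPU cache lines back to memory */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void example_take_back_for_cpu(struct device *dev, struct sg_table *sgt)
{
	/* device -> CPU: discard CPU cache lines that may be stale */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}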
@@ -149,6 +149,8 @@ struct drm_client_buffer {
 struct drm_client_buffer *
 drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
 void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
 
 int drm_client_modeset_create(struct drm_client_dev *client);
 void drm_client_modeset_free(struct drm_client_dev *client);
......
@@ -852,6 +852,13 @@ struct drm_mode_config {
 	/* dumb ioctl parameters */
 	uint32_t preferred_depth, prefer_shadow;
 
+	/**
+	 * @prefer_shadow_fbdev:
+	 *
+	 * Hint to framebuffer emulation to prefer shadow-fb rendering.
+	 */
+	bool prefer_shadow_fbdev;
+
 	/**
 	 * @quirk_addfb_prefer_xbgr_30bpp:
 	 *
......