Commit 00f09afd authored by Dave Airlie's avatar Dave Airlie

Merge branch 'exynos-drm-next' of...

Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next

This patch set adds iommu support, a userptr feature to g2d, minor fixups
and code cleanups.

The iommu feature depends on the patches below, related to the
dma-mapping framework.

This patch is used to allocate a fully physically contiguous memory region.
- add sending of AVI and AUI info frames.
  . this adds code for composing AVI and AUI info frames
    and sending them every VSYNC for HDMI certification.
- bug fix to the previous pull request.
- some code cleanups

* 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos: (32 commits)
  drm/exynos: sending AVI and AUI info frames
  drm/exynos: Use devm_clk_get in exynos_drm_fimd.c
  drm/exynos: Use devm_* APIs in exynos_hdmi.c
  drm/exynos: Use devm_clk_get in exynos_mixer.c
  drm/exynos: Fix potential NULL pointer dereference
  drm/exynos: Use devm_clk_get in exynos_drm_g2d.c
  drm/exynos: use sgt instead of pages for framebuffer address
  drm: exynos: fix for loosing display mode header during mode adjustment
  drm/exynos: fix memory leak to EDID block
  drm/exynos: remove 'pages' and 'page_size' elements in exynos gem buffer
  drm/exynos: add exynos drm specific fb_mmap function
  drm/exynos: make sure that overlay data are updated
  drm/exynos: add vm_ops to specific gem mmaper
  drm/exynos: add userptr feature for g2d module
  drm/exynos: remove unnecessary sg_alloc_table call
  drm: exynos: fix for mapping of dma buffers
  drm/exynos: remove EXYNOS_BO_NONCONTIG type checking.
  drm/exynos: add iommu support for g2d
  drm/exynos: add iommu support for hdmi driver
  drm/exynos: add iommu support to fimd driver
  ...
parents 7136470d a144c2e9
......@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
dma_unmap_{single,page,sg} functions family to force buffer to stay in
device domain after releasing a mapping for it. Use this attribute with
care!
DMA_ATTR_FORCE_CONTIGUOUS
-------------------------
By default the DMA-mapping subsystem is allowed to assemble the buffer
allocated by the dma_alloc_attrs() function from individual pages if it
can be mapped as a contiguous chunk into the device DMA address space.
By specifying this attribute the allocated buffer is forced to be
contiguous in physical memory as well.
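
As a rough illustration (not part of this patch set), a driver of this era
might request such a forced-contiguous allocation roughly as follows;
alloc_contig_buffer() is a hypothetical helper, and dev, size and attrs are
assumed to be supplied by the caller:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	/*
	 * Hypothetical helper: allocate a buffer that is contiguous in
	 * physical memory as well as in the device DMA address space.
	 */
	static void *alloc_contig_buffer(struct device *dev, size_t size,
					 dma_addr_t *dma_addr,
					 struct dma_attrs *attrs)
	{
		init_dma_attrs(attrs);
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs);

		return dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, attrs);
	}

	/* the caller releases it with the same attrs: */
	/* dma_free_attrs(dev, size, vaddr, dma_addr, attrs); */

This mirrors the dma_set_attr()/dma_alloc_attrs() sequence the exynos buffer
code switches to further down in this series.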
......@@ -1036,7 +1036,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
spin_unlock_irqrestore(&mapping->lock, flags);
}
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
gfp_t gfp, struct dma_attrs *attrs)
{
struct page **pages;
int count = size >> PAGE_SHIFT;
......@@ -1050,6 +1051,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
if (!pages)
return NULL;
if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
{
unsigned long order = get_order(size);
struct page *page;
page = dma_alloc_from_contiguous(dev, count, order);
if (!page)
goto error;
__dma_clear_buffer(page, size);
for (i = 0; i < count; i++)
pages[i] = page + i;
return pages;
}
while (count) {
int j, order = __fls(count);
......@@ -1083,14 +1101,21 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
return NULL;
}
static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
static int __iommu_free_buffer(struct device *dev, struct page **pages,
size_t size, struct dma_attrs *attrs)
{
int count = size >> PAGE_SHIFT;
int array_size = count * sizeof(struct page *);
int i;
if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
dma_release_from_contiguous(dev, pages[0], count);
} else {
for (i = 0; i < count; i++)
if (pages[i])
__free_pages(pages[i], 0);
}
if (array_size <= PAGE_SIZE)
kfree(pages);
else
......@@ -1252,7 +1277,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (gfp & GFP_ATOMIC)
return __iommu_alloc_atomic(dev, size, handle);
pages = __iommu_alloc_buffer(dev, size, gfp);
pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
if (!pages)
return NULL;
......@@ -1273,7 +1298,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
err_mapping:
__iommu_remove_mapping(dev, *handle, size);
err_buffer:
__iommu_free_buffer(dev, pages, size);
__iommu_free_buffer(dev, pages, size, attrs);
return NULL;
}
......@@ -1329,7 +1354,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
__iommu_remove_mapping(dev, handle, size);
__iommu_free_buffer(dev, pages, size);
__iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
......
......@@ -1021,6 +1021,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
......@@ -1031,6 +1033,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
drm_vblank_put(dev, e->pipe);
send_vblank_event(dev, e, seq, &now);
}
spin_unlock(&dev->event_lock);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
......
......@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
config DRM_EXYNOS_IOMMU
bool "EXYNOS DRM IOMMU Support"
depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
help
Choose this option if you want to use IOMMU feature for DRM.
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
......
......@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
......
......@@ -33,73 +33,42 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
dma_addr_t start_addr;
unsigned int npages, i = 0;
struct scatterlist *sgl;
int ret = 0;
enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (IS_NONCONTIG_BUFFER(flags)) {
DRM_DEBUG_KMS("not support allocation type.\n");
return -EINVAL;
}
if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
if (buf->size >= SZ_1M) {
npages = buf->size >> SECTION_SHIFT;
buf->page_size = SECTION_SIZE;
} else if (buf->size >= SZ_64K) {
npages = buf->size >> 16;
buf->page_size = SZ_64K;
} else {
npages = buf->size >> PAGE_SHIFT;
buf->page_size = PAGE_SIZE;
}
init_dma_attrs(&buf->dma_attrs);
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
DRM_ERROR("failed to allocate sg table.\n");
return -ENOMEM;
}
if (flags & EXYNOS_BO_NONCONTIG)
attr = DMA_ATTR_WRITE_COMBINE;
ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
if (ret < 0) {
DRM_ERROR("failed to initialize sg table.\n");
kfree(buf->sgt);
buf->sgt = NULL;
return -ENOMEM;
}
dma_set_attr(attr, &buf->dma_attrs);
buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
&buf->dma_addr, GFP_KERNEL);
buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
if (!buf->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
ret = -ENOMEM;
goto err1;
return -ENOMEM;
}
buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
if (!buf->pages) {
DRM_ERROR("failed to allocate pages.\n");
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
DRM_ERROR("failed to allocate sg table.\n");
ret = -ENOMEM;
goto err2;
goto err_free_attrs;
}
sgl = buf->sgt->sgl;
start_addr = buf->dma_addr;
while (i < npages) {
buf->pages[i] = phys_to_page(start_addr);
sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
sg_dma_address(sgl) = start_addr;
start_addr += buf->page_size;
sgl = sg_next(sgl);
i++;
ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
buf->size);
if (ret < 0) {
DRM_ERROR("failed to get sgtable.\n");
goto err_free_sgt;
}
DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
......@@ -108,14 +77,14 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
buf->size);
return ret;
err2:
dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
(dma_addr_t)buf->dma_addr);
buf->dma_addr = (dma_addr_t)NULL;
err1:
sg_free_table(buf->sgt);
err_free_sgt:
kfree(buf->sgt);
buf->sgt = NULL;
err_free_attrs:
dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
return ret;
}
......@@ -125,16 +94,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
/*
* release only physically continuous memory and
* non-continuous memory would be released by exynos
* gem framework.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
DRM_DEBUG_KMS("not support allocation type.\n");
return;
}
if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
......@@ -150,11 +109,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;
kfree(buf->pages);
buf->pages = NULL;
dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
(dma_addr_t)buf->dma_addr);
dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
}
......
......@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
/* allocate physical memory region and setup sgt and pages. */
/* allocate physical memory region and setup sgt. */
int exynos_drm_alloc_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buf,
unsigned int flags);
/* release physical memory region, sgt and pages. */
/* release physical memory region, and sgt. */
void exynos_drm_free_buf(struct drm_device *dev,
unsigned int flags,
struct exynos_drm_gem_buf *buffer);
......
......@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
&dev_priv->pageflip_event_list);
spin_unlock_irq(&dev->event_lock);
crtc->fb = fb;
ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
NULL);
if (ret) {
crtc->fb = old_fb;
spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
spin_unlock_irq(&dev->event_lock);
goto out;
}
......
......@@ -30,26 +30,22 @@
#include <linux/dma-buf.h>
static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
unsigned int page_size)
static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
struct exynos_drm_gem_buf *buf)
{
struct sg_table *sgt = NULL;
struct scatterlist *sgl;
int i, ret;
int ret;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
goto out;
ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
if (ret)
ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
buf->dma_addr, buf->size);
if (ret < 0) {
DRM_ERROR("failed to get sgtable.\n");
goto err_free_sgt;
if (page_size < PAGE_SIZE)
page_size = PAGE_SIZE;
for_each_sg(sgt->sgl, sgl, nr_pages, i)
sg_set_page(sgl, pages[i], page_size, 0);
}
return sgt;
......@@ -68,32 +64,30 @@ static struct sg_table *
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct sg_table *sgt = NULL;
unsigned int npages;
int nents;
DRM_DEBUG_PRIME("%s\n", __FILE__);
mutex_lock(&dev->struct_mutex);
buf = gem_obj->buffer;
/* there should always be pages allocated. */
if (!buf->pages) {
DRM_ERROR("pages is null.\n");
goto err_unlock;
if (!buf) {
DRM_ERROR("buffer is null.\n");
return sgt;
}
npages = buf->size / buf->page_size;
mutex_lock(&dev->struct_mutex);
sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
if (!sgt) {
DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
sgt = exynos_get_sgt(dev, buf);
if (!sgt)
goto err_unlock;
}
nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
if (!nents) {
DRM_ERROR("failed to map sgl with iommu.\n");
sgt = NULL;
goto err_unlock;
}
DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
npages, buf->size, buf->page_size);
DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
......@@ -105,6 +99,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
sg_free_table(sgt);
kfree(sgt);
sgt = NULL;
......@@ -196,7 +191,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
struct page *page;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
......@@ -233,38 +227,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}
buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
if (!buffer->pages) {
DRM_ERROR("failed to allocate pages.\n");
ret = -ENOMEM;
goto err_free_buffer;
}
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
goto err_free_pages;
goto err_free_buffer;
}
sgl = sgt->sgl;
if (sgt->nents == 1) {
buffer->dma_addr = sg_dma_address(sgt->sgl);
buffer->size = sg_dma_len(sgt->sgl);
buffer->size = dma_buf->size;
buffer->dma_addr = sg_dma_address(sgl);
if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
unsigned int i = 0;
buffer->dma_addr = sg_dma_address(sgl);
while (i < sgt->nents) {
buffer->pages[i] = sg_page(sgl);
buffer->size += sg_dma_len(sgl);
sgl = sg_next(sgl);
i++;
}
/*
* this case could be CONTIG or NONCONTIG type but for now
* sets NONCONTIG.
* TODO. we have to find a way that exporter can notify
* the type of its own buffer to importer.
*/
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
......@@ -277,9 +260,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
return &exynos_gem_obj->base;
err_free_pages:
kfree(buffer->pages);
buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
......
......@@ -40,6 +40,7 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
......@@ -66,6 +67,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
/*
* create a mapping to manage the iommu table and set a pointer to the
* iommu mapping structure in iommu_mapping of the private data.
* this iommu_mapping can also be used to check whether iommu is
* supported or not.
*/
ret = drm_create_iommu_mapping(dev);
if (ret < 0) {
DRM_ERROR("failed to create iommu mapping.\n");
goto err_crtc;
}
drm_mode_config_init(dev);
/* init kms poll for handling hpd */
......@@ -80,7 +93,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
for (nr = 0; nr < MAX_CRTC; nr++) {
ret = exynos_drm_crtc_create(dev, nr);
if (ret)
goto err_crtc;
goto err_release_iommu_mapping;
}
for (nr = 0; nr < MAX_PLANE; nr++) {
......@@ -89,12 +102,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
goto err_crtc;
goto err_release_iommu_mapping;
}
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
goto err_crtc;
goto err_release_iommu_mapping;
/*
* probe sub drivers such as display controller and hdmi driver,
......@@ -126,6 +139,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
err_release_iommu_mapping:
drm_release_iommu_mapping(dev);
err_crtc:
drm_mode_config_cleanup(dev);
kfree(private);
......@@ -142,6 +157,8 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
......
......@@ -231,8 +231,7 @@ struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
struct list_head gem_list;
unsigned int gem_nr;
struct list_head userptr_list;
};
struct drm_exynos_file_private {
......@@ -241,6 +240,13 @@ struct drm_exynos_file_private {
/*
* Exynos drm private structure.
*
* @da_start: start address of the device address space.
* with iommu, the device address space starts from this address,
* otherwise the default one is used.
* @da_space_size: size of the device address space.
* if 0, the default value is used.
* @da_space_order: order of the device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
......@@ -255,6 +261,10 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
unsigned long da_start;
unsigned long da_space_size;
unsigned long da_space_order;
};
/*
......
......@@ -226,8 +226,47 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
* already updated or not by exynos_drm_encoder_dpms function.
*/
exynos_encoder->updated = true;
/*
* In case of setcrtc, there is no way to update encoder's dpms
* so update it here.
*/
exynos_encoder->dpms = DRM_MODE_DPMS_ON;
}
void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
{
struct exynos_drm_encoder *exynos_encoder;
struct exynos_drm_overlay_ops *overlay_ops;
struct exynos_drm_manager *manager;
struct drm_device *dev = fb->dev;
struct drm_encoder *encoder;
/*
* make sure that overlay data are updated to real hardware
* for all encoders.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
exynos_encoder = to_exynos_encoder(encoder);
/* if the exynos encoder was disabled, just ignore it. */
if (exynos_encoder->dpms > DRM_MODE_DPMS_ON)
continue;
manager = exynos_encoder->manager;
overlay_ops = manager->overlay_ops;
/*
* wait for vblank interrupt
* - this makes sure that overlay data are updated to
* real hardware.
*/
if (overlay_ops->wait_for_vblank)
overlay_ops->wait_for_vblank(manager->dev);
}
}
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
......@@ -499,14 +538,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
/*
* wait for vblank interrupt
* - this makes sure that hardware overlay is disabled to avoid
* for the dma accesses to memory after gem buffer was released
* because the setting for disabling the overlay will be updated
* at vsync.
*/
if (overlay_ops->wait_for_vblank)
overlay_ops->wait_for_vblank(manager->dev);
}
......@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
#endif
......@@ -30,10 +30,13 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
#include "exynos_drm_encoder.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
......@@ -50,6 +53,32 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
struct exynos_drm_gem_obj *exynos_gem_obj)
{
unsigned int flags;
/*
* if exynos drm driver supports iommu then framebuffer can use
* all the buffer types.
*/
if (is_drm_iommu_supported(drm_dev))
return 0;
flags = exynos_gem_obj->flags;
/*
* without iommu support, physically non-contiguous memory is not
* supported for the framebuffer.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
DRM_ERROR("cannot use this gem memory type for fb.\n");
return -EINVAL;
}
return 0;
}
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
......@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
DRM_DEBUG_KMS("%s\n", __FILE__);
/* make sure that overlay data are updated before releasing fb. */
exynos_drm_encoder_complete_scanout(fb);
drm_framebuffer_cleanup(fb);
for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
......@@ -128,14 +160,25 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
exynos_gem_obj = to_exynos_gem_obj(obj);
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
if (ret < 0) {
DRM_ERROR("cannot use this gem memory type for fb.\n");
return ERR_PTR(-EINVAL);
}
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
......@@ -143,7 +186,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
}
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
return &exynos_fb->fb;
}
......@@ -214,6 +256,9 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
......@@ -222,6 +267,15 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
exynos_gem_obj = to_exynos_gem_obj(obj);
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
if (ret < 0) {
DRM_ERROR("cannot use this gem memory type for fb.\n");
exynos_drm_fb_destroy(fb);
return ERR_PTR(ret);
}
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
......
......@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
struct exynos_drm_gem_obj *exynos_gem_obj;
};
static int exynos_drm_fb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = info->par;
struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
unsigned long vm_size;
int ret;
DRM_DEBUG_KMS("%s\n", __func__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vm_size = vma->vm_end - vma->vm_start;
if (vm_size > buffer->size)
return -EINVAL;
ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->kvaddr,
buffer->dma_addr, buffer->size, &buffer->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
}
return 0;
}
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
.fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
......@@ -87,7 +117,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
fbi->fix.smem_start = (unsigned long)
(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
......
......@@ -25,6 +25,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_iommu.h"
/*
* FIMD is stand for Fully Interactive Mobile Display and
......@@ -61,11 +62,11 @@ struct fimd_driver_data {
unsigned int timing_base;
};
struct fimd_driver_data exynos4_fimd_driver_data = {
static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
};
struct fimd_driver_data exynos5_fimd_driver_data = {
static struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
};
......@@ -623,7 +624,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
......@@ -633,8 +633,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
is_checked = true;
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
......@@ -642,22 +640,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
if (is_checked) {
/*
* call drm_vblank_put only in case that drm_vblank_get was
* called.
*/
if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
drm_vblank_put(drm_dev, crtc);
/*
* don't off vblank if vblank_disable_allowed is 1,
* because vblank would be off by timer handler.
*/
if (!drm_dev->vblank_disable_allowed)
drm_vblank_off(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
......@@ -709,6 +692,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->vblank_disable_allowed = 1;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(drm_dev))
drm_iommu_attach_device(drm_dev, dev);
return 0;
}
......@@ -716,7 +703,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
/* TODO. */
/* detach this sub driver from iommu mapping if supported. */
if (is_drm_iommu_supported(drm_dev))
drm_iommu_detach_device(drm_dev, dev);
}
static int fimd_calc_clkdiv(struct fimd_context *ctx,
......@@ -857,18 +846,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
ctx->bus_clk = clk_get(dev, "fimd");
ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
ret = PTR_ERR(ctx->bus_clk);
goto err_clk_get;
return PTR_ERR(ctx->bus_clk);
}
ctx->lcd_clk = clk_get(dev, "sclk_fimd");
ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
ret = PTR_ERR(ctx->lcd_clk);
goto err_bus_clk;
return PTR_ERR(ctx->lcd_clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
......@@ -876,14 +863,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!ctx->regs) {
dev_err(dev, "failed to map registers\n");
ret = -ENXIO;
goto err_clk;
return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "irq request failed.\n");
goto err_clk;
return -ENXIO;
}
ctx->irq = res->start;
......@@ -892,7 +878,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
goto err_clk;
return ret;
}
ctx->vidcon0 = pdata->vidcon0;
......@@ -926,17 +912,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
exynos_drm_subdrv_register(subdrv);
return 0;
err_clk:
clk_disable(ctx->lcd_clk);
clk_put(ctx->lcd_clk);
err_bus_clk:
clk_disable(ctx->bus_clk);
clk_put(ctx->bus_clk);
err_clk_get:
return ret;
}
static int __devexit fimd_remove(struct platform_device *pdev)
......@@ -960,9 +935,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
out:
pm_runtime_disable(dev);
clk_put(ctx->lcd_clk);
clk_put(ctx->bus_clk);
return 0;
}
......
......@@ -17,11 +17,14 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
......@@ -92,10 +95,20 @@
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
#define MAX_BUF_ADDR_NR 6
/* maximum buffer pool size of userptr is 64MB as default */
#define MAX_POOL (64 * 1024 * 1024)
enum {
BUF_TYPE_GEM = 1,
BUF_TYPE_USERPTR,
};
/* cmdlist data structure */
struct g2d_cmdlist {
u32 head;
u32 data[G2D_CMDLIST_DATA_NUM];
unsigned long data[G2D_CMDLIST_DATA_NUM];
u32 last; /* last data offset */
};
......@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
struct drm_exynos_g2d_event event;
};
struct g2d_gem_node {
struct g2d_cmdlist_userptr {
struct list_head list;
unsigned int handle;
dma_addr_t dma_addr;
unsigned long userptr;
unsigned long size;
struct page **pages;
unsigned int npages;
struct sg_table *sgt;
struct vm_area_struct *vma;
atomic_t refcount;
bool in_pool;
bool out_of_list;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
unsigned int gem_nr;
unsigned int map_nr;
unsigned long handles[MAX_BUF_ADDR_NR];
unsigned int obj_type[MAX_BUF_ADDR_NR];
dma_addr_t dma_addr;
struct drm_exynos_pending_g2d_event *event;
......@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
struct drm_file *filp;
pid_t pid;
struct completion complete;
int async;
......@@ -143,23 +168,33 @@ struct g2d_data {
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
struct dma_attrs cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
struct list_head runqueue;
struct mutex runqueue_mutex;
struct kmem_cache *runqueue_slab;
unsigned long current_pool;
unsigned long max_pool;
};
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL);
init_dma_attrs(&g2d->cmdlist_dma_attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
&g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
......@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
g2d->cmdlist_pool);
dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
return ret;
}
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
kfree(g2d->cmdlist_node);
dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
g2d->cmdlist_pool);
dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
......@@ -245,62 +282,300 @@ static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
list_add_tail(&node->event->base.link, &g2d_priv->event_list);
}
static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
struct drm_file *file,
struct g2d_cmdlist_node *node)
static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
unsigned long obj,
bool force)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct g2d_cmdlist_userptr *g2d_userptr =
(struct g2d_cmdlist_userptr *)obj;
if (!obj)
return;
if (force)
goto out;
atomic_dec(&g2d_userptr->refcount);
if (atomic_read(&g2d_userptr->refcount) > 0)
return;
if (g2d_userptr->in_pool)
return;
out:
exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
DMA_BIDIRECTIONAL);
exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
g2d_userptr->npages,
g2d_userptr->vma);
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
sg_free_table(g2d_userptr->sgt);
kfree(g2d_userptr->sgt);
g2d_userptr->sgt = NULL;
kfree(g2d_userptr->pages);
g2d_userptr->pages = NULL;
kfree(g2d_userptr);
g2d_userptr = NULL;
}
dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
unsigned long userptr,
unsigned long size,
struct drm_file *filp,
unsigned long *obj)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr;
struct g2d_data *g2d;
struct page **pages;
struct sg_table *sgt;
struct vm_area_struct *vma;
unsigned long start, end;
unsigned int npages, offset;
int ret;
if (!size) {
DRM_ERROR("invalid userptr size.\n");
return ERR_PTR(-EINVAL);
}
g2d = dev_get_drvdata(g2d_priv->dev);
/* check if userptr already exists in userptr_list. */
list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
if (g2d_userptr->userptr == userptr) {
/*
* also check the size because the same address could be
* used with a different size.
*/
if (g2d_userptr->size == size) {
atomic_inc(&g2d_userptr->refcount);
*obj = (unsigned long)g2d_userptr;
return &g2d_userptr->dma_addr;
}
/*
* at this moment the g2d dma may still be accessing this
* g2d_userptr memory region, so just remove this
* g2d_userptr object from userptr_list so that it is not
* referred to again, and also take it out of the userptr
* pool so that it is released after the dma access completes.
*/
g2d_userptr->out_of_list = true;
g2d_userptr->in_pool = false;
list_del_init(&g2d_userptr->list);
break;
}
}
g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
if (!g2d_userptr) {
DRM_ERROR("failed to allocate g2d_userptr.\n");
return ERR_PTR(-ENOMEM);
}
atomic_set(&g2d_userptr->refcount, 1);
start = userptr & PAGE_MASK;
offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT;
g2d_userptr->npages = npages;
pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
if (!pages) {
DRM_ERROR("failed to allocate pages.\n");
kfree(g2d_userptr);
return ERR_PTR(-ENOMEM);
}
vma = find_vma(current->mm, userptr);
if (!vma) {
DRM_ERROR("failed to get vm region.\n");
ret = -EFAULT;
goto err_free_pages;
}
if (vma->vm_end < userptr + size) {
DRM_ERROR("vma is too small.\n");
ret = -EFAULT;
goto err_free_pages;
}
g2d_userptr->vma = exynos_gem_get_vma(vma);
if (!g2d_userptr->vma) {
DRM_ERROR("failed to copy vma.\n");
ret = -ENOMEM;
goto err_free_pages;
}
g2d_userptr->size = size;
ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
npages, pages, vma);
if (ret < 0) {
DRM_ERROR("failed to get user pages from userptr.\n");
goto err_put_vma;
}
g2d_userptr->pages = pages;
sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
if (!sgt) {
DRM_ERROR("failed to allocate sg table.\n");
ret = -ENOMEM;
goto err_free_userptr;
}
ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
size, GFP_KERNEL);
if (ret < 0) {
DRM_ERROR("failed to get sgt from pages.\n");
goto err_free_sgt;
}
g2d_userptr->sgt = sgt;
ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
DMA_BIDIRECTIONAL);
if (ret < 0) {
DRM_ERROR("failed to map sgt with dma region.\n");
goto err_free_sgt;
}
g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
g2d_userptr->userptr = userptr;
list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
g2d->current_pool += npages << PAGE_SHIFT;
g2d_userptr->in_pool = true;
}
*obj = (unsigned long)g2d_userptr;
return &g2d_userptr->dma_addr;
err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
sgt = NULL;
err_free_userptr:
exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
g2d_userptr->npages,
g2d_userptr->vma);
err_put_vma:
exynos_gem_put_vma(g2d_userptr->vma);
err_free_pages:
kfree(pages);
kfree(g2d_userptr);
pages = NULL;
g2d_userptr = NULL;
return ERR_PTR(ret);
}
static void g2d_userptr_free_all(struct drm_device *drm_dev,
struct g2d_data *g2d,
struct drm_file *filp)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr, *n;
list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
if (g2d_userptr->in_pool)
g2d_userptr_put_dma_addr(drm_dev,
(unsigned long)g2d_userptr,
true);
g2d->current_pool = 0;
}
static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_device *drm_dev,
struct drm_file *file)
{
struct g2d_cmdlist *cmdlist = node->cmdlist;
dma_addr_t *addr;
int offset;
int i;
for (i = 0; i < node->gem_nr; i++) {
struct g2d_gem_node *gem_node;
gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
if (!gem_node) {
dev_err(g2d_priv->dev, "failed to allocate gem node\n");
return -ENOMEM;
}
for (i = 0; i < node->map_nr; i++) {
unsigned long handle;
dma_addr_t *addr;
offset = cmdlist->last - (i * 2 + 1);
gem_node->handle = cmdlist->data[offset];
handle = cmdlist->data[offset];
addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
if (node->obj_type[i] == BUF_TYPE_GEM) {
addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
file);
if (IS_ERR(addr)) {
node->gem_nr = i;
kfree(gem_node);
return PTR_ERR(addr);
node->map_nr = i;
return -EFAULT;
}
} else {
struct drm_exynos_g2d_userptr g2d_userptr;
if (copy_from_user(&g2d_userptr, (void __user *)handle,
sizeof(struct drm_exynos_g2d_userptr))) {
node->map_nr = i;
return -EFAULT;
}
addr = g2d_userptr_get_dma_addr(drm_dev,
g2d_userptr.userptr,
g2d_userptr.size,
file,
&handle);
if (IS_ERR(addr)) {
node->map_nr = i;
return -EFAULT;
}
}
cmdlist->data[offset] = *addr;
list_add_tail(&gem_node->list, &g2d_priv->gem_list);
g2d_priv->gem_nr++;
node->handles[i] = handle;
}
return 0;
}
static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
struct drm_file *file,
unsigned int nr)
static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_file *filp)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_gem_node *node, *n;
struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int i;
list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
if (!nr)
break;
for (i = 0; i < node->map_nr; i++) {
unsigned long handle = node->handles[i];
exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
list_del_init(&node->list);
kfree(node);
nr--;
if (node->obj_type[i] == BUF_TYPE_GEM)
exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
filp);
else
g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
false);
node->handles[i] = 0;
}
node->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
......@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
struct g2d_cmdlist_node *node;
if (!runqueue_node)
return;
mutex_lock(&g2d->cmdlist_mutex);
/*
* commands in run_cmdlist have been completed so unmap all gem
* objects in each command node so that they are unreferenced.
*/
list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
......@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
static int g2d_check_reg_offset(struct device *dev,
struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
struct g2d_cmdlist *cmdlist = node->cmdlist;
int reg_offset;
int index;
int i;
for (i = 0; i < nr; i++) {
index = cmdlist->last - 2 * (i + 1);
if (for_addr) {
/* check userptr buffer type. */
reg_offset = (cmdlist->data[index] &
~0x7fffffff) >> 31;
if (reg_offset) {
node->obj_type[i] = BUF_TYPE_USERPTR;
cmdlist->data[index] &= ~G2D_BUF_USERPTR;
}
}
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
......@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
case G2D_MSK_BASE_ADDR:
if (!for_addr)
goto err;
if (node->obj_type[i] != BUF_TYPE_USERPTR)
node->obj_type[i] = BUF_TYPE_GEM;
break;
default:
if (for_addr)
......@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
return 0;
err:
dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
......@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
ret = -EINVAL;
......@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
node->gem_nr = req->cmd_gem_nr;
if (req->cmd_gem_nr) {
struct drm_exynos_g2d_cmd *cmd_gem;
node->map_nr = req->cmd_buf_nr;
if (req->cmd_buf_nr) {
struct drm_exynos_g2d_cmd *cmd_buf;
cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
if (copy_from_user(cmdlist->data + cmdlist->last,
(void __user *)cmd_gem,
sizeof(*cmd_gem) * req->cmd_gem_nr)) {
(void __user *)cmd_buf,
sizeof(*cmd_buf) * req->cmd_buf_nr)) {
ret = -EFAULT;
goto err_free_event;
}
cmdlist->last += req->cmd_gem_nr * 2;
cmdlist->last += req->cmd_buf_nr * 2;
ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
ret = g2d_get_cmdlist_gem(drm_dev, file, node);
ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
if (ret < 0)
goto err_unmap;
}
......@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
return 0;
err_unmap:
g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
if (node->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
......@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
mutex_lock(&g2d->runqueue_mutex);
runqueue_node->pid = current->pid;
runqueue_node->filp = file;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
if (!g2d->runqueue_node)
g2d_exec_runqueue(g2d);
......@@ -696,6 +996,43 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
struct g2d_data *g2d;
int ret;
g2d = dev_get_drvdata(dev);
if (!g2d)
return -EFAULT;
/* allocate dma-aware cmdlist buffer. */
ret = g2d_init_cmdlist(g2d);
if (ret < 0) {
dev_err(dev, "cmdlist init failed\n");
return ret;
}
if (!is_drm_iommu_supported(drm_dev))
return 0;
ret = drm_iommu_attach_device(drm_dev, dev);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
}
return ret;
}
static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
if (!is_drm_iommu_supported(drm_dev))
return;
drm_iommu_detach_device(drm_dev, dev);
}
static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
......@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
INIT_LIST_HEAD(&g2d_priv->event_list);
INIT_LIST_HEAD(&g2d_priv->gem_list);
INIT_LIST_HEAD(&g2d_priv->userptr_list);
return 0;
}
......@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
return;
mutex_lock(&g2d->cmdlist_mutex);
list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
/*
* unmap all gem objects not completed.
*
* P.S. if the current process was terminated forcibly, then
* there may still be some commands in inuse_cmdlist, so unmap
* them.
*/
g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
}
mutex_unlock(&g2d->cmdlist_mutex);
g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
/* release all g2d_userptr in pool. */
g2d_userptr_free_all(drm_dev, g2d, file);
kfree(file_priv->g2d_priv);
}
......@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
ret = g2d_init_cmdlist(g2d);
if (ret < 0)
goto err_destroy_workqueue;
g2d->gate_clk = clk_get(dev, "fimg2d");
g2d->gate_clk = devm_clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
goto err_fini_cmdlist;
goto err_destroy_workqueue;
}
pm_runtime_enable(dev);
......@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
goto err_put_clk;
}
g2d->max_pool = MAX_POOL;
platform_set_drvdata(pdev, g2d);
subdrv = &g2d->subdrv;
subdrv->dev = dev;
subdrv->probe = g2d_subdrv_probe;
subdrv->remove = g2d_subdrv_remove;
subdrv->open = g2d_open;
subdrv->close = g2d_close;
......@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
err_put_clk:
pm_runtime_disable(dev);
clk_put(g2d->gate_clk);
err_fini_cmdlist:
g2d_fini_cmdlist(g2d);
err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
......@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
......
......@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
if (!IS_NONCONTIG_BUFFER(flags)) {
if (size >= SZ_1M)
return roundup(size, SECTION_SIZE);
else if (size >= SZ_64K)
return roundup(size, SZ_64K);
else
goto out;
}
out:
return roundup(size, PAGE_SIZE);
}
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
struct page *p, **pages;
int i, npages;
npages = obj->size >> PAGE_SHIFT;
pages = drm_malloc_ab(npages, sizeof(struct page *));
if (pages == NULL)
return ERR_PTR(-ENOMEM);
for (i = 0; i < npages; i++) {
p = alloc_page(gfpmask);
if (IS_ERR(p))
goto fail;
pages[i] = p;
}
return pages;
fail:
while (--i)
__free_page(pages[i]);
drm_free_large(pages);
return ERR_CAST(p);
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
struct page **pages)
{
int npages;
npages = obj->size >> PAGE_SHIFT;
while (--npages >= 0)
__free_page(pages[npages]);
/* TODO */
drm_free_large(pages);
return roundup(size, PAGE_SIZE);
}
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
struct scatterlist *sgl;
unsigned long pfn;
int i;
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
if (!buf->pages)
if (!buf->sgt)
return -EINTR;
pfn = page_to_pfn(buf->pages[page_offset++]);
} else
pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
struct scatterlist *sgl;
struct page **pages;
unsigned int npages, i = 0;
int ret;
if (buf->pages) {
DRM_DEBUG_KMS("already allocated.\n");
if (page_offset >= (buf->size >> PAGE_SHIFT)) {
DRM_ERROR("invalid page offset\n");
return -EINVAL;
}
pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
if (IS_ERR(pages)) {
DRM_ERROR("failed to get pages.\n");
return PTR_ERR(pages);
}
npages = obj->size >> PAGE_SHIFT;
buf->page_size = PAGE_SIZE;
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
DRM_ERROR("failed to allocate sg table.\n");
ret = -ENOMEM;
goto err;
}
ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
if (ret < 0) {
DRM_ERROR("failed to initialize sg table.\n");
ret = -EFAULT;
goto err1;
}
sgl = buf->sgt->sgl;
/* set all pages to sg list. */
while (i < npages) {
sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
sg_dma_address(sgl) = page_to_phys(pages[i]);
i++;
sgl = sg_next(sgl);
for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
if (page_offset < (sgl->length >> PAGE_SHIFT))
break;
page_offset -= (sgl->length >> PAGE_SHIFT);
}
/* add some codes for UNCACHED type here. TODO */
buf->pages = pages;
return ret;
err1:
kfree(buf->sgt);
buf->sgt = NULL;
err:
exynos_gem_put_pages(obj, pages);
return ret;
}
static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
/*
* if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
* allocated at gem fault handler.
*/
sg_free_table(buf->sgt);
kfree(buf->sgt);
buf->sgt = NULL;
exynos_gem_put_pages(obj, buf->pages);
buf->pages = NULL;
pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
/* add some codes for UNCACHED type here. TODO */
return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
......@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
if (!buf->pages)
return;
/*
* do not release memory region from exporter.
*
......@@ -282,9 +162,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
exynos_drm_gem_put_pages(obj);
else
exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
......@@ -364,23 +241,11 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
/*
* allocate all pages as desired size if user wants to allocate
* physically non-continuous memory.
*/
if (flags & EXYNOS_BO_NONCONTIG) {
ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
if (ret < 0) {
drm_gem_object_release(&exynos_gem_obj->base);
goto err_fini_buf;
}
} else {
ret = exynos_drm_alloc_buf(dev, buf, flags);
if (ret < 0) {
drm_gem_object_release(&exynos_gem_obj->base);
goto err_fini_buf;
}
}
return exynos_gem_obj;
......@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv)
struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
......@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
DRM_DEBUG_KMS("not support NONCONTIG type.\n");
drm_gem_object_unreference_unlocked(obj);
/* TODO */
return ERR_PTR(-EINVAL);
}
return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv)
struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
......@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
DRM_DEBUG_KMS("not support NONCONTIG type.\n");
drm_gem_object_unreference_unlocked(obj);
/* TODO */
return;
}
drm_gem_object_unreference_unlocked(obj);
/*
......@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
struct file *filp)
{
struct drm_file *file_priv;
mutex_lock(&drm_dev->struct_mutex);
/* find current process's drm_file from filelist. */
list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
if (file_priv->filp == filp) {
mutex_unlock(&drm_dev->struct_mutex);
return file_priv;
}
}
mutex_unlock(&drm_dev->struct_mutex);
WARN_ON(1);
return ERR_PTR(-EFAULT);
}
static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
struct drm_file *file_priv;
unsigned long vm_size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = obj;
vma->vm_ops = drm_dev->driver->gem_vm_ops;
/* restore it to driver's fops. */
filp->f_op = fops_get(drm_dev->driver->fops);
file_priv = exynos_drm_find_drm_file(drm_dev, filp);
if (IS_ERR(file_priv))
return PTR_ERR(file_priv);
/* restore it to drm_file. */
filp->private_data = file_priv;
update_vm_cache_attr(exynos_gem_obj, vma);
vm_size = usize = vma->vm_end - vma->vm_start;
vm_size = vma->vm_end - vma->vm_start;
/*
* a buffer contains information to physically continuous memory
......@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
int i = 0;
if (!buffer->pages)
return -EINVAL;
vma->vm_flags |= VM_MIXEDMAP;
do {
ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
if (ret) {
DRM_ERROR("failed to remap user space.\n");
ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->kvaddr,
buffer->dma_addr, buffer->size,
&buffer->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
}
uaddr += PAGE_SIZE;
usize -= PAGE_SIZE;
} while (usize > 0);
} else {
/*
* get page frame number to physical memory to be mapped
* to user space.
* take a reference to this mapping of the object. And this reference
* is unreferenced by the corresponding vm_close call.
*/
pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
PAGE_SHIFT;
drm_gem_object_reference(obj);
DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
vma->vm_page_prot)) {
DRM_ERROR("failed to remap pfn range.\n");
return -EAGAIN;
}
}
mutex_lock(&drm_dev->struct_mutex);
drm_vm_open_locked(drm_dev, vma);
mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
......@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
obj->filp->f_op = &exynos_drm_gem_fops;
obj->filp->private_data = obj;
/*
* Set the specific mmapper's fops, and it will be restored by
* exynos_drm_gem_mmap_buffer to dev->driver->fops.
* This is used to call the specific mapper temporarily.
*/
file_priv->filp->f_op = &exynos_drm_gem_fops;
addr = vm_mmap(obj->filp, 0, args->size,
/*
* Set the gem object to private_data so that the specific mmapper
* can get the gem object. It will be restored by
* exynos_drm_gem_mmap_buffer to drm_file.
*/
file_priv->filp->private_data = obj;
addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
if (IS_ERR((void *)addr)) {
file_priv->filp->private_data = file_priv;
return PTR_ERR((void *)addr);
}
args->mapped = addr;
......@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
struct vm_area_struct *vma_copy;
vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
if (!vma_copy)
return NULL;
if (vma->vm_ops && vma->vm_ops->open)
vma->vm_ops->open(vma);
if (vma->vm_file)
get_file(vma->vm_file);
memcpy(vma_copy, vma, sizeof(*vma));
vma_copy->vm_mm = NULL;
vma_copy->vm_next = NULL;
vma_copy->vm_prev = NULL;
return vma_copy;
}
void exynos_gem_put_vma(struct vm_area_struct *vma)
{
if (!vma)
return;
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
fput(vma->vm_file);
kfree(vma);
}
int exynos_gem_get_pages_from_userptr(unsigned long start,
unsigned int npages,
struct page **pages,
struct vm_area_struct *vma)
{
int get_npages;
/* the memory region was mmapped with VM_PFNMAP. */
if (vma_is_io(vma)) {
unsigned int i;
for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
unsigned long pfn;
int ret = follow_pfn(vma, start, &pfn);
if (ret)
return ret;
pages[i] = pfn_to_page(pfn);
}
if (i != npages) {
DRM_ERROR("failed to get user_pages.\n");
return -EINVAL;
}
return 0;
}
get_npages = get_user_pages(current, current->mm, start,
npages, 1, 1, pages, NULL);
get_npages = max(get_npages, 0);
if (get_npages != npages) {
DRM_ERROR("failed to get user_pages.\n");
while (get_npages)
put_page(pages[--get_npages]);
return -EFAULT;
}
return 0;
}
void exynos_gem_put_pages_to_userptr(struct page **pages,
unsigned int npages,
struct vm_area_struct *vma)
{
if (!vma_is_io(vma)) {
unsigned int i;
for (i = 0; i < npages; i++) {
set_page_dirty_lock(pages[i]);
/*
* undo the reference we took when populating
* the table.
*/
put_page(pages[i]);
}
}
}
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir)
{
int nents;
mutex_lock(&drm_dev->struct_mutex);
nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
if (!nents) {
DRM_ERROR("failed to map sgl with dma.\n");
mutex_unlock(&drm_dev->struct_mutex);
return nents;
}
mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir)
{
dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
......@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
if (ret < 0)
DRM_ERROR("failed to map pages.\n");
DRM_ERROR("failed to map a buffer with user.\n");
mutex_unlock(&dev->struct_mutex);
......
......@@ -35,21 +35,25 @@
* exynos drm gem buffer structure.
*
* @kvaddr: kernel virtual address of the allocated memory region.
* @userptr: user space address.
* @dma_addr: bus address (accessed by dma) of the allocated memory region.
* - this is a physical address without IOMMU and a device
* address with IOMMU.
* @write: whether pages will be written to by the caller.
* @sgt: sg table to transfer page data.
* @pages: contains all pages of the allocated memory region.
* @page_size: could be 4K, 64K or 1MB.
* @size: size of the allocated memory region.
* @pfnmap: indicates whether the memory region from userptr is mmapped
* with VM_PFNMAP or not.
*/
struct exynos_drm_gem_buf {
void __iomem *kvaddr;
unsigned long userptr;
dma_addr_t dma_addr;
struct dma_attrs dma_attrs;
unsigned int write;
struct sg_table *sgt;
struct page **pages;
unsigned long page_size;
unsigned long size;
bool pfnmap;
};
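A minimal sketch, not part of this patch set, of how a low-level allocator could fill the fields above now that the buffer carries its own dma_attrs; the helper name and the write-combine attribute are assumptions made only for illustration.

static int example_alloc_gem_buf(struct drm_device *dev,
                                 struct exynos_drm_gem_buf *buf,
                                 unsigned long size)
{
        init_dma_attrs(&buf->dma_attrs);
        /* assumption: a write-combined mapping is acceptable for this buffer */
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &buf->dma_attrs);

        buf->size = size;
        /* kvaddr is declared __iomem above, so a cast may be wanted for sparse */
        buf->kvaddr = dma_alloc_attrs(dev->dev, size, &buf->dma_addr,
                                      GFP_KERNEL, &buf->dma_attrs);
        if (!buf->kvaddr)
                return -ENOMEM;

        return 0;
}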
/*
......@@ -65,6 +69,7 @@ struct exynos_drm_gem_buf {
* or at framebuffer creation.
* @size: size requested from user, in bytes; this size is aligned
* in page units.
* @vma: a pointer to vm_area.
* @flags: indicate the memory type of the allocated buffer and its cache attribute.
*
* P.S. this object would be transferred to user as kms_bo.handle so
......@@ -74,6 +79,7 @@ struct exynos_drm_gem_obj {
struct drm_gem_object base;
struct exynos_drm_gem_buf *buffer;
unsigned long size;
struct vm_area_struct *vma;
unsigned int flags;
};
......@@ -104,9 +110,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv);
struct drm_file *filp);
/*
* put dma address from gem handle and this function could be used for
......@@ -115,7 +121,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
*/
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv);
struct drm_file *filp);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
......@@ -128,6 +134,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* map user-space memory allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
......@@ -163,4 +173,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags; the vm attributes can be changed to another type here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
static inline int vma_is_io(struct vm_area_struct *vma)
{
return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
/* get a copy of a virtual memory region. */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
/* release a userspace virtual memory area. */
void exynos_gem_put_vma(struct vm_area_struct *vma);
/* get pages from user space. */
int exynos_gem_get_pages_from_userptr(unsigned long start,
unsigned int npages,
struct page **pages,
struct vm_area_struct *vma);
/* drop the reference to pages. */
void exynos_gem_put_pages_to_userptr(struct page **pages,
unsigned int npages,
struct vm_area_struct *vma);
/* map sgt with dma region. */
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir);
/* unmap sgt from dma region. */
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir);
#endif
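The userptr helpers declared above are meant to be used together. The following is a hedged sketch, with an assumed function name and simplified locking and error handling (not code from this series), of the order a consumer such as the g2d driver would call them in:

static int example_map_userptr(struct drm_device *drm_dev,
                               unsigned long userptr, unsigned long size,
                               struct page **pages, struct sg_table *sgt)
{
        unsigned int npages = size >> PAGE_SHIFT;
        struct vm_area_struct *vma;
        int ret;

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, userptr);
        if (!vma) {
                up_read(&current->mm->mmap_sem);
                return -EFAULT;
        }

        /* a real consumer also stores exynos_gem_get_vma(vma) so the region
         * can be dropped later with exynos_gem_put_vma() */
        ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages, vma);
        up_read(&current->mm->mmap_sem);
        if (ret)
                return ret;

        ret = sg_alloc_table_from_pages(sgt, pages, npages, 0, size,
                                        GFP_KERNEL);
        if (ret)
                return ret;

        /* reversed on release by exynos_gem_unmap_sgt_from_dma() and
         * exynos_gem_put_pages_to_userptr() */
        return exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
}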
......@@ -346,9 +346,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx->drm_dev = drm_dev;
if (mixer_ops->iommu_on)
mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
return 0;
}
static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
struct drm_hdmi_context *ctx;
struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
ctx = get_ctx_from_subdrv(subdrv);
if (mixer_ops->iommu_on)
mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
}
static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
......@@ -368,6 +382,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
subdrv->dev = dev;
subdrv->manager = &hdmi_manager;
subdrv->probe = hdmi_subdrv_probe;
subdrv->remove = hdmi_subdrv_remove;
platform_set_drvdata(pdev, subdrv);
......
......@@ -62,6 +62,7 @@ struct exynos_hdmi_ops {
struct exynos_mixer_ops {
/* manager */
int (*iommu_on)(void *ctx, bool enable);
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
void (*dpms)(void *ctx, int mode);
......
/* exynos_drm_iommu.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drmP.h>
#include <drm/exynos_drm.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <asm/dma-iommu.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"
/*
* drm_create_iommu_mapping - create a mapping structure
*
* @drm_dev: DRM device
*/
int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
struct dma_iommu_mapping *mapping = NULL;
struct exynos_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
if (!priv->da_start)
priv->da_start = EXYNOS_DEV_ADDR_START;
if (!priv->da_space_size)
priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
if (!priv->da_space_order)
priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
priv->da_space_size,
priv->da_space_order);
if (!mapping)
return -ENOMEM;
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
dma_set_max_seg_size(dev, 0xffffffffu);
dev->archdata.mapping = mapping;
return 0;
}
/*
* drm_release_iommu_mapping - release iommu mapping structure
*
* @drm_dev: DRM device
*
* if mapping->kref drops to 0, then everything related to the iommu mapping
* is released
*/
void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
struct device *dev = drm_dev->dev;
arm_iommu_release_mapping(dev->archdata.mapping);
}
/*
* drm_iommu_attach_device - attach device to iommu mapping
*
* @drm_dev: DRM device
* @subdrv_dev: device to be attached
*
* This function should be called by sub drivers to attach themselves to the
* iommu mapping.
*/
int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
struct device *dev = drm_dev->dev;
int ret;
if (!dev->archdata.mapping) {
DRM_ERROR("iommu_mapping is null.\n");
return -EFAULT;
}
subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
sizeof(*subdrv_dev->dma_parms),
GFP_KERNEL);
dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
if (ret < 0) {
DRM_DEBUG_KMS("failed iommu attach.\n");
return ret;
}
/*
* Set dma_ops to drm_device just one time.
*
* The dma mapping api needs a device object and is used to allocate
* physical memory and map it through the iommu table.
* If the iommu attach succeeded, the sub driver gets the iommu dma_ops,
* and all sub drivers then share the same dma_ops.
*/
if (!dev->archdata.dma_ops)
dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
return 0;
}
/*
* drm_iommu_detach_device - detach device address space mapping from device
*
* @drm_dev: DRM device
* @subdrv_dev: device to be detached
*
* This function should be called by sub drivers to detach themselves from the
* iommu mapping
*/
void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
struct device *dev = drm_dev->dev;
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
if (!mapping || !mapping->domain)
return;
iommu_detach_device(mapping->domain, subdrv_dev);
drm_release_iommu_mapping(drm_dev);
}
/* exynos_drm_iommu.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXYNOS_DRM_IOMMU_H_
#define _EXYNOS_DRM_IOMMU_H_
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
#define EXYNOS_DEV_ADDR_ORDER 0x4
#ifdef CONFIG_DRM_EXYNOS_IOMMU
int drm_create_iommu_mapping(struct drm_device *drm_dev);
void drm_release_iommu_mapping(struct drm_device *drm_dev);
int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev);
void drm_iommu_detach_device(struct drm_device *dev_dev,
struct device *subdrv_dev);
static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
{
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct device *dev = drm_dev->dev;
return dev->archdata.mapping ? true : false;
#else
return false;
#endif
}
#else
struct dma_iommu_mapping;
static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
return 0;
}
static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
}
static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
return 0;
}
static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
}
static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
{
return false;
}
#endif
#endif
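As the comments in exynos_drm_iommu.c above describe, sub drivers attach and detach themselves around their subdrv probe/remove. A small sketch of that pattern with assumed function names (the mixer hunk further below implements the same idea through its iommu_on callback):

static int example_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
        if (!is_drm_iommu_supported(drm_dev))
                return 0;

        /* attach this device to the mapping created by
         * drm_create_iommu_mapping() when the drm driver was loaded */
        return drm_iommu_attach_device(drm_dev, dev);
}

static void example_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
        if (is_drm_iommu_supported(drm_dev))
                drm_iommu_detach_device(drm_dev, dev);
}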
......@@ -204,7 +204,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return ret;
plane->crtc = crtc;
plane->fb = crtc->fb;
exynos_plane_commit(plane);
exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
......
......@@ -382,7 +382,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
......@@ -392,8 +391,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
is_checked = true;
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
......@@ -401,22 +398,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
if (is_checked) {
/*
* call drm_vblank_put only in case that drm_vblank_get was
* called.
*/
if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
drm_vblank_put(drm_dev, crtc);
/*
* don't off vblank if vblank_disable_allowed is 1,
* because vblank would be off by timer handler.
*/
if (!drm_dev->vblank_disable_allowed)
drm_vblank_off(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
......
......@@ -50,6 +50,29 @@
#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
/* AVI header and aspect ratio */
#define HDMI_AVI_VERSION 0x02
#define HDMI_AVI_LENGTH 0x0D
#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
/* AUI header info */
#define HDMI_AUI_VERSION 0x01
#define HDMI_AUI_LENGTH 0x0A
/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
enum HDMI_PACKET_TYPE {
/* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
/* InfoFrame packet type */
HDMI_PACKET_TYPE_INFOFRAME = 0x80,
/* Vendor-Specific InfoFrame */
HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
/* Auxiliary Video information InfoFrame */
HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
/* Audio information InfoFrame */
HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
};
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
......@@ -74,6 +97,7 @@ struct hdmi_context {
struct mutex hdmi_mutex;
void __iomem *regs;
void *parent_ctx;
int external_irq;
int internal_irq;
......@@ -84,7 +108,6 @@ struct hdmi_context {
int cur_conf;
struct hdmi_resources res;
void *parent_ctx;
int hpd_gpio;
......@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
int height;
int vrefresh;
bool interlace;
int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_v13_preset_conf *conf;
};
......@@ -353,14 +377,19 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
};
static const struct hdmi_v13_conf hdmi_v13_confs[] = {
{ 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
{ 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
{ 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
{ 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
{ 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
{ 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
&hdmi_v13_conf_720p60 },
{ 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
&hdmi_v13_conf_720p60 },
{ 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
&hdmi_v13_conf_480p },
{ 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
&hdmi_v13_conf_1080i50 },
{ 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
&hdmi_v13_conf_1080p50 },
{ 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
{ 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
{ 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
&hdmi_v13_conf_1080i60 },
{ 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
&hdmi_v13_conf_1080p60 },
};
......@@ -479,6 +508,7 @@ struct hdmi_conf {
int height;
int vrefresh;
bool interlace;
int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_preset_conf *conf;
};
......@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
};
static const struct hdmi_conf hdmi_confs[] = {
{ 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
{ 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
{ 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
{ 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
{ 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
{ 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
{ 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
{ 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
{ 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
{ 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
{ 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
{ 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
{ 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
{ 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
{ 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
struct hdmi_infoframe {
enum HDMI_PACKET_TYPE type;
u8 ver;
u8 len;
};
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
......@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
return hdmi_v14_conf_index(mode);
}
static u8 hdmi_chksum(struct hdmi_context *hdata,
u32 start, u8 len, u32 hdr_sum)
{
int i;
/* hdr_sum : header0 + header1 + header2
* start : start address of packet byte1
* len : packet bytes - 1 */
for (i = 0; i < len; ++i)
hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
/* return 2's complement of 8 bit hdr_sum */
return (u8)(~(hdr_sum & 0xff) + 1);
}
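As a worked example of the rule above (the payload value is made up): for an AVI frame, hdr_sum = 0x82 + 0x02 + 0x0D = 0x91; if the thirteen packet bytes happen to sum to 0x47, the running total is 0xD8 and the returned two's complement is 0x28, so header, payload and checksum together sum to 0x100, i.e. 0 modulo 256, which is exactly what an HDMI sink checks.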
static void hdmi_reg_infoframe(struct hdmi_context *hdata,
struct hdmi_infoframe *infoframe)
{
u32 hdr_sum;
u8 chksum;
u32 aspect_ratio;
u32 mod;
u32 vic;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
if (hdata->dvi_mode) {
hdmi_reg_writeb(hdata, HDMI_VSI_CON,
HDMI_VSI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_AVI_CON,
HDMI_AVI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
return;
}
switch (infoframe->type) {
case HDMI_PACKET_TYPE_AVI:
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
/* Output format zero hardcoded, RGB/YCbCr selection */
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
AVI_ACTIVE_FORMAT_VALID |
AVI_UNDERSCANNED_DISPLAY_VALID);
aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
AVI_SAME_AS_PIC_ASPECT_RATIO);
if (hdata->type == HDMI_TYPE13)
vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
else
vic = hdmi_confs[hdata->cur_conf].cea_video_id;
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
infoframe->len, hdr_sum);
DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
break;
case HDMI_PACKET_TYPE_AUI:
hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
infoframe->len, hdr_sum);
DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
break;
default:
break;
}
}
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
......@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
raw_edid->width_cm, raw_edid->height_cm);
kfree(raw_edid);
} else {
return -ENODEV;
}
......@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
struct hdmi_infoframe infoframe;
/* disable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
......@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
infoframe.type = HDMI_PACKET_TYPE_AVI;
infoframe.ver = HDMI_AVI_VERSION;
infoframe.len = HDMI_AVI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
infoframe.type = HDMI_PACKET_TYPE_AUI;
infoframe.ver = HDMI_AUI_VERSION;
infoframe.len = HDMI_AUI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
/* enable AVI packet every vsync, fixes purple line problem */
hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
}
......@@ -1978,9 +2106,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
index = hdmi_v14_conf_index(m);
if (index >= 0) {
struct drm_mode_object base;
struct list_head head;
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
/* preserve display mode header while copying. */
head = adjusted_mode->head;
base = adjusted_mode->base;
memcpy(adjusted_mode, m, sizeof(*m));
adjusted_mode->head = head;
adjusted_mode->base = base;
break;
}
}
......@@ -2166,27 +2303,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
memset(res, 0, sizeof(*res));
/* get clocks, power */
res->hdmi = clk_get(dev, "hdmi");
res->hdmi = devm_clk_get(dev, "hdmi");
if (IS_ERR_OR_NULL(res->hdmi)) {
DRM_ERROR("failed to get clock 'hdmi'\n");
goto fail;
}
res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
goto fail;
}
res->sclk_pixel = clk_get(dev, "sclk_pixel");
res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
if (IS_ERR_OR_NULL(res->sclk_pixel)) {
DRM_ERROR("failed to get clock 'sclk_pixel'\n");
goto fail;
}
res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
res->hdmiphy = clk_get(dev, "hdmiphy");
res->hdmiphy = devm_clk_get(dev, "hdmiphy");
if (IS_ERR_OR_NULL(res->hdmiphy)) {
DRM_ERROR("failed to get clock 'hdmiphy'\n");
goto fail;
......@@ -2194,7 +2331,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
if (!res->regul_bulk) {
DRM_ERROR("failed to get memory for regulators\n");
......@@ -2204,7 +2341,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
}
ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
if (ret) {
DRM_ERROR("failed to get regulators\n");
goto fail;
......@@ -2217,28 +2354,6 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
return -ENODEV;
}
static int hdmi_resources_cleanup(struct hdmi_context *hdata)
{
struct hdmi_resources *res = &hdata->res;
regulator_bulk_free(res->regul_count, res->regul_bulk);
/* kfree is NULL-safe */
kfree(res->regul_bulk);
if (!IS_ERR_OR_NULL(res->hdmiphy))
clk_put(res->hdmiphy);
if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
clk_put(res->sclk_hdmiphy);
if (!IS_ERR_OR_NULL(res->sclk_pixel))
clk_put(res->sclk_pixel);
if (!IS_ERR_OR_NULL(res->sclk_hdmi))
clk_put(res->sclk_hdmi);
if (!IS_ERR_OR_NULL(res->hdmi))
clk_put(res->hdmi);
memset(res, 0, sizeof(*res));
return 0;
}
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
......@@ -2378,36 +2493,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
ret = -EINVAL;
DRM_ERROR("hdmi_resources_init failed\n");
goto err_data;
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("failed to find registers\n");
ret = -ENOENT;
goto err_resource;
return -ENOENT;
}
hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!hdata->regs) {
DRM_ERROR("failed to map registers\n");
ret = -ENXIO;
goto err_resource;
return -ENXIO;
}
ret = gpio_request(hdata->hpd_gpio, "HPD");
ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
goto err_resource;
return ret;
}
/* DDC i2c driver */
if (i2c_add_driver(&ddc_driver)) {
DRM_ERROR("failed to register ddc i2c driver\n");
ret = -ENOENT;
goto err_gpio;
return -ENOENT;
}
hdata->ddc_port = hdmi_ddc;
......@@ -2470,11 +2581,6 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
i2c_del_driver(&hdmiphy_driver);
err_ddc:
i2c_del_driver(&ddc_driver);
err_gpio:
gpio_free(hdata->hpd_gpio);
err_resource:
hdmi_resources_cleanup(hdata);
err_data:
return ret;
}
......@@ -2491,9 +2597,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
free_irq(hdata->internal_irq, hdata);
free_irq(hdata->external_irq, hdata);
gpio_free(hdata->hpd_gpio);
hdmi_resources_cleanup(hdata);
/* hdmiphy i2c driver */
i2c_del_driver(&hdmiphy_driver);
......
......@@ -36,6 +36,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
#include "exynos_drm_iommu.h"
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
......@@ -80,6 +81,7 @@ enum mixer_version_id {
struct mixer_context {
struct device *dev;
struct drm_device *drm_dev;
int pipe;
bool interlace;
bool powered;
......@@ -90,6 +92,7 @@ struct mixer_context {
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
void *parent_ctx;
};
struct mixer_drv_data {
......@@ -665,6 +668,24 @@ static void mixer_win_reset(struct mixer_context *ctx)
spin_unlock_irqrestore(&res->reg_slock, flags);
}
static int mixer_iommu_on(void *ctx, bool enable)
{
struct exynos_drm_hdmi_context *drm_hdmi_ctx;
struct mixer_context *mdata = ctx;
struct drm_device *drm_dev;
drm_hdmi_ctx = mdata->parent_ctx;
drm_dev = drm_hdmi_ctx->drm_dev;
if (is_drm_iommu_supported(drm_dev)) {
if (enable)
return drm_iommu_attach_device(drm_dev, mdata->dev);
drm_iommu_detach_device(drm_dev, mdata->dev);
}
return 0;
}
static void mixer_poweron(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
......@@ -866,6 +887,7 @@ static void mixer_win_disable(void *ctx, int win)
static struct exynos_mixer_ops mixer_ops = {
/* manager */
.iommu_on = mixer_iommu_on,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.dpms = mixer_dpms,
......@@ -884,7 +906,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
......@@ -894,7 +915,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
is_checked = true;
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
......@@ -902,15 +922,8 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
if (is_checked)
/*
* call drm_vblank_put only in case that drm_vblank_get was
* called.
*/
if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
drm_vblank_put(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
......@@ -971,57 +984,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
spin_lock_init(&mixer_res->reg_slock);
mixer_res->mixer = clk_get(dev, "mixer");
mixer_res->mixer = devm_clk_get(dev, "mixer");
if (IS_ERR_OR_NULL(mixer_res->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
ret = -ENODEV;
goto fail;
return -ENODEV;
}
mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
ret = -ENODEV;
goto fail;
return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
ret = -ENXIO;
goto fail;
return -ENXIO;
}
mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
ret = -ENXIO;
goto fail;
return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(dev, "get interrupt resource failed.\n");
ret = -ENXIO;
goto fail;
return -ENXIO;
}
ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
goto fail;
return ret;
}
mixer_res->irq = res->start;
return 0;
fail:
if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
clk_put(mixer_res->sclk_hdmi);
if (!IS_ERR_OR_NULL(mixer_res->mixer))
clk_put(mixer_res->mixer);
return ret;
}
static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
......@@ -1031,25 +1032,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
struct device *dev = &pdev->dev;
struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
int ret;
mixer_res->vp = clk_get(dev, "vp");
mixer_res->vp = devm_clk_get(dev, "vp");
if (IS_ERR_OR_NULL(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
ret = -ENODEV;
goto fail;
return -ENODEV;
}
mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
ret = -ENODEV;
goto fail;
return -ENODEV;
}
mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
ret = -ENODEV;
goto fail;
return -ENODEV;
}
if (mixer_res->sclk_hdmi)
......@@ -1058,28 +1055,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
ret = -ENXIO;
goto fail;
return -ENXIO;
}
mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
ret = -ENXIO;
goto fail;
return -ENXIO;
}
return 0;
fail:
if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
clk_put(mixer_res->sclk_dac);
if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
clk_put(mixer_res->sclk_mixer);
if (!IS_ERR_OR_NULL(mixer_res->vp))
clk_put(mixer_res->vp);
return ret;
}
static struct mixer_drv_data exynos5_mxr_drv_data = {
......@@ -1149,6 +1135,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
}
ctx->dev = &pdev->dev;
ctx->parent_ctx = (void *)drm_hdmi_ctx;
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
......
......@@ -298,14 +298,14 @@
#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n))
#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1))
#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n))
#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1))
#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
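With the (n - 1) form, HDMI_AVI_BYTE(1) resolves to HDMI_CORE_BASE(0x0720), so packet byte 1 written by hdmi_reg_infoframe() lands in the first AVI data register instead of being shifted up by one slot; HDMI_AUI_BYTE() is corrected the same way.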
......@@ -338,6 +338,19 @@
#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
/* AVI bit definition */
#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
/* AUI bit definition */
#define HDMI_AUI_CON_NO_TRAN (0 << 0)
/* VSI bit definition */
#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
/* HDCP related registers */
#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
......
......@@ -17,6 +17,7 @@ enum dma_attr {
DMA_ATTR_NON_CONSISTENT,
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
DMA_ATTR_MAX,
};
......
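A minimal usage sketch for the new attribute, using the struct dma_attrs interface of this kernel generation; the device pointer and the 1 MiB size are placeholders, and the buffer is freed again immediately only to keep the example self-contained.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_force_contig_alloc(struct device *dev)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_addr_t dma_addr;
        void *vaddr;

        dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

        vaddr = dma_alloc_attrs(dev, SZ_1M, &dma_addr, GFP_KERNEL, &attrs);
        if (!vaddr)
                return -ENOMEM;

        dma_free_attrs(dev, SZ_1M, vaddr, dma_addr, &attrs);
        return 0;
}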
......@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
__u32 data;
};
enum drm_exynos_g2d_buf_type {
G2D_BUF_USERPTR = 1 << 31,
};
enum drm_exynos_g2d_event_type {
G2D_EVENT_NOT,
G2D_EVENT_NONSTOP,
G2D_EVENT_STOP, /* not yet */
};
struct drm_exynos_g2d_userptr {
unsigned long userptr;
unsigned long size;
};
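A hedged, userspace-side sketch of describing a malloc'ed region with the new struct; how the descriptor is then referenced from the cmd_buf array, and which word carries the G2D_BUF_USERPTR flag, is left to the g2d driver's cmdlist rules and is not asserted here.

/* illustrative helper, not part of the uapi */
static void example_fill_userptr(struct drm_exynos_g2d_userptr *desc,
                                 void *buf, unsigned long size)
{
        desc->userptr = (unsigned long)buf;     /* address from malloc() */
        desc->size = size;                      /* length of that region */
}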
struct drm_exynos_g2d_set_cmdlist {
__u64 cmd;
__u64 cmd_gem;
__u64 cmd_buf;
__u32 cmd_nr;
__u32 cmd_gem_nr;
__u32 cmd_buf_nr;
/* for g2d event */
__u64 event_type;
......