Commit f6e45661 authored by Luis R. Rodriguez's avatar Luis R. Rodriguez Committed by Ingo Molnar

dma, mm/pat: Rename dma_*_writecombine() to dma_*_wc()

Rename dma_*_writecombine() to dma_*_wc(), so that the naming
is coherent across the various write-combining APIs. Keep the
old names for compatibility for a while, these can be removed
at a later time. A guard is left to enable backporting of the
rename, and later remove of the old mapping defines seemlessly.

Build tested successfully with allmodconfig.

The following Coccinelle SmPL patch was used for this simple
transformation:

@ rename_dma_alloc_writecombine @
expression dev, size, dma_addr, gfp;
@@

-dma_alloc_writecombine(dev, size, dma_addr, gfp)
+dma_alloc_wc(dev, size, dma_addr, gfp)

@ rename_dma_free_writecombine @
expression dev, size, cpu_addr, dma_addr;
@@

-dma_free_writecombine(dev, size, cpu_addr, dma_addr)
+dma_free_wc(dev, size, cpu_addr, dma_addr)

@ rename_dma_mmap_writecombine @
expression dev, vma, cpu_addr, dma_addr, size;
@@

-dma_mmap_writecombine(dev, vma, cpu_addr, dma_addr, size)
+dma_mmap_wc(dev, vma, cpu_addr, dma_addr, size)

We also keep the old names as compatibility helpers, and
guard against their definition to make backporting easier.

Generated-by: Coccinelle SmPL
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Luis R. Rodriguez <mcgrof@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: benh@kernel.crashing.org
Cc: bhelgaas@google.com
Cc: bp@suse.de
Cc: dan.j.williams@intel.com
Cc: daniel.vetter@ffwll.ch
Cc: dhowells@redhat.com
Cc: julia.lawall@lip6.fr
Cc: konrad.wilk@oracle.com
Cc: linux-fbdev@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Cc: luto@amacapital.net
Cc: mst@redhat.com
Cc: tomi.valkeinen@ti.com
Cc: toshi.kani@hp.com
Cc: vinod.koul@intel.com
Cc: xen-devel@lists.xensource.com
Link: http://lkml.kernel.org/r/1453516462-4844-1-git-send-email-mcgrof@do-not-panic.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e2857b8f
...@@ -86,8 +86,8 @@ static int lpc32xx_clcd_setup(struct clcd_fb *fb) ...@@ -86,8 +86,8 @@ static int lpc32xx_clcd_setup(struct clcd_fb *fb)
{ {
dma_addr_t dma; dma_addr_t dma;
fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, PANEL_SIZE, &dma,
PANEL_SIZE, &dma, GFP_KERNEL); GFP_KERNEL);
if (!fb->fb.screen_base) { if (!fb->fb.screen_base) {
printk(KERN_ERR "CLCD: unable to map framebuffer\n"); printk(KERN_ERR "CLCD: unable to map framebuffer\n");
return -ENOMEM; return -ENOMEM;
...@@ -116,15 +116,14 @@ static int lpc32xx_clcd_setup(struct clcd_fb *fb) ...@@ -116,15 +116,14 @@ static int lpc32xx_clcd_setup(struct clcd_fb *fb)
static int lpc32xx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) static int lpc32xx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{ {
return dma_mmap_writecombine(&fb->dev->dev, vma, return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
fb->fb.screen_base, fb->fb.fix.smem_start, fb->fb.fix.smem_start, fb->fb.fix.smem_len);
fb->fb.fix.smem_len);
} }
static void lpc32xx_clcd_remove(struct clcd_fb *fb) static void lpc32xx_clcd_remove(struct clcd_fb *fb)
{ {
dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len, dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
fb->fb.screen_base, fb->fb.fix.smem_start); fb->fb.fix.smem_start);
} }
/* /*
......
...@@ -42,8 +42,8 @@ int netx_clcd_setup(struct clcd_fb *fb) ...@@ -42,8 +42,8 @@ int netx_clcd_setup(struct clcd_fb *fb)
fb->panel = netx_panel; fb->panel = netx_panel;
fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, 1024*1024, fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, 1024 * 1024, &dma,
&dma, GFP_KERNEL); GFP_KERNEL);
if (!fb->fb.screen_base) { if (!fb->fb.screen_base) {
printk(KERN_ERR "CLCD: unable to map framebuffer\n"); printk(KERN_ERR "CLCD: unable to map framebuffer\n");
return -ENOMEM; return -ENOMEM;
...@@ -57,16 +57,14 @@ int netx_clcd_setup(struct clcd_fb *fb) ...@@ -57,16 +57,14 @@ int netx_clcd_setup(struct clcd_fb *fb)
int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{ {
return dma_mmap_writecombine(&fb->dev->dev, vma, return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
fb->fb.screen_base, fb->fb.fix.smem_start, fb->fb.fix.smem_len);
fb->fb.fix.smem_start,
fb->fb.fix.smem_len);
} }
void netx_clcd_remove(struct clcd_fb *fb) void netx_clcd_remove(struct clcd_fb *fb)
{ {
dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len, dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
fb->fb.screen_base, fb->fb.fix.smem_start); fb->fb.fix.smem_start);
} }
static AMBA_AHB_DEVICE(fb, "fb", 0, 0x00104000, { NETX_IRQ_LCD }, NULL); static AMBA_AHB_DEVICE(fb, "fb", 0, 0x00104000, { NETX_IRQ_LCD }, NULL);
......
...@@ -90,8 +90,8 @@ int nspire_clcd_setup(struct clcd_fb *fb) ...@@ -90,8 +90,8 @@ int nspire_clcd_setup(struct clcd_fb *fb)
panel_size = ((panel->mode.xres * panel->mode.yres) * panel->bpp) / 8; panel_size = ((panel->mode.xres * panel->mode.yres) * panel->bpp) / 8;
panel_size = ALIGN(panel_size, PAGE_SIZE); panel_size = ALIGN(panel_size, PAGE_SIZE);
fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, panel_size, &dma,
panel_size, &dma, GFP_KERNEL); GFP_KERNEL);
if (!fb->fb.screen_base) { if (!fb->fb.screen_base) {
pr_err("CLCD: unable to map framebuffer\n"); pr_err("CLCD: unable to map framebuffer\n");
...@@ -107,13 +107,12 @@ int nspire_clcd_setup(struct clcd_fb *fb) ...@@ -107,13 +107,12 @@ int nspire_clcd_setup(struct clcd_fb *fb)
int nspire_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) int nspire_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{ {
return dma_mmap_writecombine(&fb->dev->dev, vma, return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
fb->fb.screen_base, fb->fb.fix.smem_start, fb->fb.fix.smem_start, fb->fb.fix.smem_len);
fb->fb.fix.smem_len);
} }
void nspire_clcd_remove(struct clcd_fb *fb) void nspire_clcd_remove(struct clcd_fb *fb)
{ {
dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len, dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
fb->fb.screen_base, fb->fb.fix.smem_start); fb->fb.fix.smem_start);
} }
...@@ -1300,7 +1300,7 @@ static int iop_adma_probe(struct platform_device *pdev) ...@@ -1300,7 +1300,7 @@ static int iop_adma_probe(struct platform_device *pdev)
* note: writecombine gives slightly better performance, but * note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes * requires that we explicitly flush the writes
*/ */
adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
plat_data->pool_size, plat_data->pool_size,
&adev->dma_desc_pool, &adev->dma_desc_pool,
GFP_KERNEL); GFP_KERNEL);
......
...@@ -964,8 +964,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev, ...@@ -964,8 +964,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
* requires that we explicitly flush the writes * requires that we explicitly flush the writes
*/ */
mv_chan->dma_desc_pool_virt = mv_chan->dma_desc_pool_virt =
dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE, dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
&mv_chan->dma_desc_pool, GFP_KERNEL); GFP_KERNEL);
if (!mv_chan->dma_desc_pool_virt) if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
......
...@@ -502,7 +502,7 @@ static int bam_alloc_chan(struct dma_chan *chan) ...@@ -502,7 +502,7 @@ static int bam_alloc_chan(struct dma_chan *chan)
return 0; return 0;
/* allocate FIFO descriptor space, but only if necessary */ /* allocate FIFO descriptor space, but only if necessary */
bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
&bchan->fifo_phys, GFP_KERNEL); &bchan->fifo_phys, GFP_KERNEL);
if (!bchan->fifo_virt) { if (!bchan->fifo_virt) {
...@@ -538,7 +538,7 @@ static void bam_free_chan(struct dma_chan *chan) ...@@ -538,7 +538,7 @@ static void bam_free_chan(struct dma_chan *chan)
bam_reset_channel(bchan); bam_reset_channel(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags); spin_unlock_irqrestore(&bchan->vc.lock, flags);
dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt, dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
bchan->fifo_phys); bchan->fifo_phys);
bchan->fifo_virt = NULL; bchan->fifo_virt = NULL;
...@@ -1231,7 +1231,7 @@ static int bam_dma_remove(struct platform_device *pdev) ...@@ -1231,7 +1231,7 @@ static int bam_dma_remove(struct platform_device *pdev)
bam_dma_terminate_all(&bdev->channels[i].vc.chan); bam_dma_terminate_all(&bdev->channels[i].vc.chan);
tasklet_kill(&bdev->channels[i].vc.task); tasklet_kill(&bdev->channels[i].vc.task);
dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
bdev->channels[i].fifo_virt, bdev->channels[i].fifo_virt,
bdev->channels[i].fifo_phys); bdev->channels[i].fifo_phys);
} }
......
...@@ -109,8 +109,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, ...@@ -109,8 +109,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
if (IS_ERR(cma_obj)) if (IS_ERR(cma_obj))
return cma_obj; return cma_obj;
cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size, cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN); GFP_KERNEL | __GFP_NOWARN);
if (!cma_obj->vaddr) { if (!cma_obj->vaddr) {
dev_err(drm->dev, "failed to allocate buffer with size %zu\n", dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
size); size);
...@@ -192,7 +192,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) ...@@ -192,7 +192,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
cma_obj = to_drm_gem_cma_obj(gem_obj); cma_obj = to_drm_gem_cma_obj(gem_obj);
if (cma_obj->vaddr) { if (cma_obj->vaddr) {
dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size, dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
cma_obj->vaddr, cma_obj->paddr); cma_obj->vaddr, cma_obj->paddr);
} else if (gem_obj->import_attach) { } else if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, cma_obj->sgt); drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
...@@ -324,9 +324,8 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj, ...@@ -324,9 +324,8 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
vma->vm_flags &= ~VM_PFNMAP; vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0; vma->vm_pgoff = 0;
ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma, ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
cma_obj->vaddr, cma_obj->paddr, cma_obj->paddr, vma->vm_end - vma->vm_start);
vma->vm_end - vma->vm_start);
if (ret) if (ret)
drm_gem_vm_close(vma); drm_gem_vm_close(vma);
......
...@@ -1113,7 +1113,7 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size, ...@@ -1113,7 +1113,7 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
if (!cmdbuf) if (!cmdbuf)
return NULL; return NULL;
cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr, cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
GFP_KERNEL); GFP_KERNEL);
if (!cmdbuf->vaddr) { if (!cmdbuf->vaddr) {
kfree(cmdbuf); kfree(cmdbuf);
...@@ -1128,8 +1128,8 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size, ...@@ -1128,8 +1128,8 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf) void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{ {
dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size, dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
cmdbuf->vaddr, cmdbuf->paddr); cmdbuf->paddr);
kfree(cmdbuf); kfree(cmdbuf);
} }
......
...@@ -573,10 +573,9 @@ static int omap_dmm_remove(struct platform_device *dev) ...@@ -573,10 +573,9 @@ static int omap_dmm_remove(struct platform_device *dev)
kfree(omap_dmm->engines); kfree(omap_dmm->engines);
if (omap_dmm->refill_va) if (omap_dmm->refill_va)
dma_free_writecombine(omap_dmm->dev, dma_free_wc(omap_dmm->dev,
REFILL_BUFFER_SIZE * omap_dmm->num_engines, REFILL_BUFFER_SIZE * omap_dmm->num_engines,
omap_dmm->refill_va, omap_dmm->refill_va, omap_dmm->refill_pa);
omap_dmm->refill_pa);
if (omap_dmm->dummy_page) if (omap_dmm->dummy_page)
__free_page(omap_dmm->dummy_page); __free_page(omap_dmm->dummy_page);
...@@ -701,7 +700,7 @@ static int omap_dmm_probe(struct platform_device *dev) ...@@ -701,7 +700,7 @@ static int omap_dmm_probe(struct platform_device *dev)
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page); omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
/* alloc refill memory */ /* alloc refill memory */
omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev, omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
REFILL_BUFFER_SIZE * omap_dmm->num_engines, REFILL_BUFFER_SIZE * omap_dmm->num_engines,
&omap_dmm->refill_pa, GFP_KERNEL); &omap_dmm->refill_pa, GFP_KERNEL);
if (!omap_dmm->refill_va) { if (!omap_dmm->refill_va) {
......
...@@ -1330,8 +1330,8 @@ void omap_gem_free_object(struct drm_gem_object *obj) ...@@ -1330,8 +1330,8 @@ void omap_gem_free_object(struct drm_gem_object *obj)
omap_gem_detach_pages(obj); omap_gem_detach_pages(obj);
if (!is_shmem(obj)) { if (!is_shmem(obj)) {
dma_free_writecombine(dev->dev, obj->size, dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
omap_obj->vaddr, omap_obj->paddr); omap_obj->paddr);
} else if (omap_obj->vaddr) { } else if (omap_obj->vaddr) {
vunmap(omap_obj->vaddr); vunmap(omap_obj->vaddr);
} }
...@@ -1395,7 +1395,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, ...@@ -1395,7 +1395,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
/* attempt to allocate contiguous memory if we don't /* attempt to allocate contiguous memory if we don't
* have DMM for remappign discontiguous buffers * have DMM for remappign discontiguous buffers
*/ */
omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size, omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
&omap_obj->paddr, GFP_KERNEL); &omap_obj->paddr, GFP_KERNEL);
if (!omap_obj->vaddr) { if (!omap_obj->vaddr) {
kfree(omap_obj); kfree(omap_obj);
......
...@@ -157,14 +157,12 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane, ...@@ -157,14 +157,12 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
cursor->height = src_h; cursor->height = src_h;
if (cursor->pixmap.base) if (cursor->pixmap.base)
dma_free_writecombine(cursor->dev, dma_free_wc(cursor->dev, cursor->pixmap.size,
cursor->pixmap.size, cursor->pixmap.base, cursor->pixmap.paddr);
cursor->pixmap.base,
cursor->pixmap.paddr);
cursor->pixmap.size = cursor->width * cursor->height; cursor->pixmap.size = cursor->width * cursor->height;
cursor->pixmap.base = dma_alloc_writecombine(cursor->dev, cursor->pixmap.base = dma_alloc_wc(cursor->dev,
cursor->pixmap.size, cursor->pixmap.size,
&cursor->pixmap.paddr, &cursor->pixmap.paddr,
GFP_KERNEL | GFP_DMA); GFP_KERNEL | GFP_DMA);
...@@ -252,7 +250,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, ...@@ -252,7 +250,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
/* Allocate clut buffer */ /* Allocate clut buffer */
size = 0x100 * sizeof(unsigned short); size = 0x100 * sizeof(unsigned short);
cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr, cursor->clut = dma_alloc_wc(dev, size, &cursor->clut_paddr,
GFP_KERNEL | GFP_DMA); GFP_KERNEL | GFP_DMA);
if (!cursor->clut) { if (!cursor->clut) {
...@@ -286,7 +284,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, ...@@ -286,7 +284,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
return &cursor->plane.drm_plane; return &cursor->plane.drm_plane;
err_plane: err_plane:
dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr); dma_free_wc(dev, size, cursor->clut, cursor->clut_paddr);
err_clut: err_clut:
devm_kfree(dev, cursor); devm_kfree(dev, cursor);
return NULL; return NULL;
......
...@@ -312,8 +312,7 @@ static void sti_gdp_init(struct sti_gdp *gdp) ...@@ -312,8 +312,7 @@ static void sti_gdp_init(struct sti_gdp *gdp)
/* Allocate all the nodes within a single memory page */ /* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) * size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
base = dma_alloc_writecombine(gdp->dev, base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
size, &dma_addr, GFP_KERNEL | GFP_DMA);
if (!base) { if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n"); DRM_ERROR("Failed to allocate memory for GDP node\n");
......
...@@ -617,7 +617,7 @@ static void sti_hqvdp_init(struct sti_hqvdp *hqvdp) ...@@ -617,7 +617,7 @@ static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
/* Allocate memory for the VDP commands */ /* Allocate memory for the VDP commands */
size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd); size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size, hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
&hqvdp->hqvdp_cmd_paddr, &hqvdp->hqvdp_cmd_paddr,
GFP_KERNEL | GFP_DMA); GFP_KERNEL | GFP_DMA);
if (!hqvdp->hqvdp_cmd) { if (!hqvdp->hqvdp_cmd) {
......
...@@ -175,8 +175,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) ...@@ -175,8 +175,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
sg_free_table(bo->sgt); sg_free_table(bo->sgt);
kfree(bo->sgt); kfree(bo->sgt);
} else if (bo->vaddr) { } else if (bo->vaddr) {
dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
bo->paddr);
} }
} }
...@@ -233,7 +232,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) ...@@ -233,7 +232,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
} else { } else {
size_t size = bo->gem.size; size_t size = bo->gem.size;
bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr, bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
GFP_KERNEL | __GFP_NOWARN); GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) { if (!bo->vaddr) {
dev_err(drm->dev, dev_err(drm->dev,
...@@ -472,8 +471,8 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -472,8 +471,8 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP; vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0; vma->vm_pgoff = 0;
ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr, ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
bo->paddr, gem->size); gem->size);
if (ret) { if (ret) {
drm_gem_vm_close(vma); drm_gem_vm_close(vma);
return ret; return ret;
......
...@@ -398,9 +398,8 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma) ...@@ -398,9 +398,8 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP; vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0; vma->vm_pgoff = 0;
ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma, ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
bo->base.vaddr, bo->base.paddr, bo->base.paddr, vma->vm_end - vma->vm_start);
vma->vm_end - vma->vm_start);
if (ret) if (ret)
drm_gem_vm_close(vma); drm_gem_vm_close(vma);
......
...@@ -52,8 +52,8 @@ static void host1x_pushbuffer_destroy(struct push_buffer *pb) ...@@ -52,8 +52,8 @@ static void host1x_pushbuffer_destroy(struct push_buffer *pb)
struct host1x *host1x = cdma_to_host1x(cdma); struct host1x *host1x = cdma_to_host1x(cdma);
if (pb->phys != 0) if (pb->phys != 0)
dma_free_writecombine(host1x->dev, pb->size_bytes + 4, dma_free_wc(host1x->dev, pb->size_bytes + 4, pb->mapped,
pb->mapped, pb->phys); pb->phys);
pb->mapped = NULL; pb->mapped = NULL;
pb->phys = 0; pb->phys = 0;
...@@ -76,8 +76,8 @@ static int host1x_pushbuffer_init(struct push_buffer *pb) ...@@ -76,8 +76,8 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
pb->pos = 0; pb->pos = 0;
/* allocate and map pushbuffer memory */ /* allocate and map pushbuffer memory */
pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4, pb->mapped = dma_alloc_wc(host1x->dev, pb->size_bytes + 4, &pb->phys,
&pb->phys, GFP_KERNEL); GFP_KERNEL);
if (!pb->mapped) if (!pb->mapped)
goto fail; goto fail;
......
...@@ -467,8 +467,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev) ...@@ -467,8 +467,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
size += g->words * sizeof(u32); size += g->words * sizeof(u32);
} }
job->gather_copy_mapped = dma_alloc_writecombine(dev, size, job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
&job->gather_copy,
GFP_KERNEL); GFP_KERNEL);
if (!job->gather_copy_mapped) { if (!job->gather_copy_mapped) {
job->gather_copy_mapped = NULL; job->gather_copy_mapped = NULL;
...@@ -578,9 +577,8 @@ void host1x_job_unpin(struct host1x_job *job) ...@@ -578,9 +577,8 @@ void host1x_job_unpin(struct host1x_job *job)
job->num_unpins = 0; job->num_unpins = 0;
if (job->gather_copy_size) if (job->gather_copy_size)
dma_free_writecombine(job->channel->dev, job->gather_copy_size, dma_free_wc(job->channel->dev, job->gather_copy_size,
job->gather_copy_mapped, job->gather_copy_mapped, job->gather_copy);
job->gather_copy);
} }
EXPORT_SYMBOL(host1x_job_unpin); EXPORT_SYMBOL(host1x_job_unpin);
......
...@@ -1455,8 +1455,8 @@ static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx, ...@@ -1455,8 +1455,8 @@ static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx,
return 0; return 0;
ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2); ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2);
ctx->bitstream.vaddr = dma_alloc_writecombine( ctx->bitstream.vaddr = dma_alloc_wc(&ctx->dev->plat_dev->dev,
&ctx->dev->plat_dev->dev, ctx->bitstream.size, ctx->bitstream.size,
&ctx->bitstream.paddr, GFP_KERNEL); &ctx->bitstream.paddr, GFP_KERNEL);
if (!ctx->bitstream.vaddr) { if (!ctx->bitstream.vaddr) {
v4l2_err(&ctx->dev->v4l2_dev, v4l2_err(&ctx->dev->v4l2_dev,
...@@ -1474,7 +1474,7 @@ static void coda_free_bitstream_buffer(struct coda_ctx *ctx) ...@@ -1474,7 +1474,7 @@ static void coda_free_bitstream_buffer(struct coda_ctx *ctx)
if (ctx->bitstream.vaddr == NULL) if (ctx->bitstream.vaddr == NULL)
return; return;
dma_free_writecombine(&ctx->dev->plat_dev->dev, ctx->bitstream.size, dma_free_wc(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
ctx->bitstream.vaddr, ctx->bitstream.paddr); ctx->bitstream.vaddr, ctx->bitstream.paddr);
ctx->bitstream.vaddr = NULL; ctx->bitstream.vaddr = NULL;
kfifo_init(&ctx->bitstream_fifo, NULL, 0); kfifo_init(&ctx->bitstream_fifo, NULL, 0);
......
...@@ -1040,7 +1040,7 @@ static int acornfb_probe(struct platform_device *dev) ...@@ -1040,7 +1040,7 @@ static int acornfb_probe(struct platform_device *dev)
* for the framebuffer if we are not using * for the framebuffer if we are not using
* VRAM. * VRAM.
*/ */
base = dma_alloc_writecombine(current_par.dev, size, &handle, base = dma_alloc_wc(current_par.dev, size, &handle,
GFP_KERNEL); GFP_KERNEL);
if (base == NULL) { if (base == NULL) {
printk(KERN_ERR "acornfb: unable to allocate screen " printk(KERN_ERR "acornfb: unable to allocate screen "
......
...@@ -154,8 +154,8 @@ int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize) ...@@ -154,8 +154,8 @@ int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
{ {
dma_addr_t dma; dma_addr_t dma;
fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize, fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, framesize, &dma,
&dma, GFP_KERNEL); GFP_KERNEL);
if (!fb->fb.screen_base) { if (!fb->fb.screen_base) {
pr_err("CLCD: unable to map framebuffer\n"); pr_err("CLCD: unable to map framebuffer\n");
return -ENOMEM; return -ENOMEM;
...@@ -169,14 +169,12 @@ int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize) ...@@ -169,14 +169,12 @@ int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma) int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
{ {
return dma_mmap_writecombine(&fb->dev->dev, vma, return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
fb->fb.screen_base, fb->fb.fix.smem_start, fb->fb.fix.smem_len);
fb->fb.fix.smem_start,
fb->fb.fix.smem_len);
} }
void versatile_clcd_remove_dma(struct clcd_fb *fb) void versatile_clcd_remove_dma(struct clcd_fb *fb)
{ {
dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len, dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
fb->fb.screen_base, fb->fb.fix.smem_start); fb->fb.fix.smem_start);
} }
...@@ -774,7 +774,7 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb) ...@@ -774,7 +774,7 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{ {
return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base, return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
fb->fb.fix.smem_start, fb->fb.fix.smem_len); fb->fb.fix.smem_start, fb->fb.fix.smem_len);
} }
......
...@@ -414,8 +414,8 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo) ...@@ -414,8 +414,8 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
{ {
struct fb_info *info = sinfo->info; struct fb_info *info = sinfo->info;
dma_free_writecombine(info->device, info->fix.smem_len, dma_free_wc(info->device, info->fix.smem_len, info->screen_base,
info->screen_base, info->fix.smem_start); info->fix.smem_start);
} }
/** /**
...@@ -435,8 +435,9 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo) ...@@ -435,8 +435,9 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
* ((var->bits_per_pixel + 7) / 8)); * ((var->bits_per_pixel + 7) / 8));
info->fix.smem_len = max(smem_len, sinfo->smem_len); info->fix.smem_len = max(smem_len, sinfo->smem_len);
info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len, info->screen_base = dma_alloc_wc(info->device, info->fix.smem_len,
(dma_addr_t *)&info->fix.smem_start, GFP_KERNEL); (dma_addr_t *)&info->fix.smem_start,
GFP_KERNEL);
if (!info->screen_base) { if (!info->screen_base) {
return -ENOMEM; return -ENOMEM;
......
...@@ -316,9 +316,8 @@ static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma) ...@@ -316,9 +316,8 @@ static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
unsigned int offset = vma->vm_pgoff << PAGE_SHIFT; unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
if (offset < info->fix.smem_len) { if (offset < info->fix.smem_len) {
return dma_mmap_writecombine(info->dev, vma, info->screen_base, return dma_mmap_wc(info->dev, vma, info->screen_base,
info->fix.smem_start, info->fix.smem_start, info->fix.smem_len);
info->fix.smem_len);
} }
return -EINVAL; return -EINVAL;
...@@ -428,8 +427,7 @@ static int ep93xxfb_alloc_videomem(struct fb_info *info) ...@@ -428,8 +427,7 @@ static int ep93xxfb_alloc_videomem(struct fb_info *info)
/* Maximum 16bpp -> used memory is maximum x*y*2 bytes */ /* Maximum 16bpp -> used memory is maximum x*y*2 bytes */
fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES * 2; fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES * 2;
virt_addr = dma_alloc_writecombine(info->dev, fb_size, virt_addr = dma_alloc_wc(info->dev, fb_size, &phys_addr, GFP_KERNEL);
&phys_addr, GFP_KERNEL);
if (!virt_addr) if (!virt_addr)
return -ENOMEM; return -ENOMEM;
......
...@@ -1185,8 +1185,8 @@ static int gbefb_probe(struct platform_device *p_dev) ...@@ -1185,8 +1185,8 @@ static int gbefb_probe(struct platform_device *p_dev)
} else { } else {
/* try to allocate memory with the classical allocator /* try to allocate memory with the classical allocator
* this has high chance to fail on low memory machines */ * this has high chance to fail on low memory machines */
gbe_mem = dma_alloc_writecombine(NULL, gbe_mem_size, gbe_mem = dma_alloc_wc(NULL, gbe_mem_size, &gbe_dma_addr,
&gbe_dma_addr, GFP_KERNEL); GFP_KERNEL);
if (!gbe_mem) { if (!gbe_mem) {
printk(KERN_ERR "gbefb: couldn't allocate framebuffer memory\n"); printk(KERN_ERR "gbefb: couldn't allocate framebuffer memory\n");
ret = -ENOMEM; ret = -ENOMEM;
...@@ -1238,7 +1238,7 @@ static int gbefb_probe(struct platform_device *p_dev) ...@@ -1238,7 +1238,7 @@ static int gbefb_probe(struct platform_device *p_dev)
out_gbe_unmap: out_gbe_unmap:
arch_phys_wc_del(par->wc_cookie); arch_phys_wc_del(par->wc_cookie);
if (gbe_dma_addr) if (gbe_dma_addr)
dma_free_writecombine(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys); dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
out_tiles_free: out_tiles_free:
dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t), dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
(void *)gbe_tiles.cpu, gbe_tiles.dma); (void *)gbe_tiles.cpu, gbe_tiles.dma);
...@@ -1259,7 +1259,7 @@ static int gbefb_remove(struct platform_device* p_dev) ...@@ -1259,7 +1259,7 @@ static int gbefb_remove(struct platform_device* p_dev)
gbe_turn_off(); gbe_turn_off();
arch_phys_wc_del(par->wc_cookie); arch_phys_wc_del(par->wc_cookie);
if (gbe_dma_addr) if (gbe_dma_addr)
dma_free_writecombine(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys); dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t), dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
(void *)gbe_tiles.cpu, gbe_tiles.dma); (void *)gbe_tiles.cpu, gbe_tiles.dma);
release_mem_region(GBE_BASE, sizeof(struct sgi_gbe)); release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
......
...@@ -937,7 +937,7 @@ static int imxfb_probe(struct platform_device *pdev) ...@@ -937,7 +937,7 @@ static int imxfb_probe(struct platform_device *pdev)
} }
fbi->map_size = PAGE_ALIGN(info->fix.smem_len); fbi->map_size = PAGE_ALIGN(info->fix.smem_len);
info->screen_base = dma_alloc_writecombine(&pdev->dev, fbi->map_size, info->screen_base = dma_alloc_wc(&pdev->dev, fbi->map_size,
&fbi->map_dma, GFP_KERNEL); &fbi->map_dma, GFP_KERNEL);
if (!info->screen_base) { if (!info->screen_base) {
...@@ -1005,7 +1005,7 @@ static int imxfb_probe(struct platform_device *pdev) ...@@ -1005,7 +1005,7 @@ static int imxfb_probe(struct platform_device *pdev)
if (pdata && pdata->exit) if (pdata && pdata->exit)
pdata->exit(fbi->pdev); pdata->exit(fbi->pdev);
failed_platform_init: failed_platform_init:
dma_free_writecombine(&pdev->dev, fbi->map_size, info->screen_base, dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
fbi->map_dma); fbi->map_dma);
failed_map: failed_map:
iounmap(fbi->regs); iounmap(fbi->regs);
...@@ -1041,7 +1041,7 @@ static int imxfb_remove(struct platform_device *pdev) ...@@ -1041,7 +1041,7 @@ static int imxfb_remove(struct platform_device *pdev)
kfree(info->pseudo_palette); kfree(info->pseudo_palette);
framebuffer_release(info); framebuffer_release(info);
dma_free_writecombine(&pdev->dev, fbi->map_size, info->screen_base, dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
fbi->map_dma); fbi->map_dma);
iounmap(fbi->regs); iounmap(fbi->regs);
......
...@@ -1336,9 +1336,8 @@ static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, ...@@ -1336,9 +1336,8 @@ static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len,
int retval = 0; int retval = 0;
dma_addr_t addr; dma_addr_t addr;
fbi->screen_base = dma_alloc_writecombine(fbi->device, fbi->screen_base = dma_alloc_wc(fbi->device, mem_len, &addr,
mem_len, GFP_DMA | GFP_KERNEL);
&addr, GFP_DMA | GFP_KERNEL);
if (!fbi->screen_base) { if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n", dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
...@@ -1378,8 +1377,8 @@ static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, ...@@ -1378,8 +1377,8 @@ static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len,
*/ */
static int mx3fb_unmap_video_memory(struct fb_info *fbi) static int mx3fb_unmap_video_memory(struct fb_info *fbi)
{ {
dma_free_writecombine(fbi->device, fbi->fix.smem_len, dma_free_wc(fbi->device, fbi->fix.smem_len, fbi->screen_base,
fbi->screen_base, fbi->fix.smem_start); fbi->fix.smem_start);
fbi->screen_base = NULL; fbi->screen_base = NULL;
mutex_lock(&fbi->mm_lock); mutex_lock(&fbi->mm_lock);
......
...@@ -396,8 +396,8 @@ static int nuc900fb_map_video_memory(struct fb_info *info) ...@@ -396,8 +396,8 @@ static int nuc900fb_map_video_memory(struct fb_info *info)
dev_dbg(fbi->dev, "nuc900fb_map_video_memory(fbi=%p) map_size %lu\n", dev_dbg(fbi->dev, "nuc900fb_map_video_memory(fbi=%p) map_size %lu\n",
fbi, map_size); fbi, map_size);
info->screen_base = dma_alloc_writecombine(fbi->dev, map_size, info->screen_base = dma_alloc_wc(fbi->dev, map_size, &map_dma,
&map_dma, GFP_KERNEL); GFP_KERNEL);
if (!info->screen_base) if (!info->screen_base)
return -ENOMEM; return -ENOMEM;
...@@ -411,7 +411,7 @@ static int nuc900fb_map_video_memory(struct fb_info *info) ...@@ -411,7 +411,7 @@ static int nuc900fb_map_video_memory(struct fb_info *info)
static inline void nuc900fb_unmap_video_memory(struct fb_info *info) static inline void nuc900fb_unmap_video_memory(struct fb_info *info)
{ {
struct nuc900fb_info *fbi = info->par; struct nuc900fb_info *fbi = info->par;
dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len), dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
info->screen_base, info->fix.smem_start); info->screen_base, info->fix.smem_start);
} }
......
...@@ -612,8 +612,8 @@ static void lcdc_dma_handler(u16 status, void *data) ...@@ -612,8 +612,8 @@ static void lcdc_dma_handler(u16 status, void *data)
static int alloc_palette_ram(void) static int alloc_palette_ram(void)
{ {
lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev, lcdc.palette_virt = dma_alloc_wc(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL); &lcdc.palette_phys, GFP_KERNEL);
if (lcdc.palette_virt == NULL) { if (lcdc.palette_virt == NULL) {
dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n"); dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
return -ENOMEM; return -ENOMEM;
...@@ -625,8 +625,8 @@ static int alloc_palette_ram(void) ...@@ -625,8 +625,8 @@ static int alloc_palette_ram(void)
static void free_palette_ram(void) static void free_palette_ram(void)
{ {
dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE, dma_free_wc(lcdc.fbdev->dev, MAX_PALETTE_SIZE, lcdc.palette_virt,
lcdc.palette_virt, lcdc.palette_phys); lcdc.palette_phys);
} }
static int alloc_fbmem(struct omapfb_mem_region *region) static int alloc_fbmem(struct omapfb_mem_region *region)
...@@ -642,8 +642,8 @@ static int alloc_fbmem(struct omapfb_mem_region *region) ...@@ -642,8 +642,8 @@ static int alloc_fbmem(struct omapfb_mem_region *region)
if (region->size > frame_size) if (region->size > frame_size)
frame_size = region->size; frame_size = region->size;
lcdc.vram_size = frame_size; lcdc.vram_size = frame_size;
lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev, lcdc.vram_virt = dma_alloc_wc(lcdc.fbdev->dev, lcdc.vram_size,
lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL); &lcdc.vram_phys, GFP_KERNEL);
if (lcdc.vram_virt == NULL) { if (lcdc.vram_virt == NULL) {
dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n"); dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
return -ENOMEM; return -ENOMEM;
...@@ -660,8 +660,8 @@ static int alloc_fbmem(struct omapfb_mem_region *region) ...@@ -660,8 +660,8 @@ static int alloc_fbmem(struct omapfb_mem_region *region)
static void free_fbmem(void) static void free_fbmem(void)
{ {
dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size, dma_free_wc(lcdc.fbdev->dev, lcdc.vram_size, lcdc.vram_virt,
lcdc.vram_virt, lcdc.vram_phys); lcdc.vram_phys);
} }
static int setup_fbmem(struct omapfb_mem_desc *req_md) static int setup_fbmem(struct omapfb_mem_desc *req_md)
......
...@@ -680,7 +680,7 @@ static int pxa168fb_probe(struct platform_device *pdev) ...@@ -680,7 +680,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
*/ */
info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE); info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE);
info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len, info->screen_base = dma_alloc_wc(fbi->dev, info->fix.smem_len,
&fbi->fb_start_dma, GFP_KERNEL); &fbi->fb_start_dma, GFP_KERNEL);
if (info->screen_base == NULL) { if (info->screen_base == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -804,7 +804,7 @@ static int pxa168fb_remove(struct platform_device *pdev) ...@@ -804,7 +804,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len), dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
info->screen_base, info->fix.smem_start); info->screen_base, info->fix.smem_start);
clk_disable(fbi->clk); clk_disable(fbi->clk);
......
...@@ -2446,8 +2446,8 @@ static int pxafb_remove(struct platform_device *dev) ...@@ -2446,8 +2446,8 @@ static int pxafb_remove(struct platform_device *dev)
free_pages_exact(fbi->video_mem, fbi->video_mem_size); free_pages_exact(fbi->video_mem, fbi->video_mem_size);
dma_free_writecombine(&dev->dev, fbi->dma_buff_size, dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
fbi->dma_buff, fbi->dma_buff_phys); fbi->dma_buff_phys);
iounmap(fbi->mmio_base); iounmap(fbi->mmio_base);
......
...@@ -1105,8 +1105,7 @@ static int s3c_fb_alloc_memory(struct s3c_fb *sfb, struct s3c_fb_win *win) ...@@ -1105,8 +1105,7 @@ static int s3c_fb_alloc_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
dev_dbg(sfb->dev, "want %u bytes for window\n", size); dev_dbg(sfb->dev, "want %u bytes for window\n", size);
fbi->screen_base = dma_alloc_writecombine(sfb->dev, size, fbi->screen_base = dma_alloc_wc(sfb->dev, size, &map_dma, GFP_KERNEL);
&map_dma, GFP_KERNEL);
if (!fbi->screen_base) if (!fbi->screen_base)
return -ENOMEM; return -ENOMEM;
...@@ -1131,7 +1130,7 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win) ...@@ -1131,7 +1130,7 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
struct fb_info *fbi = win->fbinfo; struct fb_info *fbi = win->fbinfo;
if (fbi->screen_base) if (fbi->screen_base)
dma_free_writecombine(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len), dma_free_wc(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len),
fbi->screen_base, fbi->fix.smem_start); fbi->screen_base, fbi->fix.smem_start);
} }
......
...@@ -645,8 +645,8 @@ static int s3c2410fb_map_video_memory(struct fb_info *info) ...@@ -645,8 +645,8 @@ static int s3c2410fb_map_video_memory(struct fb_info *info)
dprintk("map_video_memory(fbi=%p) map_size %u\n", fbi, map_size); dprintk("map_video_memory(fbi=%p) map_size %u\n", fbi, map_size);
info->screen_base = dma_alloc_writecombine(fbi->dev, map_size, info->screen_base = dma_alloc_wc(fbi->dev, map_size, &map_dma,
&map_dma, GFP_KERNEL); GFP_KERNEL);
if (info->screen_base) { if (info->screen_base) {
/* prevent initial garbage on screen */ /* prevent initial garbage on screen */
...@@ -667,7 +667,7 @@ static inline void s3c2410fb_unmap_video_memory(struct fb_info *info) ...@@ -667,7 +667,7 @@ static inline void s3c2410fb_unmap_video_memory(struct fb_info *info)
{ {
struct s3c2410fb_info *fbi = info->par; struct s3c2410fb_info *fbi = info->par;
dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len), dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
info->screen_base, info->fix.smem_start); info->screen_base, info->fix.smem_start);
} }
......
...@@ -567,8 +567,8 @@ static int sa1100fb_mmap(struct fb_info *info, ...@@ -567,8 +567,8 @@ static int sa1100fb_mmap(struct fb_info *info,
if (off < info->fix.smem_len) { if (off < info->fix.smem_len) {
vma->vm_pgoff += 1; /* skip over the palette */ vma->vm_pgoff += 1; /* skip over the palette */
return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu, return dma_mmap_wc(fbi->dev, vma, fbi->map_cpu, fbi->map_dma,
fbi->map_dma, fbi->map_size); fbi->map_size);
} }
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
...@@ -1099,8 +1099,8 @@ static int sa1100fb_map_video_memory(struct sa1100fb_info *fbi) ...@@ -1099,8 +1099,8 @@ static int sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
* of the framebuffer. * of the framebuffer.
*/ */
fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE); fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE);
fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size, fbi->map_cpu = dma_alloc_wc(fbi->dev, fbi->map_size, &fbi->map_dma,
&fbi->map_dma, GFP_KERNEL); GFP_KERNEL);
if (fbi->map_cpu) { if (fbi->map_cpu) {
fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE; fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE;
......
...@@ -641,23 +641,29 @@ static inline void dmam_release_declared_memory(struct device *dev) ...@@ -641,23 +641,29 @@ static inline void dmam_release_declared_memory(struct device *dev)
} }
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_writecombine(struct device *dev, size_t size, static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp) dma_addr_t *dma_addr, gfp_t gfp)
{ {
DEFINE_DMA_ATTRS(attrs); DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs); return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
} }
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif
static inline void dma_free_writecombine(struct device *dev, size_t size, static inline void dma_free_wc(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr) void *cpu_addr, dma_addr_t dma_addr)
{ {
DEFINE_DMA_ATTRS(attrs); DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs); return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
} }
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif
static inline int dma_mmap_writecombine(struct device *dev, static inline int dma_mmap_wc(struct device *dev,
struct vm_area_struct *vma, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, void *cpu_addr, dma_addr_t dma_addr,
size_t size) size_t size)
...@@ -666,6 +672,9 @@ static inline int dma_mmap_writecombine(struct device *dev, ...@@ -666,6 +672,9 @@ static inline int dma_mmap_writecombine(struct device *dev,
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
} }
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
#ifdef CONFIG_NEED_DMA_MAP_STATE #ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
......
...@@ -141,10 +141,8 @@ int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream, ...@@ -141,10 +141,8 @@ int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma) struct vm_area_struct *vma)
{ {
struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_runtime *runtime = substream->runtime;
return dma_mmap_writecombine(substream->pcm->card->dev, vma, return dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
runtime->dma_area, runtime->dma_addr, runtime->dma_bytes);
runtime->dma_addr,
runtime->dma_bytes);
} }
EXPORT_SYMBOL(pxa2xx_pcm_mmap); EXPORT_SYMBOL(pxa2xx_pcm_mmap);
...@@ -156,8 +154,7 @@ int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) ...@@ -156,8 +154,7 @@ int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev; buf->dev.dev = pcm->card->dev;
buf->private_data = NULL; buf->private_data = NULL;
buf->area = dma_alloc_writecombine(pcm->card->dev, size, buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
&buf->addr, GFP_KERNEL);
if (!buf->area) if (!buf->area)
return -ENOMEM; return -ENOMEM;
buf->bytes = size; buf->bytes = size;
...@@ -178,8 +175,7 @@ void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm) ...@@ -178,8 +175,7 @@ void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
buf = &substream->dma_buffer; buf = &substream->dma_buffer;
if (!buf->area) if (!buf->area)
continue; continue;
dma_free_writecombine(pcm->card->dev, buf->bytes, dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
buf->area, buf->addr);
buf->area = NULL; buf->area = NULL;
} }
} }
......
...@@ -217,8 +217,8 @@ static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream, ...@@ -217,8 +217,8 @@ static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_runtime *runtime = substream->runtime;
int ret; int ret;
ret = dma_mmap_writecombine(substream->pcm->card->dev, vma, ret = dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
runtime->dma_area, runtime->dma_addr, runtime->dma_bytes); runtime->dma_addr, runtime->dma_bytes);
pr_debug("%s: ret: %d %p %pad 0x%08x\n", __func__, ret, pr_debug("%s: ret: %d %p %pad 0x%08x\n", __func__, ret,
runtime->dma_area, runtime->dma_area,
...@@ -247,8 +247,7 @@ static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) ...@@ -247,8 +247,7 @@ static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev; buf->dev.dev = pcm->card->dev;
buf->private_data = NULL; buf->private_data = NULL;
buf->area = dma_alloc_writecombine(pcm->card->dev, size, buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
&buf->addr, GFP_KERNEL);
if (!buf->area) if (!buf->area)
return -ENOMEM; return -ENOMEM;
buf->bytes = size; buf->bytes = size;
...@@ -330,8 +329,7 @@ static void imx_pcm_free(struct snd_pcm *pcm) ...@@ -330,8 +329,7 @@ static void imx_pcm_free(struct snd_pcm *pcm)
if (!buf->area) if (!buf->area)
continue; continue;
dma_free_writecombine(pcm->card->dev, buf->bytes, dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
buf->area, buf->addr);
buf->area = NULL; buf->area = NULL;
} }
} }
......
...@@ -267,10 +267,8 @@ static int nuc900_dma_mmap(struct snd_pcm_substream *substream, ...@@ -267,10 +267,8 @@ static int nuc900_dma_mmap(struct snd_pcm_substream *substream,
{ {
struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_runtime *runtime = substream->runtime;
return dma_mmap_writecombine(substream->pcm->card->dev, vma, return dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
runtime->dma_area, runtime->dma_addr, runtime->dma_bytes);
runtime->dma_addr,
runtime->dma_bytes);
} }
static struct snd_pcm_ops nuc900_dma_ops = { static struct snd_pcm_ops nuc900_dma_ops = {
......
...@@ -156,10 +156,8 @@ static int omap_pcm_mmap(struct snd_pcm_substream *substream, ...@@ -156,10 +156,8 @@ static int omap_pcm_mmap(struct snd_pcm_substream *substream,
{ {
struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_runtime *runtime = substream->runtime;
return dma_mmap_writecombine(substream->pcm->card->dev, vma, return dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
runtime->dma_area, runtime->dma_addr, runtime->dma_bytes);
runtime->dma_addr,
runtime->dma_bytes);
} }
static struct snd_pcm_ops omap_pcm_ops = { static struct snd_pcm_ops omap_pcm_ops = {
...@@ -183,8 +181,7 @@ static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, ...@@ -183,8 +181,7 @@ static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev; buf->dev.dev = pcm->card->dev;
buf->private_data = NULL; buf->private_data = NULL;
buf->area = dma_alloc_writecombine(pcm->card->dev, size, buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
&buf->addr, GFP_KERNEL);
if (!buf->area) if (!buf->area)
return -ENOMEM; return -ENOMEM;
...@@ -207,8 +204,7 @@ static void omap_pcm_free_dma_buffers(struct snd_pcm *pcm) ...@@ -207,8 +204,7 @@ static void omap_pcm_free_dma_buffers(struct snd_pcm *pcm)
if (!buf->area) if (!buf->area)
continue; continue;
dma_free_writecombine(pcm->card->dev, buf->bytes, dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
buf->area, buf->addr);
buf->area = NULL; buf->area = NULL;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment