Commit 53b7670e authored by Christoph Hellwig's avatar Christoph Hellwig

sparc: factor the dma coherent mapping into helper

Factor the code to remap memory returned from the DMA coherent allocator
into two helpers that can be shared by the IOMMU and direct mapping code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Sam Ravnborg <sam@ravnborg.org>
parent 7227b202
...@@ -247,6 +247,53 @@ static void _sparc_free_io(struct resource *res) ...@@ -247,6 +247,53 @@ static void _sparc_free_io(struct resource *res)
release_resource(res); release_resource(res);
} }
/*
 * Reserve @len bytes of DVMA address space for @dev.
 *
 * A struct resource is allocated to track the region and is inserted into
 * the _sparc_dvma tree, page-aligned.  The matching teardown is
 * sparc_dma_free_resource().
 *
 * Returns the start address of the reserved region, or 0 on failure
 * (out of memory, or no free range in _sparc_dvma).
 */
static unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
{
	struct resource *res;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return 0;
	res->name = dev->of_node->name;

	if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start,
			      _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* fix: message was missing '\n', merging with the next log line */
		printk("%s: cannot occupy 0x%zx\n", __func__, len);
		kfree(res);
		return 0;
	}

	return res->start;
}
/*
 * Release a DVMA region previously reserved by sparc_dma_alloc_resource().
 *
 * @cpu_addr must be the page-aligned start address that was returned by the
 * allocator, and @size the original request (it is page-aligned here before
 * being compared against the recorded resource size).
 *
 * Returns true when the region was found, validated and released; false on
 * any sanity-check failure, in which case nothing is freed.
 */
static bool sparc_dma_free_resource(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	struct resource *r = lookup_resource(&_sparc_dvma, va);

	/* The address must correspond to a region we actually handed out. */
	if (!r) {
		printk("%s: cannot free %p\n", __func__, cpu_addr);
		return false;
	}

	/* Allocations are page-granular, so a misaligned address is bogus. */
	if (va & (PAGE_SIZE - 1)) {
		printk("%s: unaligned va %p\n", __func__, cpu_addr);
		return false;
	}

	/* Caller's size, rounded up, must match what was reserved. */
	size = PAGE_ALIGN(size);
	if (resource_size(r) != size) {
		printk("%s: region 0x%lx asked 0x%zx\n",
		       __func__, (long)resource_size(r), size);
		return false;
	}

	release_resource(r);
	kfree(r);
	return true;
}
#ifdef CONFIG_SBUS #ifdef CONFIG_SBUS
void sbus_set_sbus64(struct device *dev, int x) void sbus_set_sbus64(struct device *dev, int x)
...@@ -264,10 +311,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len, ...@@ -264,10 +311,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *dma_addrp, gfp_t gfp, dma_addr_t *dma_addrp, gfp_t gfp,
unsigned long attrs) unsigned long attrs)
{ {
struct platform_device *op = to_platform_device(dev);
unsigned long len_total = PAGE_ALIGN(len); unsigned long len_total = PAGE_ALIGN(len);
unsigned long va; unsigned long va, addr;
struct resource *res;
int order; int order;
/* XXX why are some lengths signed, others unsigned? */ /* XXX why are some lengths signed, others unsigned? */
...@@ -284,32 +329,23 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len, ...@@ -284,32 +329,23 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
if (va == 0) if (va == 0)
goto err_nopages; goto err_nopages;
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) addr = sparc_dma_alloc_resource(dev, len_total);
if (!addr)
goto err_nomem; goto err_nomem;
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
goto err_nova;
}
// XXX The sbus_map_dma_area does this for us below, see comments. // XXX The sbus_map_dma_area does this for us below, see comments.
// srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total); // srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
/* /*
* XXX That's where sdev would be used. Currently we load * XXX That's where sdev would be used. Currently we load
* all iommu tables with the same translations. * all iommu tables with the same translations.
*/ */
if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0) if (sbus_map_dma_area(dev, dma_addrp, va, addr, len_total) != 0)
goto err_noiommu; goto err_noiommu;
res->name = op->dev.of_node->name; return (void *)addr;
return (void *)(unsigned long)res->start;
err_noiommu: err_noiommu:
release_resource(res); sparc_dma_free_resource((void *)addr, len_total);
err_nova:
kfree(res);
err_nomem: err_nomem:
free_pages(va, order); free_pages(va, order);
err_nopages: err_nopages:
...@@ -319,29 +355,11 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len, ...@@ -319,29 +355,11 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
static void sbus_free_coherent(struct device *dev, size_t n, void *p, static void sbus_free_coherent(struct device *dev, size_t n, void *p,
dma_addr_t ba, unsigned long attrs) dma_addr_t ba, unsigned long attrs)
{ {
struct resource *res;
struct page *pgv; struct page *pgv;
if ((res = lookup_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
printk("sbus_free_consistent: cannot free %p\n", p);
return;
}
if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
printk("sbus_free_consistent: unaligned va %p\n", p);
return;
}
n = PAGE_ALIGN(n); n = PAGE_ALIGN(n);
if (resource_size(res) != n) { if (!sparc_dma_free_resource(p, n))
printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
(long)resource_size(res), n);
return; return;
}
release_resource(res);
kfree(res);
pgv = virt_to_page(p); pgv = virt_to_page(p);
sbus_unmap_dma_area(dev, ba, n); sbus_unmap_dma_area(dev, ba, n);
...@@ -418,45 +436,30 @@ arch_initcall(sparc_register_ioport); ...@@ -418,45 +436,30 @@ arch_initcall(sparc_register_ioport);
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs) gfp_t gfp, unsigned long attrs)
{ {
unsigned long len_total = PAGE_ALIGN(size); unsigned long addr;
void *va; void *va;
struct resource *res;
int order;
if (size == 0) { if (!size || size > 256 * 1024) /* __get_free_pages() limit */
return NULL; return NULL;
}
if (size > 256*1024) { /* __get_free_pages() limit */
return NULL;
}
order = get_order(len_total); size = PAGE_ALIGN(size);
va = (void *) __get_free_pages(gfp, order); va = (void *) __get_free_pages(gfp, get_order(size));
if (va == NULL) { if (!va) {
printk("%s: no %ld pages\n", __func__, len_total>>PAGE_SHIFT); printk("%s: no %zd pages\n", __func__, size >> PAGE_SHIFT);
goto err_nopages; return NULL;
} }
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { addr = sparc_dma_alloc_resource(dev, size);
printk("%s: no core\n", __func__); if (!addr)
goto err_nomem; goto err_nomem;
}
if (allocate_resource(&_sparc_dvma, res, len_total, srmmu_mapiorange(0, virt_to_phys(va), addr, size);
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
printk("%s: cannot occupy 0x%lx", __func__, len_total);
goto err_nova;
}
srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
*dma_handle = virt_to_phys(va); *dma_handle = virt_to_phys(va);
return (void *) res->start; return (void *)addr;
err_nova:
kfree(res);
err_nomem: err_nomem:
free_pages((unsigned long)va, order); free_pages((unsigned long)va, get_order(size));
err_nopages:
return NULL; return NULL;
} }
...@@ -471,31 +474,11 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, ...@@ -471,31 +474,11 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs) dma_addr_t dma_addr, unsigned long attrs)
{ {
struct resource *res; if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
if ((res = lookup_resource(&_sparc_dvma,
(unsigned long)cpu_addr)) == NULL) {
printk("%s: cannot free %p\n", __func__, cpu_addr);
return;
}
if (((unsigned long)cpu_addr & (PAGE_SIZE-1)) != 0) {
printk("%s: unaligned va %p\n", __func__, cpu_addr);
return; return;
}
size = PAGE_ALIGN(size);
if (resource_size(res) != size) {
printk("%s: region 0x%lx asked 0x%zx\n", __func__,
(long)resource_size(res), size);
return;
}
dma_make_coherent(dma_addr, size); dma_make_coherent(dma_addr, size);
srmmu_unmapiorange((unsigned long)cpu_addr, size); srmmu_unmapiorange((unsigned long)cpu_addr, size);
release_resource(res);
kfree(res);
free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size)); free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment