Commit 1bd4d8a6 authored by Thomas Petazzoni, committed by Andrew Lunn

ARM: mvebu: use arm_coherent_dma_ops and re-enable hardware I/O coherency

Now that we have enabled automatic I/O synchronization barriers, we no
longer need any explicit barriers. We can therefore simplify
arch/arm/mach-mvebu/coherency.c by using the existing
arm_coherent_dma_ops instead of our custom mvebu_hwcc_dma_ops, and
re-enable hardware I/O coherency support.
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
[Andrew Lunn <andrew@lunn.ch>: Remove forgotten comment]
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
parent a0b5cd4a
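For context before the diff: the mechanism this patch keeps is a bus notifier that installs DMA ops on every device added to the platform bus; the patch only swaps which ops get installed. Below is a minimal sketch of that pattern, simplified from the post-commit state of arch/arm/mach-mvebu/coherency.c — the names hwcc_notifier, hwcc_nb and hwcc_register_notifier are illustrative, not the kernel's symbols.

/*
 * Sketch, not verbatim kernel code: point every platform-bus device
 * at the generic coherent DMA ops. With automatic I/O synchronization
 * barriers enabled in hardware, no per-transfer cache maintenance or
 * explicit barrier is needed.
 */
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <asm/dma-mapping.h>

static int hwcc_notifier(struct notifier_block *nb,
                         unsigned long event, void *__dev)
{
        struct device *dev = __dev;

        /* Only act when a device is being added to the bus. */
        if (event != BUS_NOTIFY_ADD_DEVICE)
                return NOTIFY_DONE;
        /* Was &mvebu_hwcc_dma_ops before this commit. */
        set_dma_ops(dev, &arm_coherent_dma_ops);
        return NOTIFY_OK;
}

static struct notifier_block hwcc_nb = {
        .notifier_call = hwcc_notifier,
};

/* Illustrative registration helper; the kernel does this from its
 * coherency init path when a coherency fabric is present. */
static void hwcc_register_notifier(void)
{
        bus_register_notifier(&platform_bus_type, &hwcc_nb);
}

With arm_coherent_dma_ops installed, streaming mappings reduce to address translation: no cache maintenance, and no explicit barrier like the mvebu_hwcc_sync_io_barrier() the diff removes.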
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -33,6 +33,7 @@
 #include <asm/smp_plat.h>
 #include <asm/cacheflush.h>
 #include <asm/mach/map.h>
+#include <asm/dma-mapping.h>
 #include "coherency.h"
 #include "mvebu-soc-id.h"
@@ -76,54 +77,6 @@ int set_cpu_coherent(void)
 	return ll_enable_coherency();
 }
 
-static inline void mvebu_hwcc_sync_io_barrier(void)
-{
-	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
-	while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
-}
-
-static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
-					  unsigned long offset, size_t size,
-					  enum dma_data_direction dir,
-					  struct dma_attrs *attrs)
-{
-	if (dir != DMA_TO_DEVICE)
-		mvebu_hwcc_sync_io_barrier();
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
-				      size_t size, enum dma_data_direction dir,
-				      struct dma_attrs *attrs)
-{
-	if (dir != DMA_TO_DEVICE)
-		mvebu_hwcc_sync_io_barrier();
-}
-
-static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
-				size_t size, enum dma_data_direction dir)
-{
-	if (dir != DMA_TO_DEVICE)
-		mvebu_hwcc_sync_io_barrier();
-}
-
-static struct dma_map_ops mvebu_hwcc_dma_ops = {
-	.alloc			= arm_dma_alloc,
-	.free			= arm_dma_free,
-	.mmap			= arm_dma_mmap,
-	.map_page		= mvebu_hwcc_dma_map_page,
-	.unmap_page		= mvebu_hwcc_dma_unmap_page,
-	.get_sgtable		= arm_dma_get_sgtable,
-	.map_sg			= arm_dma_map_sg,
-	.unmap_sg		= arm_dma_unmap_sg,
-	.sync_single_for_cpu	= mvebu_hwcc_dma_sync,
-	.sync_single_for_device	= mvebu_hwcc_dma_sync,
-	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
-	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
-	.set_dma_mask		= arm_dma_set_mask,
-};
-
 static int mvebu_hwcc_notifier(struct notifier_block *nb,
 			       unsigned long event, void *__dev)
 {
@@ -131,7 +84,7 @@ static int mvebu_hwcc_notifier(struct notifier_block *nb,
 	if (event != BUS_NOTIFY_ADD_DEVICE)
 		return NOTIFY_DONE;
-	set_dma_ops(dev, &mvebu_hwcc_dma_ops);
+	set_dma_ops(dev, &arm_coherent_dma_ops);
 	return NOTIFY_OK;
 }
@@ -246,14 +199,9 @@ static int coherency_type(void)
 	return type;
 }
 
-/*
- * As a precaution, we currently completely disable hardware I/O
- * coherency, until enough testing is done with automatic I/O
- * synchronization barriers to validate that it is a proper solution.
- */
 int coherency_available(void)
 {
-	return false;
+	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
 }
 
 int __init coherency_init(void)
...