Commit 3b9c6c11 authored by FUJITA Tomonori, committed by Linus Torvalds

dma-mapping: remove dma_is_consistent API

Architectures implement dma_is_consistent() in different ways (some
misinterpret the definition of the API in DMA-API.txt), so it has not been
very useful for drivers.  There is only one user of the API in the tree,
and it is unlikely that out-of-tree drivers use it.

Even if dma_is_consistent() were fixed on the architectures that get it
wrong, it would still not be useful: it was invented long ago for old
systems that cannot allocate coherent memory at all.  It is better to
export only the APIs that drivers actually need.

Let's remove this API.
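
For illustration only (this sketch is not part of the patch, and the helper
name is made up): with dma_is_consistent() gone, a driver no longer branches
on coherency.  It either allocates with dma_alloc_coherent(), or uses the
nonconsistent allocation API and brackets CPU accesses with dma_cache_sync(),
which is a no-op on coherent platforms (see the alpha hunk below), so the
same code works either way.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>

/*
 * Hypothetical example, not from the kernel tree: set up a
 * device-visible buffer without asking whether the platform does
 * consistent DMA.
 */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	void *ring = dma_alloc_noncoherent(dev, size, handle, GFP_KERNEL);

	if (!ring)
		return NULL;

	memset(ring, 0, size);
	/* Flush the CPU-initialised contents out to the device. */
	dma_cache_sync(dev, ring, size, DMA_TO_DEVICE);
	return ring;
}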
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d80e0d96
@@ -455,12 +455,6 @@ Free memory allocated by the nonconsistent API. All parameters must
 be identical to those passed in (and returned by
 dma_alloc_noncoherent()).
-int
-dma_is_consistent(struct device *dev, dma_addr_t dma_handle)
-Returns true if the device dev is performing consistent DMA on the memory
-area pointed to by the dma_handle.
 int
 dma_get_cache_alignment(void)
@@ -41,7 +41,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
 #define dma_cache_sync(dev, va, size, dir) ((void)0)
@@ -144,11 +144,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
-static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
-{
-	return !!arch_is_coherent();
-}
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
@@ -336,9 +336,4 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
-{
-	return 1;
-}
 #endif /* __ASM_AVR32_DMA_MAPPING_H */
@@ -21,7 +21,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_supported(d, m) (1)
-#define dma_is_consistent(d, h) (1)
 static inline int
 dma_set_mask(struct device *dev, u64 dma_mask)
@@ -152,8 +152,6 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
-#define dma_is_consistent(d, h) (1)
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
@@ -125,8 +125,6 @@ int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
-#define dma_is_consistent(d, h) (1)
 static inline
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
@@ -97,6 +97,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 	mb();
 }
-#define dma_is_consistent(d, h) (1)	/* all we do is coherent memory... */
 #endif /* _ASM_IA64_DMA_MAPPING_H */
@@ -16,11 +16,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
-static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
 extern void *dma_alloc_coherent(struct device *, size_t,
 				dma_addr_t *, gfp_t);
 extern void dma_free_coherent(struct device *, size_t,
@@ -106,7 +106,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flag)
@@ -62,8 +62,6 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
-extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
@@ -357,13 +357,6 @@ int dma_supported(struct device *dev, u64 mask)
 EXPORT_SYMBOL(dma_supported);
-int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
-{
-	return plat_device_is_coherent(dev);
-}
-EXPORT_SYMBOL(dma_is_consistent);
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
@@ -161,8 +161,6 @@ int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
-#define dma_is_consistent(d) (1)
 static inline
 void dma_cache_sync(void *vaddr, size_t size,
 		    enum dma_data_direction direction)
@@ -184,12 +184,6 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
-static inline int
-dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
-{
-	return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
-}
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
@@ -209,11 +209,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_is_consistent(d, h) (0)
-#else
-#define dma_is_consistent(d, h) (1)
-#endif
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
@@ -42,12 +42,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#ifdef CONFIG_DMA_COHERENT
-#define dma_is_consistent(d, h) (1)
-#else
-#define dma_is_consistent(d, h) (0)
-#endif
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
@@ -11,7 +11,6 @@ extern int dma_supported(struct device *dev, u64 mask);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
 extern struct dma_map_ops *dma_ops, pci32_dma_ops;
 extern struct bus_type pci_bus_type;
@@ -90,6 +90,4 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
-#define dma_is_consistent(d, h) (1)
 #endif /* _ASM_TILE_DMA_MAPPING_H */
@@ -94,7 +94,6 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
 static inline int
 dma_get_cache_alignment(void)
@@ -54,7 +54,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
@@ -161,8 +161,6 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
-#define dma_is_consistent(d, h) (1)
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
@@ -72,9 +72,6 @@ dma_set_mask(struct device *dev, u64 mask);
 extern int
 dma_get_cache_alignment(void);
-extern int
-dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
 extern void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);