Commit b49efd76 authored by Christoph Hellwig

dma-mapping: move dma_mark_clean to dma-direct.h

Unlike the other helpers, we don't require an <asm/dma-direct.h> override, as this helper is an ia64-only special case, and this keeps it as simple as possible.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent ea8c64ac
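
The patch centralizes the dma_mark_clean() hook in <linux/dma-direct.h>: an architecture that selects CONFIG_ARCH_HAS_DMA_MARK_CLEAN (only ia64 after this patch) gets the real prototype, every other architecture gets an empty inline stub, so generic DMA code can call the hook unconditionally. A minimal sketch of that caller-side pattern follows; example_sync_for_cpu() is a hypothetical caller for illustration, not code from this patch.

#include <linux/types.h>

/* Header side, mirroring the dma-direct.h hunk below. */
#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void dma_mark_clean(void *addr, size_t size);	/* out-of-line, arch-provided */
#else
static inline void dma_mark_clean(void *addr, size_t size)
{
	/* compiles away on architectures that do not select the option */
}
#endif

/* Hypothetical generic caller: no #ifdef needed at the call site. */
static void example_sync_for_cpu(void *vaddr, size_t size)
{
	dma_mark_clean(vaddr, size);
}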
@@ -109,8 +109,6 @@ static inline bool is_device_dma_coherent(struct device *dev)
 	return dev->archdata.dma_coherent;
 }
-static inline void dma_mark_clean(void *addr, size_t size) { }
 /**
  * arm_dma_alloc - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
...
@@ -50,9 +50,5 @@ static inline bool is_device_dma_coherent(struct device *dev)
 	return dev->archdata.dma_coherent;
 }
-static inline void dma_mark_clean(void *addr, size_t size)
-{
-}
 #endif /* __KERNEL__ */
 #endif /* __ASM_DMA_MAPPING_H */
@@ -33,6 +33,7 @@ config IA64
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_VIRT_CPU_ACCOUNTING
+	select ARCH_HAS_DMA_MARK_CLEAN
 	select ARCH_HAS_SG_CHAIN
 	select VIRT_TO_BUS
 	select ARCH_DISCARD_MEMBLOCK
...
@@ -20,6 +20,4 @@ extern unsigned long MAX_DMA_ADDRESS;
 #define free_dma(x)
-void dma_mark_clean(void *addr, size_t size);
 #endif /* _ASM_IA64_DMA_H */
@@ -17,8 +17,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return mips_dma_map_ops;
 }
-static inline void dma_mark_clean(void *addr, size_t size) {}
 #define arch_setup_dma_ops arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
 				      u64 size, const struct iommu_ops *iommu,
...
@@ -15,8 +15,6 @@
 extern const struct dma_map_ops swiotlb_dma_ops;
-static inline void dma_mark_clean(void *addr, size_t size) {}
 extern unsigned int ppc_swiotlb_enable;
 int __init swiotlb_setup_bus_notifier(void);
...
@@ -44,8 +44,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 		dev->archdata.dma_offset = off;
 }
-static inline void dma_mark_clean(void *addr, size_t size) {}
 #define HAVE_ARCH_DMA_SET_MASK 1
 int dma_set_mask(struct device *dev, u64 mask);
...
@@ -25,7 +25,5 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &swiotlb_dma_map_ops;
 }
-static inline void dma_mark_clean(void *addr, size_t size) {}
 #endif /* __KERNEL__ */
 #endif
@@ -28,8 +28,6 @@ static inline void pci_swiotlb_late_init(void)
 }
 #endif
-static inline void dma_mark_clean(void *addr, size_t size) {}
 extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flags,
 					unsigned long attrs);
...
@@ -29,4 +29,13 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	return addr + size - 1 <= *dev->dma_mask;
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void dma_mark_clean(void *addr, size_t size);
+#else
+static inline void dma_mark_clean(void *addr, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
 #endif /* _LINUX_DMA_DIRECT_H */
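
To opt in to the hook, an architecture selects ARCH_HAS_DMA_MARK_CLEAN in its Kconfig (as the ia64 hunk above does) and provides an out-of-line definition matching the prototype now declared in <linux/dma-direct.h>. The body below is an illustrative sketch of such a definition, not the actual ia64 implementation: it assumes a per-page bookkeeping scheme using the arch-private PG_arch_1 flag so that later cache maintenance can treat the DMA'd pages as clean.

#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Illustrative sketch only: mark every page fully covered by the DMA
 * buffer as clean. Architectures that do not select
 * ARCH_HAS_DMA_MARK_CLEAN keep the empty inline stub and pay no cost.
 */
void dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg = PAGE_ALIGN((unsigned long)addr);
	unsigned long end = (unsigned long)addr + size;

	while (pg + PAGE_SIZE <= end) {
		set_bit(PG_arch_1, &virt_to_page((void *)pg)->flags);
		pg += PAGE_SIZE;
	}
}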