Commit 419e2f18 authored by Christoph Hellwig

dma-mapping: remove arch_dma_mmap_pgprot

arch_dma_mmap_pgprot is used for two things:

 1) to override the "normal" uncached page attributes for mapping
    memory coherent to devices that can't snoop the CPU caches
 2) to provide the special DMA_ATTR_WRITE_COMBINE semantics on older
    arm systems and some mips platforms

Replace the first use with the pgprot_dmacoherent macro that arm already
provides and that is much simpler to use, and lift the DMA_ATTR_WRITE_COMBINE
handling into common code with an explicit arch opt-in.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>	# m68k
Acked-by: Paul Burton <paul.burton@mips.com>		# mips
parent b898e50f
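For context, the page protection chosen by dma_pgprot() is what a driver gets
when it maps a coherent DMA buffer into user space. The sketch below is
illustrative only: struct foo_dev, foo_alloc() and foo_mmap() are made-up
names, while dma_alloc_attrs(), dma_mmap_attrs() and DMA_ATTR_WRITE_COMBINE
are the existing DMA API. dma_mmap_attrs() is the path that ends up calling
dma_pgprot(), which the last hunk of this patch changes.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical driver state, for illustration only. */
struct foo_dev {
	struct device	*dev;
	void		*cpu_addr;
	dma_addr_t	dma_handle;
	size_t		size;
};

static int foo_alloc(struct foo_dev *foo)
{
	/* Ask for a write-combined coherent buffer. */
	foo->cpu_addr = dma_alloc_attrs(foo->dev, foo->size, &foo->dma_handle,
					GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
	return foo->cpu_addr ? 0 : -ENOMEM;
}

static int foo_mmap(struct foo_dev *foo, struct vm_area_struct *vma)
{
	/*
	 * dma_mmap_attrs() -> dma_pgprot(): after this patch the vma is
	 * write-combined only if the architecture selects
	 * ARCH_HAS_DMA_WRITE_COMBINE, otherwise it uses pgprot_dmacoherent().
	 */
	return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr, foo->dma_handle,
			      foo->size, DMA_ATTR_WRITE_COMBINE);
}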
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -8,7 +8,7 @@ config ARM
 	select ARCH_HAS_DEBUG_VIRTUAL if MMU
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
-	select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
+	select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_KEEPINITRD
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2402,12 +2402,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 	return dma_to_pfn(dev, dma_addr);
 }
 
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
-{
-	return __get_dma_pgprot(attrs, prot);
-}
-
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs)
 {
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -13,7 +13,6 @@ config ARM64
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_DMA_COHERENT_TO_PFN
-	select ARCH_HAS_DMA_MMAP_PGPROT
 	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
 	select ARCH_HAS_ELF_RANDOMIZE
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -435,6 +435,10 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 #define pgprot_device(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
+			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -11,12 +11,6 @@
 #include <asm/cacheflush.h>
 
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
-{
-	return pgprot_writecombine(prot);
-}
-
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir)
 {
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -4,7 +4,6 @@ config M68K
 	default y
 	select ARCH_32BIT_OFF_T
 	select ARCH_HAS_BINFMT_FLAT
-	select ARCH_HAS_DMA_MMAP_PGPROT if MMU && !COLDFIRE
 	select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
 	select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -169,6 +169,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 		? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S))	\
 		: (prot)))
 
+pgprot_t pgprot_dmacoherent(pgprot_t prot);
+#define pgprot_dmacoherent(prot)	pgprot_dmacoherent(prot)
+
 #endif	/* CONFIG_COLDFIRE */
 #include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -23,8 +23,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	cache_push(page_to_phys(page), size);
 }
 
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
+pgprot_t pgprot_dmacoherent(pgprot_t prot)
 {
 	if (CPU_IS_040_OR_060) {
 		pgprot_val(prot) &= ~_PAGE_CACHE040;
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1119,7 +1119,7 @@ config DMA_PERDEV_COHERENT
 
 config DMA_NONCOHERENT
 	bool
-	select ARCH_HAS_DMA_MMAP_PGPROT
+	select ARCH_HAS_DMA_WRITE_COMBINE
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_HAS_UNCACHED_SEGMENT
 	select NEED_DMA_MAP_STATE
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -65,14 +65,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 	return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
 }
 
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
-{
-	if (attrs & DMA_ATTR_WRITE_COMBINE)
-		return pgprot_writecombine(prot);
-	return pgprot_noncached(prot);
-}
-
 static inline void dma_sync_virt(void *addr, size_t size,
 		enum dma_data_direction dir)
 {
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -3,6 +3,7 @@
 #define _LINUX_DMA_NONCOHERENT_H 1
 
 #include <linux/dma-mapping.h>
+#include <asm/pgtable.h>
 
 #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
 #include <asm/dma-coherence.h>
@@ -42,10 +43,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 		dma_addr_t dma_addr);
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs);
 
 #ifdef CONFIG_MMU
+/*
+ * Page protection so that devices that can't snoop CPU caches can use the
+ * memory coherently.  We default to pgprot_noncached which is usually used
+ * for ioremap as a safe bet, but architectures can override this with less
+ * strict semantics if possible.
+ */
+#ifndef pgprot_dmacoherent
+#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
+#endif
+
 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
 #else
 static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
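The fallback above means an architecture that defines nothing keeps the old
pgprot_noncached() behaviour for coherent mmaps; overriding it only requires
defining pgprot_dmacoherent in the architecture's <asm/pgtable.h> (now pulled
in above), as the arm64 and m68k hunks in this patch do. A minimal
hypothetical override might look like the following; _PAGE_FOO_CACHE_MASK and
_PAGE_FOO_NC are invented names used purely for illustration, not symbols of
any real architecture.

/* Hypothetical <asm/pgtable.h> override: relax the fully uncached default
 * to this (imaginary) architecture's weaker "normal, non-cacheable" type. */
#define pgprot_dmacoherent(prot) \
	__pgprot((pgprot_val(prot) & ~_PAGE_FOO_CACHE_MASK) | _PAGE_FOO_NC)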
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -20,6 +20,15 @@ config ARCH_HAS_DMA_COHERENCE_H
 config ARCH_HAS_DMA_SET_MASK
 	bool
 
+#
+# Select this option if the architecture needs special handling for
+# DMA_ATTR_WRITE_COMBINE.  Normally the "uncached" mapping should be what
+# people think of when saying write combine, so very few platforms should
+# need to enable this.
+#
+config ARCH_HAS_DMA_WRITE_COMBINE
+	bool
+
 config DMA_DECLARE_COHERENT
 	bool
 
@@ -45,9 +54,6 @@ config ARCH_HAS_DMA_PREP_COHERENT
 config ARCH_HAS_DMA_COHERENT_TO_PFN
 	bool
 
-config ARCH_HAS_DMA_MMAP_PGPROT
-	bool
-
 config ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	bool
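As the comment notes, most platforms do not need ARCH_HAS_DMA_WRITE_COMBINE
because their uncached mapping already is what people mean by write combine.
arm64 is one example: its existing pgprot_writecombine() definition, quoted
below for comparison (it sits directly above the lines added in the pgtable.h
hunk earlier in this patch), resolves to the same MT_NORMAL_NC memory type as
the new pgprot_dmacoherent(), so honouring DMA_ATTR_WRITE_COMBINE separately
would change nothing there.

/* Existing arm64 definition, quoted for comparison: */
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

/* New arm64 definition added by this patch: */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)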
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -161,9 +161,11 @@ pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
 	     (attrs & DMA_ATTR_NON_CONSISTENT)))
 		return prot;
-	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
-		return arch_dma_mmap_pgprot(dev, prot, attrs);
-	return pgprot_noncached(prot);
+#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
+	if (attrs & DMA_ATTR_WRITE_COMBINE)
+		return pgprot_writecombine(prot);
+#endif
+	return pgprot_dmacoherent(prot);
 }
 #endif /* CONFIG_MMU */