Commit ed0093d9 authored by Linus Torvalds

Merge tag 'arc-4.18-final' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:
 "Another batch of fixes for ARC, this time mainly DMA API rework
  wreckage:

   - Fix software managed DMA wreckage after rework in 4.17 [Eugeniy]
     (illustrated by the sketch after the commit list):
      * missing cache flush
      * SMP_CACHE_BYTES vs cache_line_size

   - Fix allmodconfig build errors [Randy]

   - Maintainer update for Mellanox (EZChip) NPS platform"

* tag 'arc-4.18-final' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  arc: fix type warnings in arc/mm/cache.c
  arc: fix build errors in arc/include/asm/delay.h
  arc: [plat-eznps] fix printk warning in arc/plat-eznps/mtm.c
  arc: [plat-eznps] fix data type errors in platform headers
  ARC: [plat-eznps] Add missing struct nps_host_reg_aux_dpc
  ARC: add SMP_CACHE_BYTES value validate
  ARC: dma [non-IOC] setup SMP_CACHE_BYTES and cache_line_size
  ARC: dma [non IOC]: fix arc_dma_sync_single_for_(device|cpu)
  ARC: Add Ofer Levi as plat-eznps maintainer
parents 98d7e100 ec837d62
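
To make the "SMP_CACHE_BYTES vs cache_line_size" bullet concrete before the diff, here is a minimal, hypothetical userspace sketch (the arena, object names, and line sizes below are assumptions for illustration; this is not kernel code). If allocations are only aligned to a 64-byte L1 line while an L2/SLC line is 128 bytes, two unrelated objects can share one L2 line, so invalidating that line around a DMA buffer would also throw away the neighbour's data:

/*
 * Illustrative sketch only: shows why ARCH_DMA_MINALIGN must cover the
 * LARGEST cache line in the system, not just the L1 line.
 */
#include <stdio.h>
#include <stdint.h>

#define L1_LINE  64   /* assumed L1 data cache line size    */
#define L2_LINE 128   /* assumed SLC/L2 cache line size     */

static unsigned long line_index(uintptr_t addr, unsigned long line)
{
	return addr / line;   /* which cache line the address falls in */
}

int main(void)
{
	/*
	 * Arena aligned to the L2 line so offsets are deterministic;
	 * the two objects are only 64-byte aligned, as a 64-byte
	 * allocation granularity would give.
	 */
	_Alignas(128) static unsigned char heap[256];
	uintptr_t dma_buf  = (uintptr_t)&heap[0];    /* device-owned  */
	uintptr_t neighbor = (uintptr_t)&heap[64];   /* CPU-owned     */

	printf("L1 lines: %lu vs %lu -> %s\n",
	       line_index(dma_buf, L1_LINE), line_index(neighbor, L1_LINE),
	       line_index(dma_buf, L1_LINE) == line_index(neighbor, L1_LINE)
	       ? "SHARED (unsafe)" : "distinct (safe)");
	printf("L2 lines: %lu vs %lu -> %s\n",
	       line_index(dma_buf, L2_LINE), line_index(neighbor, L2_LINE),
	       line_index(dma_buf, L2_LINE) == line_index(neighbor, L2_LINE)
	       ? "SHARED (unsafe)" : "distinct (safe)");
	return 0;
}

Compiled with any C11 compiler, the second printf reports the shared L2 line; the series below fixes this by raising SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) to 128 and validating it at boot.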
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5444,6 +5444,7 @@ F: drivers/iommu/exynos-iommu.c
 EZchip NPS platform support
 M: Vineet Gupta <vgupta@synopsys.com>
+M: Ofer Levi <oferle@mellanox.com>
 S: Supported
 F: arch/arc/plat-eznps
 F: arch/arc/boot/dts/eznps.dts
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -50,6 +50,9 @@ config ARC
 	select HAVE_KERNEL_LZMA
 	select ARCH_HAS_PTE_SPECIAL
 
+config ARCH_HAS_CACHE_LINE_SIZE
+	def_bool y
+
 config MIGHT_HAVE_PCI
 	bool
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -48,7 +48,9 @@
 })
 
 /* Largest line length for either L1 or L2 is 128 bytes */
-#define ARCH_DMA_MINALIGN	128
+#define SMP_CACHE_BYTES		128
+#define cache_line_size()	SMP_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	SMP_CACHE_BYTES
 
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
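
A common consumer of these defines, shown here as a hedged sketch (the struct and field names are hypothetical, not from this commit): padding a device-written buffer to ARCH_DMA_MINALIGN keeps it on its own cache lines, so invalidates for DMA cannot clobber adjacent CPU-owned fields.

#include <linux/cache.h>	/* ARCH_DMA_MINALIGN, cache_line_size() */
#include <linux/spinlock.h>

/* Hypothetical driver private data, for illustration only */
struct my_dev_priv {
	spinlock_t lock;		/* CPU-only state         */
	unsigned char rx_buf[256]	/* device writes via DMA  */
		__aligned(ARCH_DMA_MINALIGN);
};

With this hunk, cache_line_size() on ARC now reports 128 rather than the generic SMP_CACHE_BYTES fallback, matching the largest L1/L2 line advertised in the comment.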
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -17,8 +17,11 @@
 #ifndef __ASM_ARC_UDELAY_H
 #define __ASM_ARC_UDELAY_H
 
+#include <asm-generic/types.h>
 #include <asm/param.h>		/* HZ */
 
+extern unsigned long loops_per_jiffy;
+
 static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1038,7 +1038,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
 		      unsigned long pfn)
 {
-	unsigned int paddr = pfn << PAGE_SHIFT;
+	phys_addr_t paddr = pfn << PAGE_SHIFT;
 
 	u_vaddr &= PAGE_MASK;
@@ -1058,8 +1058,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 		     unsigned long u_vaddr)
 {
 	/* TBD: do we really need to clear the kernel mapping */
-	__flush_dcache_page(page_address(page), u_vaddr);
-	__flush_dcache_page(page_address(page), page_address(page));
+	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+	__flush_dcache_page((phys_addr_t)page_address(page),
+			    (phys_addr_t)page_address(page));
 }
@@ -1246,6 +1247,16 @@ void __init arc_cache_init_master(void)
 		}
 	}
 
+	/*
+	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
+	 * or equal to any cache line length.
+	 */
+	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
+			 "SMP_CACHE_BYTES must be >= any cache line length");
+	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
+		panic("L2 Cache line [%d] > kernel Config [%d]\n",
+		      l2_line_sz, SMP_CACHE_BYTES);
+
 	/* Note that SLC disable not formally supported till HS 3.0 */
 	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
 		arc_slc_disable();
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -129,14 +129,59 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
+/*
+ * Cache operations depending on function and direction argument, inspired by
+ * https://lkml.org/lkml/2018/5/18/979
+ * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
+ * dma-mapping: provide a generic dma-noncoherent implementation)"
+ *
+ *          |   map          ==  for_device     |   unmap     ==  for_cpu
+ *          |----------------------------------------------------------------
+ * TO_DEV   |   writeback        writeback      |   none          none
+ * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
+ * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
+ *
+ *     [*] needed for CPU speculative prefetches
+ *
+ * NOTE: we don't check the validity of direction argument as it is done in
+ * upper layer functions (in include/linux/dma-mapping.h)
+ */
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir)
 {
-	dma_cache_wback(paddr, size);
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		dma_cache_wback(paddr, size);
+		break;
+
+	case DMA_FROM_DEVICE:
+		dma_cache_inv(paddr, size);
+		break;
+
+	case DMA_BIDIRECTIONAL:
+		dma_cache_wback_inv(paddr, size);
+		break;
+
+	default:
+		break;
+	}
 }
 
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir)
 {
-	dma_cache_inv(paddr, size);
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+
+	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		dma_cache_inv(paddr, size);
+		break;
+
+	default:
+		break;
+	}
 }
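
The table above can be read through a typical streaming receive path. The following is a hedged sketch (the function name and device wiring are hypothetical; only standard DMA API calls are used): with dir == DMA_FROM_DEVICE, dma_map_single() reaches arch_sync_dma_for_device() and invalidates, and dma_unmap_single() reaches arch_sync_dma_for_cpu() and invalidates again, discarding lines the CPU may have speculatively prefetched while the device owned the buffer.

#include <linux/dma-mapping.h>

/* Hypothetical RX helper, for illustration only */
static int my_rx_one(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* hands the buffer to the device: invalidate per the table */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... point the device at 'handle' and wait for completion ... */

	/* hands the buffer back to the CPU: invalidate again [*] */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;	/* CPU may now read fresh data from 'buf' */
}

The missing-cache-flush part of the wreckage was precisely that the pre-rework code performed only one of these two maintenance points.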
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -21,6 +21,7 @@
 #error "Incorrect ctop.h include"
 #endif
 
+#include <linux/types.h>
 #include <soc/nps/common.h>
 
 /* core auxiliary registers */
@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
 };
 
 /* AUX registers definition */
+struct nps_host_reg_aux_dpc {
+	union {
+		struct {
+			u32 ien:1, men:1, hen:1, reserved:29;
+		};
+		u32 value;
+	};
+};
+
 struct nps_host_reg_aux_udmc {
 	union {
 		struct {
--- a/arch/arc/plat-eznps/mtm.c
+++ b/arch/arc/plat-eznps/mtm.c
@@ -15,6 +15,8 @@
  */
 
 #include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/log2.h>
 #include <asm/arcregs.h>
@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
 /* Verify and set the value of the mtm hs counter */
 static int __init set_mtm_hs_ctr(char *ctr_str)
 {
-	long hs_ctr;
+	int hs_ctr;
 	int ret;
 
-	ret = kstrtol(ctr_str, 0, &hs_ctr);
+	ret = kstrtoint(ctr_str, 0, &hs_ctr);
 	if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
 		pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",
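
The kstrtol()-to-kstrtoint() switch above makes the parsed type match the pr_err() "%d" specifier (a long operand would need "%ld"). A hedged sketch of the same bounds-checked parse pattern, with a hypothetical helper name and limits:

#include <linux/kernel.h>	/* kstrtoint() */

/* Hypothetical parameter parser mirroring the fix above */
static int parse_bounded(const char *s, int lo, int hi, int *out)
{
	int val, ret;

	ret = kstrtoint(s, 0, &val);	/* base 0: accepts 0x/0 prefixes */
	if (ret || val < lo || val > hi)
		return -EINVAL;		/* reject parse errors and range */

	*out = val;
	return 0;
}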