Commit 82abb273 authored by Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS updates from Ralf Baechle:
 - three fixes for 3.15 that didn't make it in time
 - limited Octeon 3 support
 - paravirtualization support
 - improvements to platform support for Netlogic SOCs
 - add support for powering down the Malta eval board in software
 - add many instructions to the in-kernel microassembler
 - add support for the BPF JIT
 - minor cleanups of the BCM47xx code
 - large cleanup of math emu code resulting in significant code size
   reduction, better readability of the code and more accurate
   emulation
 - improvements to the MIPS CPS code
 - support C3 power status for the R4k count/compare clock device
 - improvements to the GIO support for older SGI workstations
 - increase number of supported CPUs to 256; this can be reached on
   certain embedded multithreaded ccNUMA configurations
 - various small cleanups, updates and fixes

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (173 commits)
  MIPS: IP22/IP28: Improve GIO support
  MIPS: Octeon: Add twsi interrupt initialization for OCTEON 3XXX, 5XXX, 63XX
  DEC: Document the R4k MB ASIC mini interrupt controller
  DEC: Add self as the maintainer
  MIPS: Add microMIPS MSA support.
  MIPS: Replace calls to obsolete strict_strto call with kstrto* equivalents.
  MIPS: Replace obsolete strict_strto call with kstrto
  MIPS: BPF: Simplify code slightly.
  MIPS: Call find_vma with the mmap_sem held
  MIPS: Fix 'write_msa_##' inline macro.
  MIPS: Fix MSA toolchain support detection.
  mips: Update the email address of Geert Uytterhoeven
  MIPS: Add minimal defconfig for mips_paravirt
  MIPS: Enable build for new system 'paravirt'
  MIPS: paravirt: Add pci controller for virtio
  MIPS: Add code for new system 'paravirt'
  MIPS: Add functions for hypervisor call
  MIPS: OCTEON: Add OCTEON3 to __get_cpu_type
  MIPS: Add function get_ebase_cpunum
  MIPS: Add minimal support for OCTEON3 to c-r4k.c
  ...
parents 9b651cc2 f8647b50
@@ -2700,6 +2700,15 @@ S:	Orphan
 F:	Documentation/networking/decnet.txt
 F:	net/decnet/
 
+DECSTATION PLATFORM SUPPORT
+M:	"Maciej W. Rozycki" <macro@linux-mips.org>
+L:	linux-mips@linux-mips.org
+W:	http://www.linux-mips.org/wiki/DECstation
+S:	Maintained
+F:	arch/mips/dec/
+F:	arch/mips/include/asm/dec/
+F:	arch/mips/include/asm/mach-dec/
+
 DEFXX FDDI NETWORK DRIVER
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
...
@@ -16,7 +16,7 @@ obj- := $(platform-)
 obj-y += kernel/
 obj-y += mm/
-obj-y += math-emu/
+obj-y += net/
 ifdef CONFIG_KVM
 obj-y += kvm/
...
@@ -18,6 +18,7 @@ platforms += loongson1
 platforms += mti-malta
 platforms += mti-sead3
 platforms += netlogic
+platforms += paravirt
 platforms += pmcs-msp71xx
 platforms += pnx833x
 platforms += ralink
...
@@ -79,15 +79,6 @@ config CMDLINE_OVERRIDE
 	  Normally, you will choose 'N' here.
 
-config SMTC_IDLE_HOOK_DEBUG
-	bool "Enable additional debug checks before going into CPU idle loop"
-	depends on DEBUG_KERNEL && MIPS_MT_SMTC
-	help
-	  This option enables Enable additional debug checks before going into
-	  CPU idle loop. For details on these checks, see
-	  arch/mips/kernel/smtc.c. This debugging option result in significant
-	  overhead so should be disabled in production kernels.
-
 config SB1XXX_CORELIS
 	bool "Corelis Debugger"
 	depends on SIBYTE_SB1xxx_SOC
...
@@ -120,7 +120,7 @@ cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
 			-fno-omit-frame-pointer
 
 ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa	:= $(call cc-option-yn,-mhard-float -mfp64 -mmsa)
+toolchain-msa	:= $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
 cflags-$(toolchain-msa)	+= -DTOOLCHAIN_SUPPORTS_MSA
 endif
@@ -251,6 +251,7 @@ OBJCOPYFLAGS += --remove-section=.reginfo
 head-y := arch/mips/kernel/head.o
 
 libs-y += arch/mips/lib/
+libs-y += arch/mips/math-emu/
 
 # See arch/mips/Kbuild for content of core part of the kernel
 core-y += arch/mips/
...
@@ -49,7 +49,7 @@ void __init prom_init(void)
 	prom_init_cmdline();
 
 	memsize_str = prom_getenv("memsize");
-	if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize))
+	if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
 		memsize = 0x04000000;
 	add_memory_region(0, memsize, BOOT_MEM_RAM);
...
@@ -67,6 +67,12 @@ void __init plat_mem_setup(void)
 	case ALCHEMY_CPU_AU1500:
 	case ALCHEMY_CPU_AU1100:
 		coherentio = 0;
+		break;
+	case ALCHEMY_CPU_AU1200:
+		/* Au1200 AB USB does not support coherent memory */
+		if (0 == (read_c0_prid() & PRID_REV_MASK))
+			coherentio = 0;
+		break;
 	}
 
 	board_setup();  /* board specific setup */
...
@@ -355,47 +355,25 @@ static inline void __au1200_udc_control(void __iomem *base, int enable)
 	}
 }
 
-static inline int au1200_coherency_bug(void)
-{
-#if defined(CONFIG_DMA_COHERENT)
-	/* Au1200 AB USB does not support coherent memory */
-	if (!(read_c0_prid() & PRID_REV_MASK)) {
-		printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n");
-		printk(KERN_INFO "Au1200 USB: update your board or re-configure"
-				 " the kernel\n");
-		return -ENODEV;
-	}
-#endif
-	return 0;
-}
-
 static inline int au1200_usb_control(int block, int enable)
 {
 	void __iomem *base =
 			(void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
-	int ret = 0;
 
 	switch (block) {
 	case ALCHEMY_USB_OHCI0:
-		ret = au1200_coherency_bug();
-		if (ret && enable)
-			goto out;
 		__au1200_ohci_control(base, enable);
 		break;
 	case ALCHEMY_USB_UDC0:
 		__au1200_udc_control(base, enable);
 		break;
 	case ALCHEMY_USB_EHCI0:
-		ret = au1200_coherency_bug();
-		if (ret && enable)
-			goto out;
 		__au1200_ehci_control(base, enable);
 		break;
 	default:
-		ret = -ENODEV;
+		return -ENODEV;
 	}
-out:
-	return ret;
+	return 0;
 }
...
@@ -158,7 +158,7 @@ static ssize_t db1x_pmattr_store(struct kobject *kobj,
 	int tmp;
 
 	if (ATTRCMP(timer_timeout)) {
-		tmp = strict_strtoul(instr, 0, &l);
+		tmp = kstrtoul(instr, 0, &l);
 		if (tmp)
 			return tmp;
 
@@ -181,7 +181,7 @@ static ssize_t db1x_pmattr_store(struct kobject *kobj,
 		}
 
 	} else if (ATTRCMP(wakemsk)) {
-		tmp = strict_strtoul(instr, 0, &l);
+		tmp = kstrtoul(instr, 0, &l);
 		if (tmp)
 			return tmp;
...
@@ -69,15 +69,18 @@ static __init void prom_init_mem(void)
 	 * BCM47XX uses 128MB for addressing the ram, if the system contains
 	 * less that that amount of ram it remaps the ram more often into the
 	 * available space.
-	 * Accessing memory after 128MB will cause an exception.
-	 * max contains the biggest possible address supported by the platform.
-	 * If the method wants to try something above we assume 128MB ram.
 	 */
-	off = (unsigned long)prom_init;
-	max = off | ((128 << 20) - 1);
-	for (mem = (1 << 20); mem < (128 << 20); mem += (1 << 20)) {
-		if ((off + mem) > max) {
-			mem = (128 << 20);
+
+	/* Physical address, without mapping to any kernel segment */
+	off = CPHYSADDR((unsigned long)prom_init);
+
+	/* Accessing memory after 128 MiB will cause an exception */
+	max = 128 << 20;
+
+	for (mem = 1 << 20; mem < max; mem += 1 << 20) {
+		/* Loop condition may be not enough, off may be over 1 MiB */
+		if (off + mem >= max) {
+			mem = max;
 			printk(KERN_DEBUG "assume 128MB RAM\n");
 			break;
 		}
...
@@ -10,6 +10,17 @@ config CAVIUM_CN63XXP1
 	  non-CN63XXP1 hardware, so it is recommended to select "n"
 	  unless it is known the workarounds are needed.
 
+config CAVIUM_OCTEON_CVMSEG_SIZE
+	int "Number of L1 cache lines reserved for CVMSEG memory"
+	range 0 54
+	default 1
+	help
+	  CVMSEG LM is a segment that accesses portions of the dcache as a
+	  local memory; the larger CVMSEG is, the smaller the cache is.
+	  This selects the size of CVMSEG LM, which is in cache blocks. The
+	  legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is
+	  between zero and 6192 bytes).
+
 endif # CPU_CAVIUM_OCTEON
 
 if CAVIUM_OCTEON_SOC
 
@@ -23,17 +34,6 @@ config CAVIUM_OCTEON_2ND_KERNEL
 	  with this option to be run at the same time as one built without this
 	  option.
 
-config CAVIUM_OCTEON_CVMSEG_SIZE
-	int "Number of L1 cache lines reserved for CVMSEG memory"
-	range 0 54
-	default 1
-	help
-	  CVMSEG LM is a segment that accesses portions of the dcache as a
-	  local memory; the larger CVMSEG is, the smaller the cache is.
-	  This selects the size of CVMSEG LM, which is in cache blocks. The
-	  legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is
-	  between zero and 6192 bytes).
-
 config CAVIUM_OCTEON_LOCK_L2
 	bool "Lock often used kernel code in the L2"
 	default "y"
@@ -86,7 +86,6 @@ config SWIOTLB
 	select IOMMU_HELPER
 	select NEED_SG_DMA_LENGTH
 
-
 config OCTEON_ILM
 	tristate "Module to measure interrupt latency using Octeon CIU Timer"
 	help
...
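For scale: with the default CAVIUM_OCTEON_CVMSEG_SIZE of 1, a single L1 dcache line is carved out as CVMSEG local memory. Assuming the 128-byte cache lines used by Octeon cores (an assumption; the line size is not stated in this hunk), that is 128 bytes of scratchpad per reserved block.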
@@ -105,6 +105,158 @@ int cvmx_helper_ports_on_interface(int interface)
 }
 EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface);
 
+/**
+ * @INTERNAL
+ * Return interface mode for CN68xx.
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface)
+{
+	union cvmx_mio_qlmx_cfg qlm_cfg;
+
+	switch (interface) {
+	case 0:
+		qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+		/* QLM is disabled when QLM SPD is 15. */
+		if (qlm_cfg.s.qlm_spd == 15)
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (qlm_cfg.s.qlm_cfg == 2)
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		else if (qlm_cfg.s.qlm_cfg == 3)
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	case 2:
+	case 3:
+	case 4:
+		qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface));
+		/* QLM is disabled when QLM SPD is 15. */
+		if (qlm_cfg.s.qlm_spd == 15)
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (qlm_cfg.s.qlm_cfg == 2)
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		else if (qlm_cfg.s.qlm_cfg == 3)
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	case 7:
+		qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(3));
+		/* QLM is disabled when QLM SPD is 15. */
+		if (qlm_cfg.s.qlm_spd == 15) {
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		} else if (qlm_cfg.s.qlm_cfg != 0) {
+			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+			if (qlm_cfg.s.qlm_cfg != 0)
+				return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		}
+		return CVMX_HELPER_INTERFACE_MODE_NPI;
+	case 8:
+		return CVMX_HELPER_INTERFACE_MODE_LOOP;
+	default:
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+}
+
+/**
+ * @INTERNAL
+ * Return interface mode for an Octeon II
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
+{
+	union cvmx_gmxx_inf_mode mode;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return __cvmx_get_mode_cn68xx(interface);
+
+	if (interface == 2)
+		return CVMX_HELPER_INTERFACE_MODE_NPI;
+
+	if (interface == 3)
+		return CVMX_HELPER_INTERFACE_MODE_LOOP;
+
+	/* Only present in CN63XX & CN66XX Octeon model */
+	if ((OCTEON_IS_MODEL(OCTEON_CN63XX) &&
+	     (interface == 4 || interface == 5)) ||
+	    (OCTEON_IS_MODEL(OCTEON_CN66XX) &&
+	     interface >= 4 && interface <= 7)) {
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
+		union cvmx_mio_qlmx_cfg mio_qlm_cfg;
+
+		/* QLM2 is SGMII0 and QLM1 is SGMII1 */
+		if (interface == 0)
+			mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
+		else if (interface == 1)
+			mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (mio_qlm_cfg.s.qlm_spd == 15)
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (mio_qlm_cfg.s.qlm_cfg == 9)
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		else if (mio_qlm_cfg.s.qlm_cfg == 11)
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	} else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
+		union cvmx_mio_qlmx_cfg qlm_cfg;
+
+		if (interface == 0) {
+			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
+			if (qlm_cfg.s.qlm_cfg == 2)
+				return CVMX_HELPER_INTERFACE_MODE_SGMII;
+			else if (qlm_cfg.s.qlm_cfg == 3)
+				return CVMX_HELPER_INTERFACE_MODE_XAUI;
+			else
+				return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		} else if (interface == 1) {
+			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+			if (qlm_cfg.s.qlm_cfg == 2)
+				return CVMX_HELPER_INTERFACE_MODE_SGMII;
+			else if (qlm_cfg.s.qlm_cfg == 3)
+				return CVMX_HELPER_INTERFACE_MODE_XAUI;
+			else
+				return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		}
+	} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
+		if (interface == 0) {
+			union cvmx_mio_qlmx_cfg qlm_cfg;
+
+			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+			if (qlm_cfg.s.qlm_cfg == 2)
+				return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		}
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+
+	if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX))
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		switch (mode.cn63xx.mode) {
+		case 0:
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		case 1:
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		default:
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		}
+	} else {
+		if (!mode.s.en)
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (mode.s.type)
+			return CVMX_HELPER_INTERFACE_MODE_GMII;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_RGMII;
+	}
+}
+
 /**
  * Get the operating mode of an interface. Depending on the Octeon
  * chip and configuration, this function returns an enumeration
@@ -118,6 +270,20 @@ EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface);
 cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
 {
 	union cvmx_gmxx_inf_mode mode;
+
+	if (interface < 0 ||
+	    interface >= cvmx_helper_get_number_of_interfaces())
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+	/*
+	 * Octeon II models
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+		return __cvmx_get_mode_octeon2(interface);
+
+	/*
+	 * Octeon and Octeon Plus models
+	 */
 	if (interface == 2)
 		return CVMX_HELPER_INTERFACE_MODE_NPI;
...
@@ -1260,11 +1260,13 @@ static void __init octeon_irq_init_ciu(void)
 	for (i = 0; i < 4; i++)
 		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
 
+	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
 	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
 	for (i = 0; i < 4; i++)
 		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
 
 	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
 
 	/* CIU_1 */
 	for (i = 0; i < 16; i++)
...
@@ -729,17 +729,6 @@ void __init prom_init(void)
 		octeon_write_lcd("Linux");
 #endif
 
-#ifdef CONFIG_CAVIUM_GDB
-	/*
-	 * When debugging the linux kernel, force the cores to enter
-	 * the debug exception handler to break in.
-	 */
-	if (octeon_get_boot_debug_flag()) {
-		cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
-		cvmx_read_csr(CVMX_CIU_DINT);
-	}
-#endif
-
 	octeon_setup_delays();
 
 	/*
@@ -779,12 +768,6 @@ void __init prom_init(void)
 				MAX_MEMORY = 32ull << 30;
 			if (*p == '@')
 				RESERVE_LOW_MEM = memparse(p + 1, &p);
-		} else if (strcmp(arg, "ecc_verbose") == 0) {
-#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
-			__cvmx_interrupt_ecc_report_single_bit_errors = 1;
-			pr_notice("Reporting of single bit ECC errors is "
-				  "turned on\n");
-#endif
 #ifdef CONFIG_KEXEC
 		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
 			crashk_size = memparse(arg+12, &p);
...
@@ -218,15 +218,6 @@ void octeon_prepare_cpus(unsigned int max_cpus)
  */
 static void octeon_smp_finish(void)
 {
-#ifdef CONFIG_CAVIUM_GDB
-	unsigned long tmp;
-	/* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
-	   to be not masked by this core so we know the signal is received by
-	   someone */
-	asm volatile ("dmfc0 %0, $22\n"
-		      "ori   %0, %0, 0x9100\n" "dmtc0  %0, $22\n" : "=r" (tmp));
-#endif
-
 	octeon_user_io_init();
 
 	/* to generate the first CPU timer interrupt */
@@ -234,21 +225,6 @@ static void octeon_smp_finish(void)
 	local_irq_enable();
 }
 
-/**
- * Hook for after all CPUs are online
- */
-static void octeon_cpus_done(void)
-{
-#ifdef CONFIG_CAVIUM_GDB
-	unsigned long tmp;
-	/* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
-	   to be not masked by this core so we know the signal is received by
-	   someone */
-	asm volatile ("dmfc0 %0, $22\n"
-		      "ori   %0, %0, 0x9100\n" "dmtc0  %0, $22\n" : "=r" (tmp));
-#endif
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 /* State of each CPU. */
@@ -405,7 +381,6 @@ struct plat_smp_ops octeon_smp_ops = {
 	.send_ipi_mask		= octeon_send_ipi_mask,
 	.init_secondary		= octeon_init_secondary,
 	.smp_finish		= octeon_smp_finish,
-	.cpus_done		= octeon_cpus_done,
 	.boot_secondary		= octeon_boot_secondary,
 	.smp_setup		= octeon_smp_setup,
 	.prepare_cpus		= octeon_prepare_cpus,
...
@@ -46,7 +46,6 @@ CONFIG_MTD=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -54,7 +53,7 @@ CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_M25P80=y
-# CONFIG_M25PXX_USE_FAST_READ is not set
+CONFIG_MTD_SPI_NOR=y
 CONFIG_NETDEVICES=y
 # CONFIG_NET_PACKET_ENGINE is not set
 CONFIG_ATH_COMMON=m
...
@@ -113,6 +113,7 @@ CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ECC_BCH=y
 CONFIG_MTD_NAND_AU1550=y
 CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_MTD_SPI_NOR=y
 CONFIG_EEPROM_AT24=y
 CONFIG_EEPROM_AT25=y
 CONFIG_SCSI_TGT=y
...
CONFIG_MIPS_MALTA=y
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_CPU_MIPS32_R2=y
CONFIG_PAGE_SIZE_16KB=y
CONFIG_MIPS_MT_SMTC=y
# CONFIG_MIPS_MT_FPAFF is not set
CONFIG_NR_CPUS=9
CONFIG_HZ_48=y
CONFIG_LOCALVERSION="smtc"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_CLS_IND=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_IDE=y
# CONFIG_IDE_PROC_FS is not set
# CONFIG_IDEPCI_PCIBUS_ORDER is not set
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_PIIX=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_ALTEON is not set
CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_TOSHIBA is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_WLAN is not set
# CONFIG_VT is not set
CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX_G=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_IDE_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
# CONFIG_FTRACE is not set
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
@@ -4,10 +4,9 @@ CONFIG_CPU_MIPS32_R2=y
 CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CMP=y
+CONFIG_MIPS_CPS=y
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
-CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
...
@@ -5,10 +5,9 @@ CONFIG_CPU_MIPS32_3_5_FEATURES=y
 CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CMP=y
+CONFIG_MIPS_CPS=y
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
-CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
...
CONFIG_MIPS_PARAVIRT=y
CONFIG_CPU_MIPS64_R2=y
CONFIG_64BIT=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_SMP=y
CONFIG_HZ_1000=y
CONFIG_PREEMPT=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PCI=y
CONFIG_MIPS32_COMPAT=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PHYLIB=y
CONFIG_MARVELL_PHY=y
CONFIG_BROADCOM_PHY=y
CONFIG_BCM87XX_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_MMIO=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_FTRACE is not set
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
@@ -81,7 +81,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
@@ -89,6 +88,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_PHYSMAP_OF=y
 CONFIG_MTD_M25P80=y
+CONFIG_MTD_SPI_NOR=y
 CONFIG_EEPROM_93CX6=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
...
@@ -23,6 +23,7 @@
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
 #include <asm/irq.h>
 #include <asm/irq_cpu.h>
 #include <asm/mipsregs.h>
@@ -748,6 +749,10 @@ void __init arch_init_irq(void)
 		cpu_fpu_mask = 0;
 		dec_interrupt[DEC_IRQ_FPU] = -1;
 	}
+	/* Free the halt interrupt unused on R4k systems.  */
+	if (current_cpu_type() == CPU_R4000SC ||
+	    current_cpu_type() == CPU_R4400SC)
+		dec_interrupt[DEC_IRQ_HALT] = -1;
 
 	/* Register board interrupts: FPU and cascade.  */
 	if (dec_interrupt[DEC_IRQ_FPU] >= 0)
...
@@ -17,26 +17,8 @@
 #ifdef CONFIG_64BIT
 #include <asm/asmmacro-64.h>
 #endif
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	.macro	local_irq_enable reg=t0
-	mfc0	\reg, CP0_TCSTATUS
-	ori	\reg, \reg, TCSTATUS_IXMT
-	xori	\reg, \reg, TCSTATUS_IXMT
-	mtc0	\reg, CP0_TCSTATUS
-	_ehb
-	.endm
-
-	.macro	local_irq_disable reg=t0
-	mfc0	\reg, CP0_TCSTATUS
-	ori	\reg, \reg, TCSTATUS_IXMT
-	mtc0	\reg, CP0_TCSTATUS
-	_ehb
-	.endm
-#elif defined(CONFIG_CPU_MIPSR2)
+#ifdef CONFIG_CPU_MIPSR2
 	.macro	local_irq_enable reg=t0
 	ei
 	irq_enable_hazard
@@ -71,7 +53,7 @@
 	sw	\reg, TI_PRE_COUNT($28)
 #endif
 	.endm
-#endif /* CONFIG_MIPS_MT_SMTC */
+#endif /* CONFIG_CPU_MIPSR2 */
 
 	.macro	fpu_save_16even thread tmp=t0
 	cfc1	\tmp, fcr31
@@ -267,13 +249,35 @@
 	.set	pop
 	.endm
 #else
+
+#ifdef CONFIG_CPU_MICROMIPS
+#define CFC_MSA_INSN		0x587e0056
+#define CTC_MSA_INSN		0x583e0816
+#define LDD_MSA_INSN		0x58000837
+#define STD_MSA_INSN		0x5800083f
+#define COPY_UW_MSA_INSN	0x58f00056
+#define COPY_UD_MSA_INSN	0x58f80056
+#define INSERT_W_MSA_INSN	0x59300816
+#define INSERT_D_MSA_INSN	0x59380816
+#else
+#define CFC_MSA_INSN		0x787e0059
+#define CTC_MSA_INSN		0x783e0819
+#define LDD_MSA_INSN		0x78000823
+#define STD_MSA_INSN		0x78000827
+#define COPY_UW_MSA_INSN	0x78f00059
+#define COPY_UD_MSA_INSN	0x78f80059
+#define INSERT_W_MSA_INSN	0x79300819
+#define INSERT_D_MSA_INSN	0x79380819
+#endif
+
 	/*
 	 * Temporary until all toolchains in use include MSA support.
 	 */
 	.macro	cfcmsa	rd, cs
 	.set	push
 	.set	noat
-	.word	0x787e0059 | (\cs << 11)
+	.insn
+	.word	CFC_MSA_INSN | (\cs << 11)
 	move	\rd, $1
 	.set	pop
 	.endm
@@ -282,7 +286,7 @@
 	.set	push
 	.set	noat
 	move	$1, \rs
-	.word	0x783e0819 | (\cd << 6)
+	.word	CTC_MSA_INSN | (\cd << 6)
 	.set	pop
 	.endm
@@ -290,7 +294,7 @@
 	.set	push
 	.set	noat
 	add	$1, \base, \off
-	.word	0x78000823 | (\wd << 6)
+	.word	LDD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -298,14 +302,15 @@
 	.set	push
 	.set	noat
 	add	$1, \base, \off
-	.word	0x78000827 | (\wd << 6)
+	.word	STD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
 
 	.macro	copy_u_w rd, ws, n
 	.set	push
 	.set	noat
-	.word	0x78f00059 | (\n << 16) | (\ws << 11)
+	.insn
+	.word	COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
 	/* move triggers an assembler bug... */
 	or	\rd, $1, zero
 	.set	pop
@@ -314,7 +319,8 @@
 	.macro	copy_u_d rd, ws, n
 	.set	push
 	.set	noat
-	.word	0x78f80059 | (\n << 16) | (\ws << 11)
+	.insn
+	.word	COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
 	/* move triggers an assembler bug... */
 	or	\rd, $1, zero
 	.set	pop
@@ -325,7 +331,7 @@
 	.set	noat
 	/* move triggers an assembler bug... */
 	or	$1, \rs, zero
-	.word	0x79300819 | (\n << 16) | (\wd << 6)
+	.word	INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
 	.set	pop
 	.endm
@@ -334,7 +340,7 @@
 	.set	noat
 	/* move triggers an assembler bug... */
 	or	$1, \rs, zero
-	.word	0x79380819 | (\n << 16) | (\wd << 6)
+	.word	INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
 	.set	pop
 	.endm
 #endif
...
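A worked example of how these encodings compose, using the standard (non-microMIPS) opcodes above: reading MSA control register 1 (MSACSR) through the cfcmsa macro emits .word 0x787e0059 | (1 << 11) = 0x787e0859, and the macro then copies $1 into the requested destination register. The added .insn directive simply tells the assembler that the raw .word is an instruction, so branch and delay-slot bookkeeping stay correct.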
@@ -8,6 +8,8 @@
 #ifndef _ASM_BRANCH_H
 #define _ASM_BRANCH_H
 
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
 #include <asm/ptrace.h>
 #include <asm/inst.h>
 
@@ -18,12 +20,40 @@ extern int __compute_return_epc_for_insn(struct pt_regs *regs,
 extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
 extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
 
+/*
+ * microMIPS bitfields
+ */
+#define MM_POOL32A_MINOR_MASK	0x3f
+#define MM_POOL32A_MINOR_SHIFT	0x6
+#define MM_MIPS32_COND_FC	0x30
+
+extern int __mm_isBranchInstr(struct pt_regs *regs,
+	struct mm_decoded_insn dec_insn, unsigned long *contpc);
+
+static inline int mm_isBranchInstr(struct pt_regs *regs,
+	struct mm_decoded_insn dec_insn, unsigned long *contpc)
+{
+	if (!cpu_has_mmips)
+		return 0;
+
+	return __mm_isBranchInstr(regs, dec_insn, contpc);
+}
+
 static inline int delay_slot(struct pt_regs *regs)
 {
 	return regs->cp0_cause & CAUSEF_BD;
 }
 
+static inline void clear_delay_slot(struct pt_regs *regs)
+{
+	regs->cp0_cause &= ~CAUSEF_BD;
+}
+
+static inline void set_delay_slot(struct pt_regs *regs)
+{
+	regs->cp0_cause |= CAUSEF_BD;
+}
+
 static inline unsigned long exception_epc(struct pt_regs *regs)
 {
 	if (likely(!delay_slot(regs)))
...
@@ -113,6 +113,12 @@ unsigned long run_uncached(void *func);
 extern void *kmap_coherent(struct page *page, unsigned long addr);
 extern void kunmap_coherent(void);
+extern void *kmap_noncoherent(struct page *page, unsigned long addr);
+
+static inline void kunmap_noncoherent(void)
+{
+	kunmap_coherent();
+}
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
...
@@ -10,7 +10,6 @@ extern void cmp_smp_setup(void);
 extern void cmp_smp_finish(void);
 extern void cmp_boot_secondary(int cpu, struct task_struct *t);
 extern void cmp_init_secondary(void);
-extern void cmp_cpus_done(void);
 extern void cmp_prepare_cpus(unsigned int max_cpus);
 
 /* This is platform specific */
...
@@ -110,9 +110,15 @@
 #ifndef cpu_has_smartmips
 #define cpu_has_smartmips	(cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
 #endif
+
 #ifndef cpu_has_rixi
-#define cpu_has_rixi		(cpu_data[0].options & MIPS_CPU_RIXI)
+# ifdef CONFIG_64BIT
+# define cpu_has_rixi		(cpu_data[0].options & MIPS_CPU_RIXI)
+# else /* CONFIG_32BIT */
+# define cpu_has_rixi		((cpu_data[0].options & MIPS_CPU_RIXI) && !cpu_has_64bits)
+# endif
 #endif
+
 #ifndef cpu_has_mmips
 # ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
 # define cpu_has_mmips		(cpu_data[0].options & MIPS_CPU_MICROMIPS)
@@ -120,6 +126,7 @@
 # define cpu_has_mmips		0
 # endif
 #endif
+
 #ifndef cpu_has_vtag_icache
 #define cpu_has_vtag_icache	(cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
 #endif
@@ -183,6 +190,17 @@
 /*
  * Shortcuts ...
  */
+#define cpu_has_mips_2_3_4_5	(cpu_has_mips_2 | cpu_has_mips_3_4_5)
+#define cpu_has_mips_3_4_5	(cpu_has_mips_3 | cpu_has_mips_4_5)
+#define cpu_has_mips_4_5	(cpu_has_mips_4 | cpu_has_mips_5)
+
+#define cpu_has_mips_2_3_4_5_r	(cpu_has_mips_2 | cpu_has_mips_3_4_5_r)
+#define cpu_has_mips_3_4_5_r	(cpu_has_mips_3 | cpu_has_mips_4_5_r)
+#define cpu_has_mips_4_5_r	(cpu_has_mips_4 | cpu_has_mips_5_r)
+#define cpu_has_mips_5_r	(cpu_has_mips_5 | cpu_has_mips_r)
+
+#define cpu_has_mips_4_5_r2	(cpu_has_mips_4_5 | cpu_has_mips_r2)
+
 #define cpu_has_mips32	(cpu_has_mips32r1 | cpu_has_mips32r2)
 #define cpu_has_mips64	(cpu_has_mips64r1 | cpu_has_mips64r2)
 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
...
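As a worked expansion of the new shortcuts: cpu_has_mips_2_3_4_5_r ORs cpu_has_mips_2 into cpu_has_mips_3_4_5_r, which in turn chains through cpu_has_mips_4_5_r and cpu_has_mips_5_r down to cpu_has_mips_r, so a single test covers every ISA level from MIPS II through the release architectures; presumably this is what lets the math-emu cleanup mentioned in the merge summary collapse per-ISA checks into single conditions.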
@@ -65,17 +65,12 @@ struct cpuinfo_mips {
 #ifdef CONFIG_64BIT
 	int			vmbits;	/* Virtual memory size in bits */
 #endif
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
 	/*
-	 * In the MIPS MT "SMTC" model, each TC is considered
-	 * to be a "CPU" for the purposes of scheduling, but
-	 * exception resources, ASID spaces, etc, are common
-	 * to all TCs within the same VPE.
+	 * There is not necessarily a 1:1 mapping of VPE num to CPU number
+	 * in particular on multi-core systems.
	 */
 	int			vpe_id;	/* Virtual Processor number */
 #endif
-#ifdef CONFIG_MIPS_MT_SMTC
-	int			tc_id;	/* Thread Context number */
-#endif
 	void			*data;	/* Additional data */
 	unsigned int		watch_reg_count; /* Number that exist */
@@ -117,7 +112,7 @@ struct proc_cpuinfo_notifier_args {
 	unsigned long n;
 };
 
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
 # define cpu_vpe_id(cpuinfo)	((cpuinfo)->vpe_id)
 #else
 # define cpu_vpe_id(cpuinfo)	0
...
@@ -155,9 +155,6 @@ static inline int __pure __get_cpu_type(const int cpu_type)
 	case CPU_RM7000:
 	case CPU_SR71000:
 #endif
-#ifdef CONFIG_SYS_HAS_CPU_RM9000
-	case CPU_RM9000:
-#endif
 #ifdef CONFIG_SYS_HAS_CPU_SB1
 	case CPU_SB1:
 	case CPU_SB1A:
@@ -166,6 +163,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
 	case CPU_CAVIUM_OCTEON:
 	case CPU_CAVIUM_OCTEON_PLUS:
 	case CPU_CAVIUM_OCTEON2:
+	case CPU_CAVIUM_OCTEON3:
 #endif
 
 #if defined(CONFIG_SYS_HAS_CPU_BMIPS32_3300) || \
...
@@ -201,6 +201,7 @@
 #define PRID_IMP_NETLOGIC_XLP3XX	0x1100
 #define PRID_IMP_NETLOGIC_XLP2XX	0x1200
 #define PRID_IMP_NETLOGIC_XLP9XX	0x1500
+#define PRID_IMP_NETLOGIC_XLP5XX	0x1300
 
 /*
  * Particular Revision values for bits 7:0 of the PRId register.
@@ -281,7 +282,7 @@ enum cpu_type_enum {
 	CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000,
 	CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122,
 	CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
-	CPU_SR71000, CPU_RM9000, CPU_TX49XX,
+	CPU_SR71000, CPU_TX49XX,
 
 	/*
	 * R8000 class processors
...
@@ -48,13 +48,21 @@
 #define KN4K_RES_14	(14*IOASIC_SLOT_SIZE)	/* unused? */
 #define KN4K_RES_15	(15*IOASIC_SLOT_SIZE)	/* unused? */
 
+/*
+ * MB ASIC interrupt bits.
+ */
+#define KN4K_MB_INR_MB		4	/* ??? */
+#define KN4K_MB_INR_MT		3	/* memory, I/O bus read/write errors */
+#define KN4K_MB_INR_RES_2	2	/* unused */
+#define KN4K_MB_INR_RTC		1	/* RTC */
+#define KN4K_MB_INR_TC		0	/* I/O ASIC cascade */
+
 /*
  * Bits for the MB interrupt register.
  * The register appears read-only.
  */
-#define KN4K_MB_INT_TC		(1<<0)	/* TURBOchannel? */
-#define KN4K_MB_INT_RTC		(1<<1)	/* RTC? */
-#define KN4K_MB_INT_MT		(1<<3)	/* I/O ASIC cascade */
+#define KN4K_MB_INT_IRQ		(0x1f<<0)	/* CPU Int[4:0] status. */
+#define KN4K_MB_INT_IRQ_N(n)	(1<<(n))	/* Individual status bits. */
 
 /*
  * Bits for the MB control & status register.
@@ -70,6 +78,7 @@
 #define KN4K_MB_CSR_NC		(1<<14)	/* ??? */
 #define KN4K_MB_CSR_EE		(1<<15)	/* (bus) Exception Enable? */
 #define KN4K_MB_CSR_MSK		(0x1f<<16)	/* CPU Int[4:0] mask */
+#define KN4K_MB_CSR_MSK_N(n)	(1<<((n)+16))	/* Individual mask bits. */
 #define KN4K_MB_CSR_FW		(1<<21)	/* ??? */
 #define KN4K_MB_CSR_W		(1<<31)	/* ??? */
...
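As a worked expansion of the new helper macros: KN4K_MB_INT_IRQ_N(KN4K_MB_INR_RTC) evaluates to (1 << 1), the RTC bit in the read-only status register, and KN4K_MB_CSR_MSK_N(KN4K_MB_INR_RTC) to (1 << 17), the corresponding bit inside the KN4K_MB_CSR_MSK field of the control & status register.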
@@ -48,11 +48,7 @@
 enum fixed_addresses {
 #define FIX_N_COLOURS 8
 	FIX_CMAP_BEGIN,
-#ifdef CONFIG_MIPS_MT_SMTC
-	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2),
-#else
 	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2),
-#endif
 #ifdef CONFIG_HIGHMEM
 	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
...
@@ -17,6 +17,7 @@
 #include <asm/mipsregs.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
+#include <asm/fpu_emulator.h>
 #include <asm/hazards.h>
 #include <asm/processor.h>
 #include <asm/current.h>
@@ -28,7 +29,6 @@
 struct sigcontext;
 struct sigcontext32;
 
-extern void fpu_emulator_init_fpu(void);
 extern void _init_fpu(void);
 extern void _save_fp(struct task_struct *);
 extern void _restore_fp(struct task_struct *);
@@ -156,15 +156,16 @@ static inline int init_fpu(void)
 	int ret = 0;
 
 	preempt_disable();
+
 	if (cpu_has_fpu) {
 		ret = __own_fpu();
 		if (!ret)
 			_init_fpu();
-	} else {
+	} else
 		fpu_emulator_init_fpu();
-	}
 
 	preempt_enable();
+
 	return ret;
 }
...
@@ -23,9 +23,12 @@
 #ifndef _ASM_FPU_EMULATOR_H
 #define _ASM_FPU_EMULATOR_H
 
+#include <linux/sched.h>
 #include <asm/break.h>
+#include <asm/thread_info.h>
 #include <asm/inst.h>
 #include <asm/local.h>
+#include <asm/processor.h>
 
 #ifdef CONFIG_DEBUG_FS
 
@@ -36,6 +39,11 @@ struct mips_fpu_emulator_stats {
 	local_t cp1ops;
 	local_t cp1xops;
 	local_t errors;
+	local_t ieee754_inexact;
+	local_t ieee754_underflow;
+	local_t ieee754_overflow;
+	local_t ieee754_zerodiv;
+	local_t ieee754_invalidop;
 };
 
 DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
@@ -71,4 +79,17 @@ int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
  */
 #define BREAK_MATH (0x0000000d | (BRK_MEMU << 16))
 
+#define SIGNALLING_NAN 0x7ff800007ff80000LL
+
+static inline void fpu_emulator_init_fpu(void)
+{
+	struct task_struct *t = current;
+	int i;
+
+	t->thread.fpu.fcr31 = 0;
+	for (i = 0; i < 32; i++)
+		set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
+}
+
 #endif /* _ASM_FPU_EMULATOR_H */
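A note on the SIGNALLING_NAN constant above: the 64-bit pattern 0x7ff800007ff80000 packs 0x7ff80000 into both 32-bit halves, so a register initialized this way reads as a NaN whether interpreted as a double or as paired singles. Under the legacy (pre-R6) MIPS NaN encoding, where the quietness bit is inverted relative to IEEE 754-2008, the set mantissa MSB marks these as signalling NaNs, hence the name.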
@@ -380,6 +380,7 @@ extern unsigned int gic_compare_int (void);
 extern cycle_t gic_read_count(void);
 extern cycle_t gic_read_compare(void);
 extern void gic_write_compare(cycle_t cnt);
+extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
...
@@ -50,7 +50,7 @@ static inline void gio_device_free(struct gio_device *dev)
 extern int gio_register_driver(struct gio_driver *);
 extern void gio_unregister_driver(struct gio_driver *);
 
-#define gio_get_drvdata(_dev)		drv_get_drvdata(&(_dev)->dev)
-#define gio_set_drvdata(_dev, data)	drv_set_drvdata(&(_dev)->dev, (data))
+#define gio_get_drvdata(_dev)		dev_get_drvdata(&(_dev)->dev)
+#define gio_set_drvdata(_dev, data)	dev_set_drvdata(&(_dev)->dev, (data))
 
 extern void gio_set_master(struct gio_device *);
 #ifndef __ASM_IDLE_H
 #define __ASM_IDLE_H
 
+#include <linux/cpuidle.h>
 #include <linux/linkage.h>
 
 extern void (*cpu_wait)(void);
@@ -20,4 +21,17 @@ static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
 		addr < (unsigned long)__pastwait;
 }
 
+extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+				   struct cpuidle_driver *drv, int index);
+
+#define MIPS_CPUIDLE_WAIT_STATE {\
+	.enter			= mips_cpuidle_wait_enter,\
+	.exit_latency		= 1,\
+	.target_residency	= 1,\
+	.power_usage		= UINT_MAX,\
+	.flags			= CPUIDLE_FLAG_TIME_VALID,\
+	.name			= "wait",\
+	.desc			= "MIPS wait",\
+}
+
 #endif /* __ASM_IDLE_H */
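The MIPS_CPUIDLE_WAIT_STATE initializer is intended to be dropped into a platform cpuidle driver's state table. A minimal sketch of such a driver follows; the driver name and the registration boilerplate are illustrative assumptions, not code from this merge:

	#include <linux/cpuidle.h>
	#include <linux/module.h>
	#include <asm/idle.h>

	/* Hypothetical platform driver exposing only the "wait" state. */
	static struct cpuidle_driver example_cpuidle_driver = {
		.name		= "example_cpuidle",	/* assumed name */
		.owner		= THIS_MODULE,
		.states		= {
			MIPS_CPUIDLE_WAIT_STATE,	/* state 0: r4k "wait" */
		},
		.state_count	= 1,
	};

	static int __init example_cpuidle_init(void)
	{
		return cpuidle_register(&example_cpuidle_driver, NULL);
	}
	device_initcall(example_cpuidle_init);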
@@ -26,104 +26,8 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
 #endif
 
-#ifdef CONFIG_MIPS_MT_SMTC
-
-struct irqaction;
-
-extern unsigned long irq_hwmask[];
-extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
-			  unsigned long hwmask);
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-}
-
-#else
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
-}
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-#include <linux/cpumask.h>
-
-extern int plat_set_irq_affinity(struct irq_data *d,
-				 const struct cpumask *affinity, bool force);
-extern void smtc_forward_irq(struct irq_data *d);
-
-/*
- * IRQ affinity hook invoked at the beginning of interrupt dispatch
- * if option is enabled.
- *
- * Up through Linux 2.6.22 (at least) cpumask operations are very
- * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
- * used a "fast path" per-IRQ-descriptor cache of affinity information
- * to reduce latency.  As there is a project afoot to optimize the
- * cpumask implementations, this version is optimistically assuming
- * that cpumask.h macro overhead is reasonable during interrupt dispatch.
- */
-static inline int handle_on_other_cpu(unsigned int irq)
-{
-	struct irq_data *d = irq_get_irq_data(irq);
-
-	if (cpumask_test_cpu(smp_processor_id(), d->affinity))
-		return 0;
-	smtc_forward_irq(d);
-	return 1;
-}
-
-#else /* Not doing SMTC affinity */
-
-static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
-
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
-
-static inline void smtc_im_backstop(unsigned int irq)
-{
-	if (irq_hwmask[irq] & 0x0000ff00)
-		write_c0_tccontext(read_c0_tccontext() &
-				   ~(irq_hwmask[irq] & 0x0000ff00));
-}
-
-/*
- * Clear interrupt mask handling "backstop" if irq_hwmask
- * entry so indicates.  This implies that the ack() or end()
- * functions will take over re-enabling the low-level mask.
- * Otherwise it will be done on return from exception.
- */
-static inline int smtc_handle_on_other_cpu(unsigned int irq)
-{
-	int ret = handle_on_other_cpu(irq);
-
-	if (!ret)
-		smtc_im_backstop(irq);
-	return ret;
-}
-
-#else
-
-static inline void smtc_im_backstop(unsigned int irq) { }
-static inline int smtc_handle_on_other_cpu(unsigned int irq)
-{
-	return handle_on_other_cpu(irq);
-}
-
-#endif
-
 extern void do_IRQ(unsigned int irq);
 
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-extern void do_IRQ_no_affinity(unsigned int irq);
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
 extern void arch_init_irq(void);
 extern void spurious_interrupt(void);
...
@@ -17,7 +17,7 @@
 #include <linux/stringify.h>
 #include <asm/hazards.h>
 
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_CPU_MIPSR2
 
 static inline void arch_local_irq_disable(void)
 {
@@ -118,30 +118,15 @@ void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
-#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
-
-extern void smtc_ipi_replay(void);
+#endif /* CONFIG_CPU_MIPSR2 */
 
 static inline void arch_local_irq_enable(void)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
 	__asm__ __volatile__(
 	"	.set	push		\n"
 	"	.set	reorder		\n"
 	"	.set	noat		\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400	\n"
-	"	xori	$1, 0x400	\n"
-	"	mtc0	$1, $2, 1	\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#if defined(CONFIG_CPU_MIPSR2)
 	"	ei			\n"
 #else
 	"	mfc0	$1,$12		\n"
@@ -163,11 +148,7 @@ static inline unsigned long arch_local_save_flags(void)
 	asm __volatile__(
 	"	.set	push		\n"
 	"	.set	reorder		\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	%[flags], $2, 1	\n"
-#else
 	"	mfc0	%[flags], $12	\n"
-#endif
 	"	.set	pop		\n"
 	: [flags] "=r" (flags));
 
@@ -177,14 +158,7 @@ static inline unsigned long arch_local_save_flags(void)
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
-	 */
-	return flags & 0x400;
-#else
 	return !(flags & 1);
-#endif
 }
 
 #endif /* #ifndef __ASSEMBLY__ */
...
#ifndef _ASM_MIPS_KVM_PARA_H
#define _ASM_MIPS_KVM_PARA_H
#include <uapi/asm/kvm_para.h>
#define KVM_HYPERCALL ".word 0x42000028"
/*
* Hypercalls for KVM.
*
* Hypercall number is passed in v0.
* Return value will be placed in v0.
* Up to 3 arguments are passed in a0, a1, and a2.
*/
static inline unsigned long kvm_hypercall0(unsigned long num)
{
register unsigned long n asm("v0");
register unsigned long r asm("v0");
n = num;
__asm__ __volatile__(
KVM_HYPERCALL
: "=r" (r) : "r" (n) : "memory"
);
return r;
}
static inline unsigned long kvm_hypercall1(unsigned long num,
unsigned long arg0)
{
register unsigned long n asm("v0");
register unsigned long r asm("v0");
register unsigned long a0 asm("a0");
n = num;
a0 = arg0;
__asm__ __volatile__(
KVM_HYPERCALL
: "=r" (r) : "r" (n), "r" (a0) : "memory"
);
return r;
}
static inline unsigned long kvm_hypercall2(unsigned long num,
unsigned long arg0, unsigned long arg1)
{
register unsigned long n asm("v0");
register unsigned long r asm("v0");
register unsigned long a0 asm("a0");
register unsigned long a1 asm("a1");
n = num;
a0 = arg0;
a1 = arg1;
__asm__ __volatile__(
KVM_HYPERCALL
: "=r" (r) : "r" (n), "r" (a0), "r" (a1) : "memory"
);
return r;
}
static inline unsigned long kvm_hypercall3(unsigned long num,
unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
register unsigned long n asm("v0");
register unsigned long r asm("v0");
register unsigned long a0 asm("a0");
register unsigned long a1 asm("a1");
register unsigned long a2 asm("a2");
n = num;
a0 = arg0;
a1 = arg1;
a2 = arg2;
__asm__ __volatile__(
KVM_HYPERCALL
: "=r" (r) : "r" (n), "r" (a0), "r" (a1), "r" (a2) : "memory"
);
return r;
}
static inline bool kvm_check_and_clear_guest_paused(void)
{
return false;
}
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
#ifdef CONFIG_MIPS_PARAVIRT
static inline bool kvm_para_available(void)
{
return true;
}
#else
static inline bool kvm_para_available(void)
{
return false;
}
#endif
#endif /* _ASM_MIPS_KVM_PARA_H */
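The wrappers above follow the convention spelled out in the header comment: hypercall number in v0, up to three arguments in a0-a2, result back in v0. A minimal sketch of guest-side usage; HC_EXAMPLE_POWEROFF is a made-up number for illustration, since real hypercall numbers are defined by the hypervisor ABI:

#include <asm/kvm_para.h>

#define HC_EXAMPLE_POWEROFF	100	/* hypothetical hypercall number */

static void example_guest_poweroff(void)
{
	/* Only attempt a hypercall when running under a hypervisor. */
	if (!kvm_para_available())
		return;

	/* The number goes in v0, the single argument in a0. */
	kvm_hypercall1(HC_EXAMPLE_POWEROFF, 0);
}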
...@@ -22,7 +22,6 @@ ...@@ -22,7 +22,6 @@
#define cpu_has_3k_cache 0 #define cpu_has_3k_cache 0
#define cpu_has_4k_cache 0 #define cpu_has_4k_cache 0
#define cpu_has_tx39_cache 0 #define cpu_has_tx39_cache 0
#define cpu_has_fpu 0
#define cpu_has_counter 1 #define cpu_has_counter 1
#define cpu_has_watch 1 #define cpu_has_watch 1
#define cpu_has_divec 1 #define cpu_has_divec 1
......
...@@ -35,6 +35,8 @@ enum octeon_irq { ...@@ -35,6 +35,8 @@ enum octeon_irq {
OCTEON_IRQ_PCI_MSI2, OCTEON_IRQ_PCI_MSI2,
OCTEON_IRQ_PCI_MSI3, OCTEON_IRQ_PCI_MSI3,
OCTEON_IRQ_TWSI,
OCTEON_IRQ_TWSI2,
OCTEON_IRQ_RML, OCTEON_IRQ_RML,
OCTEON_IRQ_TIMER0, OCTEON_IRQ_TIMER0,
OCTEON_IRQ_TIMER1, OCTEON_IRQ_TIMER1,
......
...@@ -39,6 +39,10 @@ ...@@ -39,6 +39,10 @@
#define cpu_has_nofpuex 0 #define cpu_has_nofpuex 0
#define cpu_has_64bits 1 #define cpu_has_64bits 1
#define cpu_has_mips_2 1
#define cpu_has_mips_3 1
#define cpu_has_mips_5 0
#define cpu_has_mips32r1 0 #define cpu_has_mips32r1 0
#define cpu_has_mips32r2 0 #define cpu_has_mips32r2 0
#define cpu_has_mips64r1 0 #define cpu_has_mips64r1 0
......
...@@ -80,36 +80,6 @@ ...@@ -80,36 +80,6 @@
.endm .endm
.macro kernel_entry_setup .macro kernel_entry_setup
#ifdef CONFIG_MIPS_MT_SMTC
mfc0 t0, CP0_CONFIG
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 1
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 2
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 3
and t0, 1<<2
bnez t0, 0f
9:
/* Assume we came from YAMON... */
PTR_LA v0, 0x9fc00534 /* YAMON print */
lw v0, (v0)
move a0, zero
PTR_LA a1, nonmt_processor
jal v0
PTR_LA v0, 0x9fc00520 /* YAMON exit */
lw v0, (v0)
li a0, 1
jal v0
1: b 1b
__INITDATA
nonmt_processor:
.asciz "SMTC kernel requires the MT ASE to run\n"
__FINIT
0:
#endif
#ifdef CONFIG_EVA #ifdef CONFIG_EVA
sync sync
......
/*
* Copyright (C) 2014 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __ASM_MIPS_MACH_MALTA_PM_H__
#define __ASM_MIPS_MACH_MALTA_PM_H__
#include <asm/mips-boards/piix4.h>
#ifdef CONFIG_MIPS_MALTA_PM
/**
* mips_pm_suspend - enter a suspend state
* @state: the state to enter, one of PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_*
*
* Enters a suspend state via the Malta's PIIX4. If the state to be entered
* is one which loses context (eg. SOFF) then this function will never
* return.
*/
extern int mips_pm_suspend(unsigned state);
#else /* !CONFIG_MIPS_MALTA_PM */
static inline int mips_pm_suspend(unsigned state)
{
return -EINVAL;
}
#endif /* !CONFIG_MIPS_MALTA_PM */
#endif /* __ASM_MIPS_MACH_MALTA_PM_H__ */
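A sketch of how a platform power-off hook might use this API; PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF comes from the piix4.h additions later in this series, and the include paths are assumptions:

#include <linux/kernel.h>
#include <asm/mips-boards/piix4.h>
#include <asm/mach-malta/malta-pm.h>	/* assumed header location */

static void malta_machine_power_off(void)
{
	/* Does not return if the SOFF (soft-off) transition succeeds. */
	mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF);

	pr_warn("PIIX4 soft power-off failed\n");
}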
...@@ -10,10 +10,12 @@ ...@@ -10,10 +10,12 @@
#include <asm/mach-netlogic/multi-node.h> #include <asm/mach-netlogic/multi-node.h>
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) cpu_to_node(cpu) #define topology_physical_package_id(cpu) cpu_to_node(cpu)
#define topology_core_id(cpu) (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE) #define topology_core_id(cpu) (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE)
#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) #define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu])
#define topology_core_cpumask(cpu) cpumask_of_node(cpu_to_node(cpu)) #define topology_core_cpumask(cpu) cpumask_of_node(cpu_to_node(cpu))
#endif
#include <asm-generic/topology.h> #include <asm-generic/topology.h>
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#ifndef __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H
#define cpu_has_4kex 1
#define cpu_has_3k_cache 0
#define cpu_has_tx39_cache 0
#define cpu_has_counter 1
#define cpu_has_llsc 1
/*
 * We disable LL/SC on non-SMP systems, as disabling interrupts for
 * atomic access is faster than using LL/SC.
*/
#ifdef CONFIG_SMP
# define kernel_uses_llsc 1
#else
# define kernel_uses_llsc 0
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define cpu_dcache_line_size() 128
#define cpu_icache_line_size() 128
#define cpu_has_octeon_cache 1
#define cpu_has_4k_cache 0
#else
#define cpu_has_octeon_cache 0
#define cpu_has_4k_cache 1
#endif
#endif /* __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#ifndef __ASM_MACH_PARAVIRT_IRQ_H__
#define __ASM_MACH_PARAVIRT_IRQ_H__
#define NR_IRQS 64
#define MIPS_CPU_IRQ_BASE 1
#define MIPS_IRQ_PCIA (MIPS_CPU_IRQ_BASE + 8)
#define MIPS_IRQ_MBOX0 (MIPS_CPU_IRQ_BASE + 32)
#define MIPS_IRQ_MBOX1 (MIPS_CPU_IRQ_BASE + 33)
#endif /* __ASM_MACH_PARAVIRT_IRQ_H__ */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc
*/
#ifndef __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H
#define __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H
#define CP0_EBASE $15, 1
.macro kernel_entry_setup
mfc0 t0, CP0_EBASE
andi t0, t0, 0x3ff # CPUNum
beqz t0, 1f
# CPUs other than zero goto smp_bootstrap
j smp_bootstrap
1:
.endm
/*
* Do SMP slave processor setup necessary before we can safely execute
* C code.
*/
.macro smp_slave_setup
mfc0 t0, CP0_EBASE
andi t0, t0, 0x3ff # CPUNum
slti t1, t0, NR_CPUS
bnez t1, 1f
2:
di
wait
b 2b # Unknown CPU, loop forever.
1:
PTR_LA t1, paravirt_smp_sp
PTR_SLL t0, PTR_SCALESHIFT
PTR_ADDU t1, t1, t0
3:
PTR_L sp, 0(t1)
beqz sp, 3b # Spin until told to proceed.
PTR_LA t1, paravirt_smp_gp
PTR_ADDU t1, t1, t0
sync
PTR_L gp, 0(t1)
.endm
#endif /* __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2013 Cavium Networks <support@caviumnetworks.com>
*/
#ifndef __ASM_MIPS_MACH_PARAVIRT_WAR_H
#define __ASM_MIPS_MACH_PARAVIRT_WAR_H
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
#endif /* __ASM_MIPS_MACH_PARAVIRT_WAR_H */
...@@ -25,11 +25,7 @@ ...@@ -25,11 +25,7 @@
#ifndef MSP_USB_H_ #ifndef MSP_USB_H_
#define MSP_USB_H_ #define MSP_USB_H_
#ifdef CONFIG_MSP_HAS_DUAL_USB
#define NUM_USB_DEVS 2
#else
#define NUM_USB_DEVS 1 #define NUM_USB_DEVS 1
#endif
/* Register spaces for USB host 0 */ /* Register spaces for USB host 0 */
#define MSP_USB0_MAB_START (MSP_USB0_BASE + 0x0) #define MSP_USB0_MAB_START (MSP_USB0_BASE + 0x0)
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0 #define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0 #define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0 #define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0 #define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0 #define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0 #define MIPS34K_MISSED_ITLB_WAR 0
......
...@@ -10,37 +10,6 @@ ...@@ -10,37 +10,6 @@
#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
.macro kernel_entry_setup .macro kernel_entry_setup
#ifdef CONFIG_MIPS_MT_SMTC
mfc0 t0, CP0_CONFIG
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 1
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 2
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 3
and t0, 1<<2
bnez t0, 0f
9:
/* Assume we came from YAMON... */
PTR_LA v0, 0x9fc00534 /* YAMON print */
lw v0, (v0)
move a0, zero
PTR_LA a1, nonmt_processor
jal v0
PTR_LA v0, 0x9fc00520 /* YAMON exit */
lw v0, (v0)
li a0, 1
jal v0
1: b 1b
__INITDATA
nonmt_processor:
.asciz "SMTC kernel requires the MT ASE to run\n"
__FINIT
0:
#endif
.endm .endm
/* /*
......
...@@ -55,4 +55,16 @@ ...@@ -55,4 +55,16 @@
#define PIIX4_FUNC3_PMREGMISC 0x80 #define PIIX4_FUNC3_PMREGMISC 0x80
#define PIIX4_FUNC3_PMREGMISC_EN (1 << 0) #define PIIX4_FUNC3_PMREGMISC_EN (1 << 0)
/* Power Management IO Space */
#define PIIX4_FUNC3IO_PMSTS 0x00
#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS (1 << 8)
#define PIIX4_FUNC3IO_PMCNTRL 0x04
#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN (1 << 13)
#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP (0x7 << 10)
#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10)
#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_STR (0x1 << 10)
/* Data for magic special PCI cycle */
#define PIIX4_SUSPEND_MAGIC 0x00120002
#endif /* __ASM_MIPS_BOARDS_PIIX4_H */ #endif /* __ASM_MIPS_BOARDS_PIIX4_H */
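These definitions describe the PIIX4's Power Management I/O space. A hedged sketch of the register sequence a suspend implementation presumably performs with them: select the suspend type, then set SUS_EN to trigger the transition (pm_io_base, the BAR-derived I/O base, is an assumed parameter; the real sequence lives in the Malta platform code):

static void example_piix4_soft_off(unsigned long pm_io_base)
{
	u16 pmcntrl;

	pmcntrl = inw(pm_io_base + PIIX4_FUNC3IO_PMCNTRL);
	pmcntrl &= ~PIIX4_FUNC3IO_PMCNTRL_SUS_TYP;	/* clear old suspend type */
	pmcntrl |= PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF;	/* request soft-off */
	pmcntrl |= PIIX4_FUNC3IO_PMCNTRL_SUS_EN;	/* trigger the transition */
	outw(pmcntrl, pm_io_base + PIIX4_FUNC3IO_PMCNTRL);
}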
...@@ -72,7 +72,12 @@ static inline bool mips_cpc_present(void) ...@@ -72,7 +72,12 @@ static inline bool mips_cpc_present(void)
#define MIPS_CPC_COCB_OFS 0x4000 #define MIPS_CPC_COCB_OFS 0x4000
/* Macros to ease the creation of register access functions */ /* Macros to ease the creation of register access functions */
#define BUILD_CPC_R_(name, off) \ #define BUILD_CPC_R_(name, off) \
static inline u32 *addr_cpc_##name(void) \
{ \
return (u32 *)(mips_cpc_base + (off)); \
} \
\
static inline u32 read_cpc_##name(void) \ static inline u32 read_cpc_##name(void) \
{ \ { \
return __raw_readl(mips_cpc_base + (off)); \ return __raw_readl(mips_cpc_base + (off)); \
...@@ -147,4 +152,31 @@ BUILD_CPC_Cx_RW(other, 0x10) ...@@ -147,4 +152,31 @@ BUILD_CPC_Cx_RW(other, 0x10)
#define CPC_Cx_OTHER_CORENUM_SHF 16 #define CPC_Cx_OTHER_CORENUM_SHF 16
#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16) #define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16)
#ifdef CONFIG_MIPS_CPC
/**
* mips_cpc_lock_other - lock access to another core
* core: the other core to be accessed
*
* Call before operating upon a core via the 'other' register region in
* order to prevent the region being moved during access. Must be followed
* by a call to mips_cpc_unlock_other.
*/
extern void mips_cpc_lock_other(unsigned int core);
/**
* mips_cpc_unlock_other - unlock access to another core
*
* Call after operating upon another core via the 'other' register region.
* Must be called after mips_cpc_lock_other.
*/
extern void mips_cpc_unlock_other(void);
#else /* !CONFIG_MIPS_CPC */
static inline void mips_cpc_lock_other(unsigned int core) { }
static inline void mips_cpc_unlock_other(void) { }
#endif /* !CONFIG_MIPS_CPC */
#endif /* __MIPS_ASM_MIPS_CPC_H__ */ #endif /* __MIPS_ASM_MIPS_CPC_H__ */
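A sketch of the intended lock/unlock pairing; write_cpc_co_cmd() and CPC_Cx_CMD_PWRUP are assumed to be accessors and command encodings generated by the BUILD_CPC_* macros elsewhere in this header:

static void example_power_up_core(unsigned int core)
{
	if (!mips_cpc_present())
		return;

	mips_cpc_lock_other(core);		/* pin the 'other' region */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);	/* command the other core */
	mips_cpc_unlock_other();
}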
/* /*
* Definitions and declarations for MIPS MT support * Definitions and declarations for MIPS MT support that are common between
* that are common between SMTC, VSMP, and/or AP/SP * the VSMP and AP/SP kernel models.
* kernel models.
*/ */
#ifndef __ASM_MIPS_MT_H #ifndef __ASM_MIPS_MT_H
#define __ASM_MIPS_MT_H #define __ASM_MIPS_MT_H
......
...@@ -36,6 +36,8 @@ ...@@ -36,6 +36,8 @@
#define read_c0_tcbind() __read_32bit_c0_register($2, 2) #define read_c0_tcbind() __read_32bit_c0_register($2, 2)
#define write_c0_tchalt(val) __write_32bit_c0_register($2, 4, val)
#define read_c0_tccontext() __read_32bit_c0_register($2, 5) #define read_c0_tccontext() __read_32bit_c0_register($2, 5)
#define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val) #define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val)
......
...@@ -709,11 +709,18 @@ ...@@ -709,11 +709,18 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* /*
* Macros for handling the ISA mode bit for microMIPS. * Macros for handling the ISA mode bit for MIPS16 and microMIPS.
*/ */
#if defined(CONFIG_SYS_SUPPORTS_MIPS16) || \
defined(CONFIG_SYS_SUPPORTS_MICROMIPS)
#define get_isa16_mode(x) ((x) & 0x1) #define get_isa16_mode(x) ((x) & 0x1)
#define msk_isa16_mode(x) ((x) & ~0x1) #define msk_isa16_mode(x) ((x) & ~0x1)
#define set_isa16_mode(x) do { (x) |= 0x1; } while(0) #define set_isa16_mode(x) do { (x) |= 0x1; } while(0)
#else
#define get_isa16_mode(x) 0
#define msk_isa16_mode(x) (x)
#define set_isa16_mode(x) do { } while(0)
#endif
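A brief sketch of how these helpers are typically used on an EPC-style value whose bit 0 encodes the compressed-ISA mode (the pt_regs pointer is an assumed context):

static unsigned long example_fault_address(const struct pt_regs *regs)
{
	/* Bit 0 of EPC encodes the mode: get_isa16_mode() tests it, */
	/* msk_isa16_mode() strips it to recover the instruction address. */
	return msk_isa16_mode(regs->cp0_epc);
}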
/* /*
* microMIPS instructions can be 16-bit or 32-bit in length. This * microMIPS instructions can be 16-bit or 32-bit in length. This
...@@ -1007,19 +1014,8 @@ do { \ ...@@ -1007,19 +1014,8 @@ do { \
#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val)
#define read_c0_status() __read_32bit_c0_register($12, 0) #define read_c0_status() __read_32bit_c0_register($12, 0)
#ifdef CONFIG_MIPS_MT_SMTC
#define write_c0_status(val) \
do { \
__write_32bit_c0_register($12, 0, val); \
__ehb(); \
} while (0)
#else
/*
* Legacy non-SMTC code, which may be hazardous
* but which might not support EHB
*/
#define write_c0_status(val) __write_32bit_c0_register($12, 0, val) #define write_c0_status(val) __write_32bit_c0_register($12, 0, val)
#endif /* CONFIG_MIPS_MT_SMTC */
#define read_c0_cause() __read_32bit_c0_register($13, 0) #define read_c0_cause() __read_32bit_c0_register($13, 0)
#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) #define write_c0_cause(val) __write_32bit_c0_register($13, 0, val)
...@@ -1743,11 +1739,6 @@ static inline void tlb_write_random(void) ...@@ -1743,11 +1739,6 @@ static inline void tlb_write_random(void)
/* /*
* Manipulate bits in a c0 register. * Manipulate bits in a c0 register.
*/ */
#ifndef CONFIG_MIPS_MT_SMTC
/*
* SMTC Linux requires shutting-down microthread scheduling
* during CP0 register read-modify-write sequences.
*/
#define __BUILD_SET_C0(name) \ #define __BUILD_SET_C0(name) \
static inline unsigned int \ static inline unsigned int \
set_c0_##name(unsigned int set) \ set_c0_##name(unsigned int set) \
...@@ -1786,121 +1777,6 @@ change_c0_##name(unsigned int change, unsigned int val) \ ...@@ -1786,121 +1777,6 @@ change_c0_##name(unsigned int change, unsigned int val) \
return res; \ return res; \
} }
#else /* SMTC versions that manage MT scheduling */
#include <linux/irqflags.h>
/*
* This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
* header file recursion.
*/
static inline unsigned int __dmt(void)
{
int res;
__asm__ __volatile__(
" .set push \n"
" .set mips32r2 \n"
" .set noat \n"
" .word 0x41610BC1 # dmt $1 \n"
" ehb \n"
" move %0, $1 \n"
" .set pop \n"
: "=r" (res));
instruction_hazard();
return res;
}
#define __VPECONTROL_TE_SHIFT 15
#define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT)
#define __EMT_ENABLE __VPECONTROL_TE
static inline void __emt(unsigned int previous)
{
if ((previous & __EMT_ENABLE))
__asm__ __volatile__(
" .set mips32r2 \n"
" .word 0x41600be1 # emt \n"
" ehb \n"
" .set mips0 \n");
}
static inline void __ehb(void)
{
__asm__ __volatile__(
" .set mips32r2 \n"
" ehb \n" " .set mips0 \n");
}
/*
* Note that local_irq_save/restore affect TC-specific IXMT state,
* not Status.IE as in non-SMTC kernel.
*/
#define __BUILD_SET_C0(name) \
static inline unsigned int \
set_c0_##name(unsigned int set) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
res = read_c0_##name(); \
new = res | set; \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
return res; \
} \
\
static inline unsigned int \
clear_c0_##name(unsigned int clear) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
res = read_c0_##name(); \
new = res & ~clear; \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
return res; \
} \
\
static inline unsigned int \
change_c0_##name(unsigned int change, unsigned int newbits) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
\
omt = __dmt(); \
res = read_c0_##name(); \
new = res & ~change; \
new |= (newbits & change); \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
return res; \
}
#endif
__BUILD_SET_C0(status) __BUILD_SET_C0(status)
__BUILD_SET_C0(cause) __BUILD_SET_C0(cause)
__BUILD_SET_C0(config) __BUILD_SET_C0(config)
...@@ -1916,6 +1792,15 @@ __BUILD_SET_C0(brcm_cmt_ctrl) ...@@ -1916,6 +1792,15 @@ __BUILD_SET_C0(brcm_cmt_ctrl)
__BUILD_SET_C0(brcm_config) __BUILD_SET_C0(brcm_config)
__BUILD_SET_C0(brcm_mode) __BUILD_SET_C0(brcm_mode)
/*
* Return the low 10 bits of ebase (the CPUNum field).
* Note that under KVM (MIPSVZ) this returns the vcpu id.
*/
static inline unsigned int get_ebase_cpunum(void)
{
return read_c0_ebase() & 0x3ff;
}
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* _ASM_MIPSREGS_H */ #endif /* _ASM_MIPSREGS_H */
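This is the C-level counterpart of the EBase.CPUNum check done in assembly by the paravirt kernel_entry_setup earlier in this series; a trivial sketch of a caller:

static void __init example_report_cpu(void)
{
	/* EBase.CPUNum occupies the low 10 bits read above. */
	pr_info("booting on physical CPU %u\n", get_ebase_cpunum());
}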
...@@ -18,10 +18,6 @@ ...@@ -18,10 +18,6 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/hazards.h> #include <asm/hazards.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */
#include <asm-generic/mm_hooks.h> #include <asm-generic/mm_hooks.h>
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ #define TLBMISS_HANDLER_SETUP_PGD(pgd) \
...@@ -31,11 +27,15 @@ do { \ ...@@ -31,11 +27,15 @@ do { \
} while (0) } while (0)
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
#define TLBMISS_HANDLER_RESTORE() \
write_c0_xcontext((unsigned long) smp_processor_id() << \
SMP_CPUID_REGSHIFT)
#define TLBMISS_HANDLER_SETUP() \ #define TLBMISS_HANDLER_SETUP() \
do { \ do { \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \ TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
write_c0_xcontext((unsigned long) smp_processor_id() << \ TLBMISS_HANDLER_RESTORE(); \
SMP_CPUID_REGSHIFT); \
} while (0) } while (0)
#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ #else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
...@@ -47,9 +47,12 @@ do { \ ...@@ -47,9 +47,12 @@ do { \
*/ */
extern unsigned long pgd_current[]; extern unsigned long pgd_current[];
#define TLBMISS_HANDLER_SETUP() \ #define TLBMISS_HANDLER_RESTORE() \
write_c0_context((unsigned long) smp_processor_id() << \ write_c0_context((unsigned long) smp_processor_id() << \
SMP_CPUID_REGSHIFT); \ SMP_CPUID_REGSHIFT)
#define TLBMISS_HANDLER_SETUP() \
TLBMISS_HANDLER_RESTORE(); \
back_to_back_c0_hazard(); \ back_to_back_c0_hazard(); \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
...@@ -63,13 +66,6 @@ extern unsigned long pgd_current[]; ...@@ -63,13 +66,6 @@ extern unsigned long pgd_current[];
#define ASID_INC 0x10 #define ASID_INC 0x10
#define ASID_MASK 0xff0 #define ASID_MASK 0xff0
#elif defined(CONFIG_MIPS_MT_SMTC)
#define ASID_INC 0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK (smtc_asid_mask)
#define HW_ASID_MASK 0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */ #else /* FIXME: not correct for R6000 */
#define ASID_INC 0x1 #define ASID_INC 0x1
...@@ -92,7 +88,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) ...@@ -92,7 +88,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */ /* Normal, classic MIPS get_new_mmu_context */
static inline void static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
...@@ -115,12 +110,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) ...@@ -115,12 +110,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
cpu_context(cpu, mm) = asid_cache(cpu) = asid; cpu_context(cpu, mm) = asid_cache(cpu) = asid;
} }
#else /* CONFIG_MIPS_MT_SMTC */
#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))
#endif /* CONFIG_MIPS_MT_SMTC */
/* /*
* Initialize the context related info for a new mm_struct * Initialize the context related info for a new mm_struct
* instance. * instance.
...@@ -141,46 +130,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -141,46 +130,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
unsigned long flags; unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long oldasid;
unsigned long mtflags;
int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
local_irq_save(flags); local_irq_save(flags);
mtflags = dvpe();
#else /* Not SMTC */
local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */
/* Check if our ASID is of an older version and thus invalid */ /* Check if our ASID is of an older version and thus invalid */
if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
get_new_mmu_context(next, cpu); get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/*
* If the EntryHi ASID being replaced happens to be
* the value flagged at ASID recycling time as having
* an extended life, clear the bit showing it being
* in use by this "CPU", and if that's the last bit,
* free up the ASID value for use and flush any old
* instances of it from the TLB.
*/
oldasid = (read_c0_entryhi() & ASID_MASK);
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/*
* Tread softly on EntryHi, and so long as we support
* having ASID_MASK smaller than the hardware maximum,
* make sure no "soft" bits become "hard"...
*/
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
write_c0_entryhi(cpu_asid(cpu, next)); write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP_PGD(next->pgd); TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* /*
...@@ -213,34 +168,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) ...@@ -213,34 +168,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
unsigned long flags; unsigned long flags;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long oldasid;
unsigned long mtflags;
int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags); local_irq_save(flags);
/* Unconditionally get a new ASID. */ /* Unconditionally get a new ASID. */
get_new_mmu_context(next, cpu); get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
mtflags = dvpe();
oldasid = read_c0_entryhi() & ASID_MASK;
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
write_c0_entryhi(cpu_asid(cpu, next)); write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP_PGD(next->pgd); TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* mark mmu ownership change */ /* mark mmu ownership change */
...@@ -258,48 +191,15 @@ static inline void ...@@ -258,48 +191,15 @@ static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu) drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{ {
unsigned long flags; unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long oldasid;
/* Can't use spinlock because called from TLB flush within DVPE */
unsigned int prevvpe;
int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags); local_irq_save(flags);
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu); get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
prevvpe = dvpe();
oldasid = (read_c0_entryhi() & ASID_MASK);
if (smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
| cpu_asid(cpu, mm));
ehb(); /* Make sure it propagates to TCStatus */
evpe(prevvpe);
#else /* not CONFIG_MIPS_MT_SMTC */
write_c0_entryhi(cpu_asid(cpu, mm)); write_c0_entryhi(cpu_asid(cpu, mm));
#endif /* CONFIG_MIPS_MT_SMTC */
} else { } else {
/* will get a new context next time */ /* will get a new context next time */
#ifndef CONFIG_MIPS_MT_SMTC
cpu_context(cpu, mm) = 0; cpu_context(cpu, mm) = 0;
#else /* SMTC */
int i;
/* SMTC shares the TLB (and ASIDs) across VPEs */
for_each_online_cpu(i) {
if((smtc_status & SMTC_TLB_SHARED)
|| (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
cpu_context(i, mm) = 0;
}
#endif /* CONFIG_MIPS_MT_SMTC */
} }
local_irq_restore(flags); local_irq_restore(flags);
} }
......
...@@ -144,13 +144,7 @@ search_module_dbetables(unsigned long addr) ...@@ -144,13 +144,7 @@ search_module_dbetables(unsigned long addr)
#define MODULE_KERNEL_TYPE "64BIT " #define MODULE_KERNEL_TYPE "64BIT "
#endif #endif
#ifdef CONFIG_MIPS_MT_SMTC
#define MODULE_KERNEL_SMTC "MT_SMTC "
#else
#define MODULE_KERNEL_SMTC ""
#endif
#define MODULE_ARCH_VERMAGIC \ #define MODULE_ARCH_VERMAGIC \
MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
#endif /* _ASM_MODULE_H */ #endif /* _ASM_MODULE_H */
...@@ -84,7 +84,7 @@ static inline void write_msa_##name(unsigned int val) \ ...@@ -84,7 +84,7 @@ static inline void write_msa_##name(unsigned int val) \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push\n" \ " .set push\n" \
" .set msa\n" \ " .set msa\n" \
" cfcmsa $" #cs ", %0\n" \ " ctcmsa $" #cs ", %0\n" \
" .set pop\n" \ " .set pop\n" \
: : "r"(val)); \ : : "r"(val)); \
} }
...@@ -96,6 +96,13 @@ static inline void write_msa_##name(unsigned int val) \ ...@@ -96,6 +96,13 @@ static inline void write_msa_##name(unsigned int val) \
* allow compilation with toolchains that do not support MSA. Once all * allow compilation with toolchains that do not support MSA. Once all
* toolchains in use support MSA these can be removed. * toolchains in use support MSA these can be removed.
*/ */
#ifdef CONFIG_CPU_MICROMIPS
#define CFC_MSA_INSN 0x587e0056
#define CTC_MSA_INSN 0x583e0816
#else
#define CFC_MSA_INSN 0x787e0059
#define CTC_MSA_INSN 0x783e0819
#endif
#define __BUILD_MSA_CTL_REG(name, cs) \ #define __BUILD_MSA_CTL_REG(name, cs) \
static inline unsigned int read_msa_##name(void) \ static inline unsigned int read_msa_##name(void) \
...@@ -104,7 +111,8 @@ static inline unsigned int read_msa_##name(void) \ ...@@ -104,7 +111,8 @@ static inline unsigned int read_msa_##name(void) \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push\n" \ " .set push\n" \
" .set noat\n" \ " .set noat\n" \
" .word 0x787e0059 | (" #cs " << 11)\n" \ " .insn\n" \
" .word #CFC_MSA_INSN | (" #cs " << 11)\n" \
" move %0, $1\n" \ " move %0, $1\n" \
" .set pop\n" \ " .set pop\n" \
: "=r"(reg)); \ : "=r"(reg)); \
...@@ -117,7 +125,8 @@ static inline void write_msa_##name(unsigned int val) \ ...@@ -117,7 +125,8 @@ static inline void write_msa_##name(unsigned int val) \
" .set push\n" \ " .set push\n" \
" .set noat\n" \ " .set noat\n" \
" move $1, %0\n" \ " move $1, %0\n" \
" .word 0x783e0819 | (" #cs " << 6)\n" \ " .insn\n" \
" .word #CTC_MSA_INSN | (" #cs " << 6)\n" \
" .set pop\n" \ " .set pop\n" \
: : "r"(val)); \ : : "r"(val)); \
} }
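Assuming this header instantiates __BUILD_MSA_CTL_REG(csr, 1) for the MSA control/status register (the usual arrangement), the generated accessors read and write MSACSR like any other register helper; bit 24 (FS, flush subnormals to zero) is used here purely as an illustration:

static void example_enable_msa_ftz(void)
{
	unsigned int csr = read_msa_csr();	/* cfcmsa $1, $1 under the hood */

	write_msa_csr(csr | (1 << 24));		/* set MSACSR.FS */
}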
......
...@@ -146,9 +146,10 @@ static inline int hard_smp_processor_id(void) ...@@ -146,9 +146,10 @@ static inline int hard_smp_processor_id(void)
static inline int nlm_nodeid(void) static inline int nlm_nodeid(void)
{ {
uint32_t prid = read_c0_prid(); uint32_t prid = read_c0_prid() & PRID_IMP_MASK;
if ((prid & 0xff00) == PRID_IMP_NETLOGIC_XLP9XX) if ((prid == PRID_IMP_NETLOGIC_XLP9XX) ||
(prid == PRID_IMP_NETLOGIC_XLP5XX))
return (__read_32bit_c0_register($15, 1) >> 7) & 0x7; return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
else else
return (__read_32bit_c0_register($15, 1) >> 5) & 0x3; return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
......
...@@ -74,6 +74,8 @@ ...@@ -74,6 +74,8 @@
#define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 4) #define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 4)
#define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 5) #define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 5)
#define XLP_IO_SATA_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 2)
/* XLP2xx has an updated USB block */ /* XLP2xx has an updated USB block */
#define XLP2XX_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 4, i) #define XLP2XX_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 4, i)
#define XLP2XX_IO_USB_XHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 1) #define XLP2XX_IO_USB_XHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 1)
...@@ -103,13 +105,11 @@ ...@@ -103,13 +105,11 @@
#define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 5) #define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 5)
#define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 6) #define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 6)
/* Flash */
#define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 0) #define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 0)
#define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1) #define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1)
#define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2) #define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2)
/* SD flash */ #define XLP_IO_MMC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
#define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
#define XLP_IO_MMC_OFFSET(node, slot) \
((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ)
/* Things have changed drastically in XLP 9XX */ /* Things have changed drastically in XLP 9XX */
#define XLP9XX_HDR_OFFSET(n, d, f) \ #define XLP9XX_HDR_OFFSET(n, d, f) \
...@@ -120,6 +120,8 @@ ...@@ -120,6 +120,8 @@
#define XLP9XX_IO_UART_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 2) #define XLP9XX_IO_UART_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 2)
#define XLP9XX_IO_SYS_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 0) #define XLP9XX_IO_SYS_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 0)
#define XLP9XX_IO_FUSE_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 1) #define XLP9XX_IO_FUSE_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 1)
#define XLP9XX_IO_CLOCK_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 2)
#define XLP9XX_IO_POWER_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 3)
#define XLP9XX_IO_JTAG_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 4) #define XLP9XX_IO_JTAG_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 4)
#define XLP9XX_IO_PCIE_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 1, i) #define XLP9XX_IO_PCIE_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 1, i)
...@@ -135,11 +137,11 @@ ...@@ -135,11 +137,11 @@
/* XLP9XX on-chip SATA controller */ /* XLP9XX on-chip SATA controller */
#define XLP9XX_IO_SATA_OFFSET(node) XLP9XX_HDR_OFFSET(node, 3, 2) #define XLP9XX_IO_SATA_OFFSET(node) XLP9XX_HDR_OFFSET(node, 3, 2)
/* Flash */
#define XLP9XX_IO_NOR_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 0) #define XLP9XX_IO_NOR_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 0)
#define XLP9XX_IO_NAND_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 1) #define XLP9XX_IO_NAND_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 1)
#define XLP9XX_IO_SPI_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 2) #define XLP9XX_IO_SPI_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 2)
/* SD flash */ #define XLP9XX_IO_MMC_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3)
#define XLP9XX_IO_MMCSD_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3)
/* PCI config header register id's */ /* PCI config header register id's */
#define XLP_PCI_CFGREG0 0x00 #define XLP_PCI_CFGREG0 0x00
...@@ -186,8 +188,10 @@ ...@@ -186,8 +188,10 @@
#define PCI_DEVICE_ID_NLM_NOR 0x1015 #define PCI_DEVICE_ID_NLM_NOR 0x1015
#define PCI_DEVICE_ID_NLM_NAND 0x1016 #define PCI_DEVICE_ID_NLM_NAND 0x1016
#define PCI_DEVICE_ID_NLM_MMC 0x1018 #define PCI_DEVICE_ID_NLM_MMC 0x1018
#define PCI_DEVICE_ID_NLM_XHCI 0x101d #define PCI_DEVICE_ID_NLM_SATA 0x101A
#define PCI_DEVICE_ID_NLM_XHCI 0x101D
#define PCI_DEVICE_ID_XLP9XX_MMC 0x9018
#define PCI_DEVICE_ID_XLP9XX_SATA 0x901A #define PCI_DEVICE_ID_XLP9XX_SATA 0x901A
#define PCI_DEVICE_ID_XLP9XX_XHCI 0x901D #define PCI_DEVICE_ID_XLP9XX_XHCI 0x901D
......
...@@ -69,6 +69,20 @@ ...@@ -69,6 +69,20 @@
#define PCIE_9XX_BYTE_SWAP_IO_BASE 0x25e #define PCIE_9XX_BYTE_SWAP_IO_BASE 0x25e
#define PCIE_9XX_BYTE_SWAP_IO_LIM 0x25f #define PCIE_9XX_BYTE_SWAP_IO_LIM 0x25f
#define PCIE_9XX_BRIDGE_MSIX_ADDR_BASE 0x264
#define PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT 0x265
#define PCIE_9XX_MSI_STATUS 0x283
#define PCIE_9XX_MSI_EN 0x284
/* 128 MSIX vectors available in 9xx */
#define PCIE_9XX_MSIX_STATUS0 0x286
#define PCIE_9XX_MSIX_STATUSX(n) (n + 0x286)
#define PCIE_9XX_MSIX_VEC 0x296
#define PCIE_9XX_MSIX_VECX(n) (n + 0x296)
#define PCIE_9XX_INT_STATUS0 0x397
#define PCIE_9XX_INT_STATUS1 0x398
#define PCIE_9XX_INT_EN0 0x399
#define PCIE_9XX_INT_EN1 0x39a
/* other */ /* other */
#define PCIE_NLINKS 4 #define PCIE_NLINKS 4
......
...@@ -199,6 +199,10 @@ ...@@ -199,6 +199,10 @@
#define PIC_IRT_PCIE_LINK_3_INDEX 81 #define PIC_IRT_PCIE_LINK_3_INDEX 81
#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX) #define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
#define PIC_9XX_IRT_PCIE_LINK_0_INDEX 191
#define PIC_9XX_IRT_PCIE_LINK_INDEX(num) \
((num) + PIC_9XX_IRT_PCIE_LINK_0_INDEX)
#define PIC_CLOCK_TIMER 7 #define PIC_CLOCK_TIMER 7
#if !defined(LOCORE) && !defined(__ASSEMBLY__) #if !defined(LOCORE) && !defined(__ASSEMBLY__)
......
...@@ -118,6 +118,10 @@ ...@@ -118,6 +118,10 @@
#define SYS_SCRTCH3 0x4c #define SYS_SCRTCH3 0x4c
/* PLL registers XLP2XX */ /* PLL registers XLP2XX */
#define SYS_CPU_PLL_CTRL0(core) (0x1c0 + (core * 4))
#define SYS_CPU_PLL_CTRL1(core) (0x1c1 + (core * 4))
#define SYS_CPU_PLL_CTRL2(core) (0x1c2 + (core * 4))
#define SYS_CPU_PLL_CTRL3(core) (0x1c3 + (core * 4))
#define SYS_PLL_CTRL0 0x240 #define SYS_PLL_CTRL0 0x240
#define SYS_PLL_CTRL1 0x241 #define SYS_PLL_CTRL1 0x241
#define SYS_PLL_CTRL2 0x242 #define SYS_PLL_CTRL2 0x242
...@@ -147,6 +151,32 @@ ...@@ -147,6 +151,32 @@
#define SYS_SYS_PLL_MEM_REQ 0x2a3 #define SYS_SYS_PLL_MEM_REQ 0x2a3
#define SYS_PLL_MEM_STAT 0x2a4 #define SYS_PLL_MEM_STAT 0x2a4
/* PLL registers XLP9XX */
#define SYS_9XX_CPU_PLL_CTRL0(core) (0xc0 + (core * 4))
#define SYS_9XX_CPU_PLL_CTRL1(core) (0xc1 + (core * 4))
#define SYS_9XX_CPU_PLL_CTRL2(core) (0xc2 + (core * 4))
#define SYS_9XX_CPU_PLL_CTRL3(core) (0xc3 + (core * 4))
#define SYS_9XX_DMC_PLL_CTRL0 0x140
#define SYS_9XX_DMC_PLL_CTRL1 0x141
#define SYS_9XX_DMC_PLL_CTRL2 0x142
#define SYS_9XX_DMC_PLL_CTRL3 0x143
#define SYS_9XX_PLL_CTRL0 0x144
#define SYS_9XX_PLL_CTRL1 0x145
#define SYS_9XX_PLL_CTRL2 0x146
#define SYS_9XX_PLL_CTRL3 0x147
#define SYS_9XX_PLL_CTRL0_DEVX(x) (0x148 + (x) * 4)
#define SYS_9XX_PLL_CTRL1_DEVX(x) (0x149 + (x) * 4)
#define SYS_9XX_PLL_CTRL2_DEVX(x) (0x14a + (x) * 4)
#define SYS_9XX_PLL_CTRL3_DEVX(x) (0x14b + (x) * 4)
#define SYS_9XX_CPU_PLL_CHG_CTRL 0x188
#define SYS_9XX_PLL_CHG_CTRL 0x189
#define SYS_9XX_CLK_DEV_DIS 0x18a
#define SYS_9XX_CLK_DEV_SEL 0x18b
#define SYS_9XX_CLK_DEV_DIV 0x18d
#define SYS_9XX_CLK_DEV_CHG 0x18f
/* Registers changed on 9XX */ /* Registers changed on 9XX */
#define SYS_9XX_POWER_ON_RESET_CFG 0x00 #define SYS_9XX_POWER_ON_RESET_CFG 0x00
#define SYS_9XX_CHIP_RESET 0x01 #define SYS_9XX_CHIP_RESET 0x01
...@@ -170,6 +200,11 @@ ...@@ -170,6 +200,11 @@
#define nlm_get_fuse_regbase(node) \ #define nlm_get_fuse_regbase(node) \
(nlm_get_fuse_pcibase(node) + XLP_IO_PCI_HDRSZ) (nlm_get_fuse_pcibase(node) + XLP_IO_PCI_HDRSZ)
#define nlm_get_clock_pcibase(node) \
nlm_pcicfg_base(XLP9XX_IO_CLOCK_OFFSET(node))
#define nlm_get_clock_regbase(node) \
(nlm_get_clock_pcibase(node) + XLP_IO_PCI_HDRSZ)
unsigned int nlm_get_pic_frequency(int node); unsigned int nlm_get_pic_frequency(int node);
#endif #endif
#endif #endif
...@@ -58,6 +58,10 @@ ...@@ -58,6 +58,10 @@
#define PIC_I2C_1_IRQ 31 #define PIC_I2C_1_IRQ 31
#define PIC_I2C_2_IRQ 32 #define PIC_I2C_2_IRQ 32
#define PIC_I2C_3_IRQ 33 #define PIC_I2C_3_IRQ 33
#define PIC_SPI_IRQ 34
#define PIC_NAND_IRQ 37
#define PIC_SATA_IRQ 38
#define PIC_GPIO_IRQ 39
#define PIC_PCIE_LINK_MSI_IRQ_BASE 44 /* 44 - 47 MSI IRQ */ #define PIC_PCIE_LINK_MSI_IRQ_BASE 44 /* 44 - 47 MSI IRQ */
#define PIC_PCIE_LINK_MSI_IRQ(i) (44 + (i)) #define PIC_PCIE_LINK_MSI_IRQ(i) (44 + (i))
...@@ -66,8 +70,9 @@ ...@@ -66,8 +70,9 @@
#define PIC_PCIE_MSIX_IRQ_BASE 48 /* 48 - 51 MSI-X IRQ */ #define PIC_PCIE_MSIX_IRQ_BASE 48 /* 48 - 51 MSI-X IRQ */
#define PIC_PCIE_MSIX_IRQ(i) (48 + (i)) #define PIC_PCIE_MSIX_IRQ(i) (48 + (i))
#define NLM_MSIX_VEC_BASE 96 /* 96 - 127 - MSIX mapped */ /* XLP9xx and XLP8xx have 128 and 32 MSIX vectors respectively */
#define NLM_MSI_VEC_BASE 128 /* 128 -255 - MSI mapped */ #define NLM_MSIX_VEC_BASE 96 /* 96 - 223 - MSIX mapped */
#define NLM_MSI_VEC_BASE 224 /* 224 -351 - MSI mapped */
#define NLM_PIC_INDIRECT_VEC_BASE 512 #define NLM_PIC_INDIRECT_VEC_BASE 512
#define NLM_GPIO_VEC_BASE 768 #define NLM_GPIO_VEC_BASE 768
...@@ -95,17 +100,19 @@ void *xlp_dt_init(void *fdtp); ...@@ -95,17 +100,19 @@ void *xlp_dt_init(void *fdtp);
static inline int cpu_is_xlpii(void) static inline int cpu_is_xlpii(void)
{ {
int chip = read_c0_prid() & 0xff00; int chip = read_c0_prid() & PRID_IMP_MASK;
return chip == PRID_IMP_NETLOGIC_XLP2XX || return chip == PRID_IMP_NETLOGIC_XLP2XX ||
chip == PRID_IMP_NETLOGIC_XLP9XX; chip == PRID_IMP_NETLOGIC_XLP9XX ||
chip == PRID_IMP_NETLOGIC_XLP5XX;
} }
static inline int cpu_is_xlp9xx(void) static inline int cpu_is_xlp9xx(void)
{ {
int chip = read_c0_prid() & 0xff00; int chip = read_c0_prid() & PRID_IMP_MASK;
return chip == PRID_IMP_NETLOGIC_XLP9XX; return chip == PRID_IMP_NETLOGIC_XLP9XX ||
chip == PRID_IMP_NETLOGIC_XLP5XX;
} }
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* _ASM_NLM_XLP_H */ #endif /* _ASM_NLM_XLP_H */
/* /*
* asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions * asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions
* *
* Copyright (C) 2000 Geert Uytterhoeven <geert@sonycom.com> * Copyright (C) 2000 Geert Uytterhoeven <geert@linux-m68k.org>
* Sony Software Development Center Europe (SDCE), Brussels * Sony Software Development Center Europe (SDCE), Brussels
* *
* This file is based on the following documentation: * This file is based on the following documentation:
......
...@@ -211,7 +211,6 @@ union octeon_cvmemctl { ...@@ -211,7 +211,6 @@ union octeon_cvmemctl {
extern void octeon_write_lcd(const char *s); extern void octeon_write_lcd(const char *s);
extern void octeon_check_cpu_bist(void); extern void octeon_check_cpu_bist(void);
extern int octeon_get_boot_debug_flag(void);
extern int octeon_get_boot_uart(void); extern int octeon_get_boot_uart(void);
struct uart_port; struct uart_port;
......
...@@ -32,6 +32,8 @@ struct vm_area_struct; ...@@ -32,6 +32,8 @@ struct vm_area_struct;
_page_cachable_default) _page_cachable_default)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _page_cachable_default) _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \ #define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
_page_cachable_default) _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
......
/*
* Copyright (C) 2014 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __MIPS_ASM_PM_CPS_H__
#define __MIPS_ASM_PM_CPS_H__
/*
* The CM & CPC can only handle coherence & power control on a per-core basis,
* thus in an MT system the VPEs within each core are coupled and can only
* enter or exit states requiring CM or CPC assistance in unison.
*/
#ifdef CONFIG_MIPS_MT
# define coupled_coherence cpu_has_mipsmt
#else
# define coupled_coherence 0
#endif
/* Enumeration of possible PM states */
enum cps_pm_state {
CPS_PM_NC_WAIT, /* MIPS wait instruction, non-coherent */
CPS_PM_CLOCK_GATED, /* Core clock gated */
CPS_PM_POWER_GATED, /* Core power gated */
CPS_PM_STATE_COUNT,
};
/**
* cps_pm_support_state - determine whether the system supports a PM state
* @state: the state to test for support
*
* Returns true if the system supports the given state, otherwise false.
*/
extern bool cps_pm_support_state(enum cps_pm_state state);
/**
* cps_pm_enter_state - enter a PM state
* @state: the state to enter
*
* Enter the given PM state. If coupled_coherence is non-zero then it is
* expected that this function be called at approximately the same time on
* each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno.
*/
extern int cps_pm_enter_state(enum cps_pm_state state);
#endif /* __MIPS_ASM_PM_CPS_H__ */
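A sketch of an idle-path caller, preferring the lightest state and falling back to a plain wait; treating cpu_wait as the MIPS wait-function pointer is an assumption about the surrounding code:

static void example_cps_idle(void)
{
	if (cps_pm_support_state(CPS_PM_NC_WAIT) &&
	    !cps_pm_enter_state(CPS_PM_NC_WAIT))
		return;

	if (cpu_wait)
		cpu_wait();	/* fall back to an ordinary wait */
}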
/*
* Copyright (C) 2014 Imagination Technologies Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* PM helper macros for CPU power off (e.g. Suspend-to-RAM).
*/
#ifndef __ASM_PM_H
#define __ASM_PM_H
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
/* Save CPU state to stack for suspend to RAM */
.macro SUSPEND_SAVE_REGS
subu sp, PT_SIZE
/* Call preserved GPRs */
LONG_S $16, PT_R16(sp)
LONG_S $17, PT_R17(sp)
LONG_S $18, PT_R18(sp)
LONG_S $19, PT_R19(sp)
LONG_S $20, PT_R20(sp)
LONG_S $21, PT_R21(sp)
LONG_S $22, PT_R22(sp)
LONG_S $23, PT_R23(sp)
LONG_S $28, PT_R28(sp)
LONG_S $30, PT_R30(sp)
LONG_S $31, PT_R31(sp)
/* A couple of CP0 registers with space in pt_regs */
mfc0 k0, CP0_STATUS
LONG_S k0, PT_STATUS(sp)
.endm
/* Restore CPU state from stack after resume from RAM */
.macro RESUME_RESTORE_REGS_RETURN
.set push
.set noreorder
/* A couple of CP0 registers with space in pt_regs */
LONG_L k0, PT_STATUS(sp)
mtc0 k0, CP0_STATUS
/* Call preserved GPRs */
LONG_L $16, PT_R16(sp)
LONG_L $17, PT_R17(sp)
LONG_L $18, PT_R18(sp)
LONG_L $19, PT_R19(sp)
LONG_L $20, PT_R20(sp)
LONG_L $21, PT_R21(sp)
LONG_L $22, PT_R22(sp)
LONG_L $23, PT_R23(sp)
LONG_L $28, PT_R28(sp)
LONG_L $30, PT_R30(sp)
LONG_L $31, PT_R31(sp)
/* Pop and return */
jr ra
addiu sp, PT_SIZE
.set pop
.endm
/* Get address of static suspend state into t1 */
.macro LA_STATIC_SUSPEND
la t1, mips_static_suspend_state
.endm
/* Save important CPU state for early restoration to global data */
.macro SUSPEND_SAVE_STATIC
#ifdef CONFIG_EVA
/*
* Segment configuration is saved in global data where it can be easily
* reloaded without depending on the segment configuration.
*/
mfc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
LONG_S k0, SSS_SEGCTL0(t1)
mfc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
LONG_S k0, SSS_SEGCTL1(t1)
mfc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
LONG_S k0, SSS_SEGCTL2(t1)
#endif
/* save stack pointer (pointing to GPRs) */
LONG_S sp, SSS_SP(t1)
.endm
/* Restore important CPU state early from global data */
.macro RESUME_RESTORE_STATIC
#ifdef CONFIG_EVA
/*
* Segment configuration must be restored prior to any access to
* allocated memory, as it may reside outside of the legacy kernel
* segments.
*/
LONG_L k0, SSS_SEGCTL0(t1)
mtc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
LONG_L k0, SSS_SEGCTL1(t1)
mtc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
LONG_L k0, SSS_SEGCTL2(t1)
mtc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
tlbw_use_hazard
#endif
/* restore stack pointer (pointing to GPRs) */
LONG_L sp, SSS_SP(t1)
.endm
/* flush caches to make sure context has reached memory */
.macro SUSPEND_CACHE_FLUSH
.extern __wback_cache_all
.set push
.set noreorder
la t1, __wback_cache_all
LONG_L t0, 0(t1)
jalr t0
nop
.set pop
.endm
/* Save suspend state and flush data caches to RAM */
.macro SUSPEND_SAVE
SUSPEND_SAVE_REGS
LA_STATIC_SUSPEND
SUSPEND_SAVE_STATIC
SUSPEND_CACHE_FLUSH
.endm
/* Restore saved state after resume from RAM and return */
.macro RESUME_RESTORE_RETURN
LA_STATIC_SUSPEND
RESUME_RESTORE_STATIC
RESUME_RESTORE_REGS_RETURN
.endm
#else /* __ASSEMBLY__ */
/**
* struct mips_static_suspend_state - Core saved CPU state across S2R.
* @segctl: CP0 Segment control registers.
* @sp: Stack frame where GP register context is saved.
*
* This structure contains minimal CPU state that must be saved in static kernel
* data in order to be able to restore the rest of the state. This includes
* the segmentation configuration in the case of EVA being enabled, as it must
* be restored prior to any kmalloc'd memory being referenced (even the stack
* pointer).
*/
struct mips_static_suspend_state {
#ifdef CONFIG_EVA
unsigned long segctl[3];
#endif
unsigned long sp;
};
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PM_H */
...@@ -39,9 +39,6 @@ struct pt_regs { ...@@ -39,9 +39,6 @@ struct pt_regs {
unsigned long cp0_badvaddr; unsigned long cp0_badvaddr;
unsigned long cp0_cause; unsigned long cp0_cause;
unsigned long cp0_epc; unsigned long cp0_epc;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long cp0_tcstatus;
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON #ifdef CONFIG_CPU_CAVIUM_OCTEON
unsigned long long mpl[3]; /* MTM{0,1,2} */ unsigned long long mpl[3]; /* MTM{0,1,2} */
unsigned long long mtp[3]; /* MTP{0,1,2} */ unsigned long long mtp[3]; /* MTP{0,1,2} */
......
...@@ -43,11 +43,10 @@ ...@@ -43,11 +43,10 @@
: "i" (op), "R" (*(unsigned char *)(addr))) : "i" (op), "R" (*(unsigned char *)(addr)))
#ifdef CONFIG_MIPS_MT #ifdef CONFIG_MIPS_MT
/* /*
* Temporary hacks for SMTC debug. Optionally force single-threaded * Optionally force single-threaded execution during I-cache flushes.
* execution during I-cache flushes.
*/ */
#define PROTECT_CACHE_FLUSHES 1 #define PROTECT_CACHE_FLUSHES 1
#ifdef PROTECT_CACHE_FLUSHES #ifdef PROTECT_CACHE_FLUSHES
...@@ -524,6 +523,8 @@ __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, ...@@ -524,6 +523,8 @@ __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32,
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, ) __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, ) __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, ) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, ) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, ) __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
......
...@@ -69,6 +69,8 @@ ...@@ -69,6 +69,8 @@
#define SGI_EISA_IRQ SGINT_LOCAL2 + 3 /* EISA interrupts */ #define SGI_EISA_IRQ SGINT_LOCAL2 + 3 /* EISA interrupts */
#define SGI_KEYBD_IRQ SGINT_LOCAL2 + 4 /* keyboard */ #define SGI_KEYBD_IRQ SGINT_LOCAL2 + 4 /* keyboard */
#define SGI_SERIAL_IRQ SGINT_LOCAL2 + 5 /* onboard serial */ #define SGI_SERIAL_IRQ SGINT_LOCAL2 + 5 /* onboard serial */
#define SGI_GIOEXP0_IRQ (SGINT_LOCAL2 + 6) /* Indy GIO EXP0 */
#define SGI_GIOEXP1_IRQ (SGINT_LOCAL2 + 7) /* Indy GIO EXP1 */
#define ip22_is_fullhouse() (sgioc->sysid & SGIOC_SYSID_FULLHOUSE) #define ip22_is_fullhouse() (sgioc->sysid & SGIOC_SYSID_FULLHOUSE)
......
...@@ -13,17 +13,28 @@ ...@@ -13,17 +13,28 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
struct boot_config { struct vpe_boot_config {
unsigned int core;
unsigned int vpe;
unsigned long pc; unsigned long pc;
unsigned long sp; unsigned long sp;
unsigned long gp; unsigned long gp;
}; };
extern struct boot_config mips_cps_bootcfg; struct core_boot_config {
atomic_t vpe_mask;
struct vpe_boot_config *vpe_config;
};
extern struct core_boot_config *mips_cps_core_bootcfg;
extern void mips_cps_core_entry(void); extern void mips_cps_core_entry(void);
extern void mips_cps_core_init(void);
extern struct vpe_boot_config *mips_cps_boot_vpes(void);
extern bool mips_cps_smp_in_use(void);
extern void mips_cps_pm_save(void);
extern void mips_cps_pm_restore(void);
#else /* __ASSEMBLY__ */ #else /* __ASSEMBLY__ */
......
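The reworked structures above split boot configuration per core and per VPE. A sketch of how SMP setup might allocate them; ncores/nvpes discovery and error cleanup are elided, and the kcalloc-based layout is an assumption mirroring the declarations, not the authoritative implementation:

static int example_cps_alloc_bootcfg(unsigned int ncores, unsigned int nvpes)
{
	unsigned int c;

	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg)
		return -ENOMEM;

	for (c = 0; c < ncores; c++) {
		atomic_set(&mips_cps_core_bootcfg[c].vpe_mask, 0);
		mips_cps_core_bootcfg[c].vpe_config =
			kcalloc(nvpes, sizeof(struct vpe_boot_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config)
			return -ENOMEM;	/* cleanup elided in this sketch */
	}
	return 0;
}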
...@@ -26,7 +26,6 @@ struct plat_smp_ops { ...@@ -26,7 +26,6 @@ struct plat_smp_ops {
void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
void (*init_secondary)(void); void (*init_secondary)(void);
void (*smp_finish)(void); void (*smp_finish)(void);
void (*cpus_done)(void);
void (*boot_secondary)(int cpu, struct task_struct *idle); void (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void); void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus); void (*prepare_cpus)(unsigned int max_cpus);
......
...@@ -46,6 +46,9 @@ extern int __cpu_logical_map[NR_CPUS]; ...@@ -46,6 +46,9 @@ extern int __cpu_logical_map[NR_CPUS];
extern volatile cpumask_t cpu_callin_map; extern volatile cpumask_t cpu_callin_map;
/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;
extern void asmlinkage smp_bootstrap(void); extern void asmlinkage smp_bootstrap(void);
/* /*
......
#ifndef _ASM_SMTC_MT_H
#define _ASM_SMTC_MT_H
/*
* Definitions for SMTC multitasking on MIPS MT cores
*/
#include <asm/mips_mt.h>
#include <asm/smtc_ipi.h>
/*
* System-wide SMTC status information
*/
extern unsigned int smtc_status;
#define SMTC_TLB_SHARED 0x00000001
#define SMTC_MTC_ACTIVE 0x00000002
/*
* TLB/ASID Management information
*/
#define MAX_SMTC_TLBS 2
#define MAX_SMTC_ASIDS 256
#if NR_CPUS <= 8
typedef char asiduse;
#else
#if NR_CPUS <= 16
typedef short asiduse;
#else
typedef long asiduse;
#endif
#endif
/*
* VPE Management information
*/
#define MAX_SMTC_VPES MAX_SMTC_TLBS /* FIXME: May not always be true. */
extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
struct mm_struct;
struct task_struct;
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
void self_ipi(struct smtc_ipi *);
void smtc_flush_tlb_asid(unsigned long asid);
extern int smtc_build_cpu_map(int startslot);
extern void smtc_prepare_cpus(int cpus);
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);
extern void smtc_cpus_done(void);
extern void smtc_init_secondary(void);
/*
* Sharing the TLB between multiple VPEs means that the
* "random" index selection function is not allowed to
* select the current value of the Index register. To
* avoid additional TLB pressure, the Index registers
* are "parked" with an non-Valid value.
*/
#define PARKED_INDEX ((unsigned int)0x80000000)
/*
* Define low-level interrupt mask for IPIs, if necessary.
* By default, use SW interrupt 1, which requires no external
* hardware support, but which works only for single-core
* MIPS MT systems.
*/
#ifndef MIPS_CPU_IPI_IRQ
#define MIPS_CPU_IPI_IRQ 1
#endif
#endif /* _ASM_SMTC_MT_H */
/*
* Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
*/
#ifndef __ASM_SMTC_IPI_H
#define __ASM_SMTC_IPI_H
#include <linux/spinlock.h>
//#define SMTC_IPI_DEBUG
#ifdef SMTC_IPI_DEBUG
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#endif /* SMTC_IPI_DEBUG */
/*
* An IPI "message"
*/
struct smtc_ipi {
struct smtc_ipi *flink;
int type;
void *arg;
int dest;
#ifdef SMTC_IPI_DEBUG
int sender;
long stamp;
#endif /* SMTC_IPI_DEBUG */
};
/*
* Defined IPI Types
*/
#define LINUX_SMP_IPI 1
#define SMTC_CLOCK_TICK 2
#define IRQ_AFFINITY_IPI 3
/*
* A queue of IPI messages
*/
struct smtc_ipi_q {
struct smtc_ipi *head;
spinlock_t lock;
struct smtc_ipi *tail;
int depth;
int resched_flag; /* reschedule already queued */
};
static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->head == NULL)
q->head = q->tail = p;
else
q->tail->flink = p;
p->flink = NULL;
q->tail = p;
q->depth++;
#ifdef SMTC_IPI_DEBUG
p->sender = read_c0_tcbind();
p->stamp = read_c0_count();
#endif /* SMTC_IPI_DEBUG */
spin_unlock_irqrestore(&q->lock, flags);
}
static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
{
struct smtc_ipi *p;
if (q->head == NULL)
p = NULL;
else {
p = q->head;
q->head = q->head->flink;
q->depth--;
/* Arguably unnecessary, but leaves queue cleaner */
if (q->head == NULL)
q->tail = NULL;
}
return p;
}
static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
{
unsigned long flags;
struct smtc_ipi *p;
spin_lock_irqsave(&q->lock, flags);
p = __smtc_ipi_dq(q);
spin_unlock_irqrestore(&q->lock, flags);
return p;
}
static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->head == NULL) {
q->head = q->tail = p;
p->flink = NULL;
} else {
p->flink = q->head;
q->head = p;
}
q->depth++;
spin_unlock_irqrestore(&q->lock, flags);
}
static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&q->lock, flags);
retval = q->depth;
spin_unlock_irqrestore(&q->lock, flags);
return retval;
}
extern void smtc_send_ipi(int cpu, int type, unsigned int action);
#endif /* __ASM_SMTC_IPI_H */
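As a usage sketch only: a sender fills in a message and FIFO-enqueues it with smtc_ipi_nq(), the receiving TC drains the queue with smtc_ipi_dq(), and smtc_ipi_req() pushes a message back at the head when it cannot be handled yet. The demo_* names are illustrative, not kernel symbols, and the queue lock is assumed to have been initialized with spin_lock_init():
static struct smtc_ipi_q demo_ipiq;	/* assume spin_lock_init() was run */

static void demo_send_tick(struct smtc_ipi *msg, int cpu)
{
	msg->type = SMTC_CLOCK_TICK;
	msg->arg = NULL;
	msg->dest = cpu;
	smtc_ipi_nq(&demo_ipiq, msg);	/* enqueue at tail, FIFO order */
}

static void demo_drain(void)
{
	struct smtc_ipi *msg;

	while ((msg = smtc_ipi_dq(&demo_ipiq)) != NULL) {
		if (msg->type != SMTC_CLOCK_TICK) {
			smtc_ipi_req(&demo_ipiq, msg);	/* requeue at head */
			break;
		}
		/* ... handle the tick, then recycle msg ... */
	}
}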
/*
* Definitions for SMTC /proc entries
* Copyright(C) 2005 MIPS Technologies Inc.
*/
#ifndef __ASM_SMTC_PROC_H
#define __ASM_SMTC_PROC_H
/*
* per-"CPU" statistics
*/
struct smtc_cpu_proc {
unsigned long timerints;
unsigned long selfipis;
};
extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
/* Count of recoveries of "stolen" FPU access rights on 34K */
extern atomic_t smtc_fpu_recoveries;
#endif /* __ASM_SMTC_PROC_H */
...@@ -19,22 +19,12 @@ ...@@ -19,22 +19,12 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
/* #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
* For SMTC kernel, global IE should be left set, and interrupts
* controlled exclusively via IXMT.
*/
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f #define STATMASK 0x3f
#else #else
#define STATMASK 0x1f #define STATMASK 0x1f
#endif #endif
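For reference, a sketch of the low Status bits these masks cover (MIPS32/64 encoding; on R3000-class CPUs bits 0-5 instead hold the three-deep KU/IE stack, hence 0x3f). The DEMO_ names are illustrative stand-ins, not the kernel's ST0_* symbols:
/* Status register, low bits (MIPS32/64): */
#define DEMO_ST_IE	(1 << 0)	/* global interrupt enable */
#define DEMO_ST_EXL	(1 << 1)	/* exception level */
#define DEMO_ST_ERL	(1 << 2)	/* error level */
#define DEMO_ST_KSU	(3 << 3)	/* kernel/supervisor/user mode */
/* 0x1f = KSU|ERL|EXL|IE; SMTC's 0x1e left IE out because interrupts
 * were masked per-TC through TCStatus.IXMT instead. */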
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
.macro SAVE_AT .macro SAVE_AT
.set push .set push
.set noat .set noat
...@@ -186,16 +176,6 @@ ...@@ -186,16 +176,6 @@
mfc0 v1, CP0_STATUS mfc0 v1, CP0_STATUS
LONG_S $2, PT_R2(sp) LONG_S $2, PT_R2(sp)
LONG_S v1, PT_STATUS(sp) LONG_S v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Ideally, these instructions would be shuffled in
* to cover the pipeline delay.
*/
.set mips32
mfc0 k0, CP0_TCSTATUS
.set mips0
LONG_S k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp) LONG_S $4, PT_R4(sp)
mfc0 v1, CP0_CAUSE mfc0 v1, CP0_CAUSE
LONG_S $5, PT_R5(sp) LONG_S $5, PT_R5(sp)
...@@ -321,36 +301,6 @@ ...@@ -321,36 +301,6 @@
.set push .set push
.set reorder .set reorder
.set noat .set noat
#ifdef CONFIG_MIPS_MT_SMTC
.set mips32r2
/*
* We need to make sure the read-modify-write
* of Status below isn't perturbed by an interrupt
* or cross-TC access, so we need to do at least a DMT,
* protected by an interrupt-inhibit. But setting IXMT
* also creates a few-cycle window where an IPI could
* be queued and not be detected before potentially
* returning to a WAIT or user-mode loop. It must be
* replayed.
*
* We're in the middle of a context switch, and
* we can't dispatch it directly without trashing
* some registers, so we'll try to detect this unlikely
* case and program a software interrupt in the VPE,
* as would be done for a cross-VPE IPI. To accommodate
* the handling of that case, we're doing a DVPE instead
* of just a DMT here to protect against other threads.
* This is a lot of cruft to cover a tiny window.
* If you can find a better design, implement it!
*
*/
mfc0 v0, CP0_TCSTATUS
ori v0, TCSTATUS_IXMT
mtc0 v0, CP0_TCSTATUS
_ehb
DVPE 5 # dvpe a1
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 a0, CP0_STATUS mfc0 a0, CP0_STATUS
ori a0, STATMASK ori a0, STATMASK
xori a0, STATMASK xori a0, STATMASK
...@@ -362,59 +312,6 @@ ...@@ -362,59 +312,6 @@
and v0, v1 and v0, v1
or v0, a0 or v0, a0
mtc0 v0, CP0_STATUS mtc0 v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Only after EXL/ERL have been restored to status can we
* restore TCStatus.IXMT.
*/
LONG_L v1, PT_TCSTATUS(sp)
_ehb
mfc0 a0, CP0_TCSTATUS
andi v1, TCSTATUS_IXMT
bnez v1, 0f
/*
* We'd like to detect any IPIs queued in the tiny window
 * above and request a software interrupt to service them
* when we ERET.
*
* Computing the offset into the IPIQ array of the executing
* TC's IPI queue in-line would be tedious. We use part of
* the TCContext register to hold 16 bits of offset that we
* can add in-line to find the queue head.
*/
mfc0 v0, CP0_TCCONTEXT
la a2, IPIQ
srl v0, v0, 16
addu a2, a2, v0
LONG_L v0, 0(a2)
beqz v0, 0f
/*
* If we have a queue, provoke dispatch within the VPE by setting C_SW1
*/
mfc0 v0, CP0_CAUSE
ori v0, v0, C_SW1
mtc0 v0, CP0_CAUSE
0:
/*
* This test should really never branch but
* let's be prudent here. Having atomized
* the shared register modifications, we can
* now EVPE, and must do so before interrupts
* are potentially re-enabled.
*/
andi a1, a1, MVPCONTROL_EVP
beqz a1, 1f
evpe
1:
/* We know that TCStatus.IXMT should be set from above */
xori a0, a0, TCSTATUS_IXMT
or a0, a0, v1
mtc0 a0, CP0_TCSTATUS
_ehb
.set mips0
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_L v1, PT_EPC(sp) LONG_L v1, PT_EPC(sp)
MTC0 v1, CP0_EPC MTC0 v1, CP0_EPC
LONG_L $31, PT_R31(sp) LONG_L $31, PT_R31(sp)
...@@ -467,33 +364,11 @@ ...@@ -467,33 +364,11 @@
* Set cp0 enable bit as sign that we're running on the kernel stack * Set cp0 enable bit as sign that we're running on the kernel stack
*/ */
.macro CLI .macro CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | STATMASK li t1, ST0_CU0 | STATMASK
or t0, t1 or t0, t1
xori t0, STATMASK xori t0, STATMASK
mtc0 t0, CP0_STATUS mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
/*
* For SMTC, we need to set privilege
* and disable interrupts only for the
* current TC, using the TCStatus register.
*/
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU, leave IXMT */
xori t0, 0x00001800
mtc0 t0, CP0_TCSTATUS
_ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
ori t0, ST0_EXL | ST0_ERL
xori t0, ST0_EXL | ST0_ERL
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
irq_disable_hazard irq_disable_hazard
.endm .endm
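The or/xori pair in CLI is the standard branch-free idiom for forcing a bit field to zero: setting the mask bits first guarantees the subsequent toggle clears them regardless of their old value. A standalone check of the idiom in plain C, with illustrative values:
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t status = 0xfc43ff1bu;		/* arbitrary sample value */
	uint32_t mask = 0x1f;			/* STATMASK */

	/* CLI: (x | m) ^ m clears every bit in m. */
	assert(((status | mask) ^ mask) == (status & ~mask));

	/* STI: xori with (m & ~1) leaves bit 0 (IE) set by the "or". */
	assert((((status | mask) ^ (mask & ~1u)) & 1u) == 1u);
	return 0;
}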
...@@ -502,35 +377,11 @@ ...@@ -502,35 +377,11 @@
* Set cp0 enable bit as sign that we're running on the kernel stack * Set cp0 enable bit as sign that we're running on the kernel stack
*/ */
.macro STI .macro STI
#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | STATMASK li t1, ST0_CU0 | STATMASK
or t0, t1 or t0, t1
xori t0, STATMASK & ~1 xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
/*
* For SMTC, we need to set privilege
* and enable interrupts only for the
* current TC, using the TCStatus register.
*/
_ehb
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU *and* IXMT */
xori t0, 0x00001c00
mtc0 t0, CP0_TCSTATUS
_ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
ori t0, ST0_EXL
xori t0, ST0_EXL
mtc0 t0, CP0_STATUS
/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
#endif /* CONFIG_MIPS_MT_SMTC */
irq_enable_hazard irq_enable_hazard
.endm .endm
...@@ -540,32 +391,6 @@ ...@@ -540,32 +391,6 @@
* Set cp0 enable bit as sign that we're running on the kernel stack * Set cp0 enable bit as sign that we're running on the kernel stack
*/ */
.macro KMODE .macro KMODE
#ifdef CONFIG_MIPS_MT_SMTC
/*
* This gets baroque in SMTC. We want to
* protect the non-atomic clearing of EXL
* with DMT/EMT, but we don't want to take
* an interrupt while DMT is still in effect.
*/
/* KMODE gets invoked from both reorder and noreorder code */
.set push
.set mips32r2
.set noreorder
mfc0 v0, CP0_TCSTATUS
andi v1, v0, TCSTATUS_IXMT
ori v0, TCSTATUS_IXMT
mtc0 v0, CP0_TCSTATUS
_ehb
DMT 2 # dmt v0
/*
* We don't know a priori if ra is "live"
*/
move t0, ra
jal mips_ihb
nop /* delay slot */
move ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t0, CP0_STATUS mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | (STATMASK & ~1) li t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
...@@ -576,25 +401,6 @@ ...@@ -576,25 +401,6 @@
or t0, t1 or t0, t1
xori t0, STATMASK & ~1 xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS mtc0 t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
_ehb
andi v0, v0, VPECONTROL_TE
beqz v0, 2f
nop /* delay slot */
emt
2:
mfc0 v0, CP0_TCSTATUS
/* Clear IXMT, then OR in previous value */
ori v0, TCSTATUS_IXMT
xori v0, TCSTATUS_IXMT
or v0, v1, v0
mtc0 v0, CP0_TCSTATUS
/*
* irq_disable_hazard below should expand to EHB
* on 24K/34K CPUS
*/
.set pop
#endif /* CONFIG_MIPS_MT_SMTC */
irq_disable_hazard irq_disable_hazard
.endm .endm
......
...@@ -159,11 +159,7 @@ static inline struct thread_info *current_thread_info(void) ...@@ -159,11 +159,7 @@ static inline struct thread_info *current_thread_info(void)
* We stash processor id into a COP0 register to retrieve it fast * We stash processor id into a COP0 register to retrieve it fast
* at kernel exception entry. * at kernel exception entry.
*/ */
#if defined(CONFIG_MIPS_MT_SMTC) #if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define SMP_CPUID_REG 2, 2 /* TCBIND */
#define ASM_SMP_CPUID_REG $2, 2
#define SMP_CPUID_PTRSHIFT 19
#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define SMP_CPUID_REG 20, 0 /* XCONTEXT */ #define SMP_CPUID_REG 20, 0 /* XCONTEXT */
#define ASM_SMP_CPUID_REG $20 #define ASM_SMP_CPUID_REG $20
#define SMP_CPUID_PTRSHIFT 48 #define SMP_CPUID_PTRSHIFT 48
...@@ -179,13 +175,8 @@ static inline struct thread_info *current_thread_info(void) ...@@ -179,13 +175,8 @@ static inline struct thread_info *current_thread_info(void)
#define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2) #define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2)
#endif #endif
#ifdef CONFIG_MIPS_MT_SMTC
#define ASM_CPUID_MFC0 mfc0
#define UASM_i_CPUID_MFC0 uasm_i_mfc0
#else
#define ASM_CPUID_MFC0 MFC0 #define ASM_CPUID_MFC0 MFC0
#define UASM_i_CPUID_MFC0 UASM_i_MFC0 #define UASM_i_CPUID_MFC0 UASM_i_MFC0
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */ #endif /* _ASM_THREAD_INFO_H */
...@@ -52,14 +52,11 @@ extern int (*perf_irq)(void); ...@@ -52,14 +52,11 @@ extern int (*perf_irq)(void);
*/ */
extern unsigned int __weak get_c0_compare_int(void); extern unsigned int __weak get_c0_compare_int(void);
extern int r4k_clockevent_init(void); extern int r4k_clockevent_init(void);
extern int smtc_clockevent_init(void);
extern int gic_clockevent_init(void); extern int gic_clockevent_init(void);
static inline int mips_clockevent_init(void) static inline int mips_clockevent_init(void)
{ {
#ifdef CONFIG_MIPS_MT_SMTC #if defined(CONFIG_CEVT_GIC)
return smtc_clockevent_init();
#elif defined(CONFIG_CEVT_GIC)
return (gic_clockevent_init() | r4k_clockevent_init()); return (gic_clockevent_init() | r4k_clockevent_init());
#elif defined(CONFIG_CEVT_R4K) #elif defined(CONFIG_CEVT_R4K)
return r4k_clockevent_init(); return r4k_clockevent_init();
......
...@@ -4,12 +4,16 @@ ...@@ -4,12 +4,16 @@
* for more details. * for more details.
* *
* Copyright (C) 1998, 1999, 2003 by Ralf Baechle * Copyright (C) 1998, 1999, 2003 by Ralf Baechle
* Copyright (C) 2014 by Maciej W. Rozycki
*/ */
#ifndef _ASM_TIMEX_H #ifndef _ASM_TIMEX_H
#define _ASM_TIMEX_H #define _ASM_TIMEX_H
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h> #include <asm/cpu-features.h>
#include <asm/mipsregs.h> #include <asm/mipsregs.h>
#include <asm/cpu-type.h> #include <asm/cpu-type.h>
...@@ -45,29 +49,54 @@ typedef unsigned int cycles_t; ...@@ -45,29 +49,54 @@ typedef unsigned int cycles_t;
* However for now the implementaton of this function doesn't get these * However for now the implementaton of this function doesn't get these
* fine details right. * fine details right.
*/ */
static inline cycles_t get_cycles(void) static inline int can_use_mips_counter(unsigned int prid)
{ {
switch (boot_cpu_type()) { int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY;
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
if ((read_c0_prid() & 0xff) >= 0x0050)
return read_c0_count();
break;
case CPU_R4000PC: if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter)
case CPU_R4000SC: return 0;
case CPU_R4000MC: else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r)
break; return 1;
else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp))
return 1;
/* Make sure we don't peek at cpu_data[0].options in the fast path! */
if (!__builtin_constant_p(cpu_has_counter))
asm volatile("" : "=m" (cpu_data[0].options));
if (likely(cpu_has_counter &&
prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
return 1;
else
return 0;
}
default: static inline cycles_t get_cycles(void)
if (cpu_has_counter) {
return read_c0_count(); if (can_use_mips_counter(read_c0_prid()))
break; return read_c0_count();
} else
return 0; /* no usable counter */
}
/*
* Like get_cycles - but where c0_count is not available we desperately
* use c0_random in an attempt to get at least a little bit of entropy.
*
* R6000 and R6000A neither have a count register nor a random register.
* That leaves no entropy source in the CPU itself.
*/
static inline unsigned long random_get_entropy(void)
{
unsigned int prid = read_c0_prid();
unsigned int imp = prid & PRID_IMP_MASK;
return 0; /* no usable counter */ if (can_use_mips_counter(prid))
return read_c0_count();
else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
return read_c0_random();
else
return 0; /* no usable register */
} }
#define random_get_entropy random_get_entropy
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
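The __builtin_constant_p() ladder in can_use_mips_counter() exists so that, whenever the kernel configuration pins cpu_has_counter or cpu_has_mips_r to a constant, the whole test folds away at compile time and only genuinely configurable kernels pay for the runtime probe. A standalone sketch of the same dispatch pattern; DEMO_HAS_COUNTER and runtime_probe() are illustrative stand-ins, not kernel symbols:
#include <stdio.h>

#define DEMO_HAS_COUNTER 1	/* pretend Kconfig fixed this */

static int runtime_probe(void)
{
	return 1;		/* imagine reading a CPU ID register */
}

static int can_use_counter(void)
{
	if (__builtin_constant_p(DEMO_HAS_COUNTER) && !DEMO_HAS_COUNTER)
		return 0;	/* folds to "return 0" at compile time */
	if (__builtin_constant_p(DEMO_HAS_COUNTER) && DEMO_HAS_COUNTER)
		return 1;	/* folds to "return 1" at compile time */
	return runtime_probe();	/* only reached when non-constant */
}

int main(void)
{
	printf("counter usable: %d\n", can_use_counter());
	return 0;
}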
...@@ -55,6 +55,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) ...@@ -55,6 +55,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \ #define Ip_u2u1u3(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u2u1(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u1u2(op) \ #define Ip_u3u1u2(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
...@@ -74,6 +77,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ ...@@ -74,6 +77,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
#define Ip_u1u2(op) \ #define Ip_u1u2(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u2u1(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u1s2(op) \ #define Ip_u1s2(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, signed int b) void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
...@@ -99,6 +105,7 @@ Ip_u2u1s3(_daddiu); ...@@ -99,6 +105,7 @@ Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu); Ip_u3u1u2(_daddu);
Ip_u2u1msbu3(_dins); Ip_u2u1msbu3(_dins);
Ip_u2u1msbu3(_dinsm); Ip_u2u1msbu3(_dinsm);
Ip_u1u2(_divu);
Ip_u1u2u3(_dmfc0); Ip_u1u2u3(_dmfc0);
Ip_u1u2u3(_dmtc0); Ip_u1u2u3(_dmtc0);
Ip_u2u1u3(_drotr); Ip_u2u1u3(_drotr);
...@@ -114,16 +121,22 @@ Ip_u2u1msbu3(_ext); ...@@ -114,16 +121,22 @@ Ip_u2u1msbu3(_ext);
Ip_u2u1msbu3(_ins); Ip_u2u1msbu3(_ins);
Ip_u1(_j); Ip_u1(_j);
Ip_u1(_jal); Ip_u1(_jal);
Ip_u2u1(_jalr);
Ip_u1(_jr); Ip_u1(_jr);
Ip_u2s3u1(_lb);
Ip_u2s3u1(_ld); Ip_u2s3u1(_ld);
Ip_u3u1u2(_ldx); Ip_u3u1u2(_ldx);
Ip_u2s3u1(_lh);
Ip_u2s3u1(_ll); Ip_u2s3u1(_ll);
Ip_u2s3u1(_lld); Ip_u2s3u1(_lld);
Ip_u1s2(_lui); Ip_u1s2(_lui);
Ip_u2s3u1(_lw); Ip_u2s3u1(_lw);
Ip_u3u1u2(_lwx); Ip_u3u1u2(_lwx);
Ip_u1u2u3(_mfc0); Ip_u1u2u3(_mfc0);
Ip_u1(_mfhi);
Ip_u1(_mflo);
Ip_u1u2u3(_mtc0); Ip_u1u2u3(_mtc0);
Ip_u3u1u2(_mul);
Ip_u3u1u2(_or); Ip_u3u1u2(_or);
Ip_u2u1u3(_ori); Ip_u2u1u3(_ori);
Ip_u2s3u1(_pref); Ip_u2s3u1(_pref);
...@@ -133,17 +146,25 @@ Ip_u2s3u1(_sc); ...@@ -133,17 +146,25 @@ Ip_u2s3u1(_sc);
Ip_u2s3u1(_scd); Ip_u2s3u1(_scd);
Ip_u2s3u1(_sd); Ip_u2s3u1(_sd);
Ip_u2u1u3(_sll); Ip_u2u1u3(_sll);
Ip_u3u2u1(_sllv);
Ip_u2u1s3(_sltiu);
Ip_u3u1u2(_sltu);
Ip_u2u1u3(_sra); Ip_u2u1u3(_sra);
Ip_u2u1u3(_srl); Ip_u2u1u3(_srl);
Ip_u3u2u1(_srlv);
Ip_u3u1u2(_subu); Ip_u3u1u2(_subu);
Ip_u2s3u1(_sw); Ip_u2s3u1(_sw);
Ip_u1(_sync);
Ip_u1(_syscall); Ip_u1(_syscall);
Ip_0(_tlbp); Ip_0(_tlbp);
Ip_0(_tlbr); Ip_0(_tlbr);
Ip_0(_tlbwi); Ip_0(_tlbwi);
Ip_0(_tlbwr); Ip_0(_tlbwr);
Ip_u1(_wait);
Ip_u2u1(_wsbh);
Ip_u3u1u2(_xor); Ip_u3u1u2(_xor);
Ip_u2u1u3(_xori); Ip_u2u1u3(_xori);
Ip_u2u1(_yield);
/* Handle labels. */ /* Handle labels. */
...@@ -264,6 +285,8 @@ void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, ...@@ -264,6 +285,8 @@ void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid); unsigned int bit, int lid);
void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid); unsigned int bit, int lid);
void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
unsigned int r2, int lid);
void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
......
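As a sketch of how the newly declared opcodes get used (kernel context, assuming <asm/uasm.h>): callers such as the BPF JIT chain the generated uasm_i_* helpers to emit machine code into a buffer. Register numbers below follow the o32 ABI; the demo_* names and the stub itself are illustrative only:
#include <asm/uasm.h>

static u32 demo_buf[8];

static void demo_emit_udiv(void)
{
	u32 *p = demo_buf;

	uasm_i_divu(&p, 4, 5);	/* LO = a0 / a1, HI = a0 % a1 */
	uasm_i_mflo(&p, 2);	/* v0 = quotient */
	uasm_i_jr(&p, 31);	/* return through ra */
	uasm_i_nop(&p);		/* branch delay slot */
}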
...@@ -4,6 +4,7 @@ include include/uapi/asm-generic/Kbuild.asm ...@@ -4,6 +4,7 @@ include include/uapi/asm-generic/Kbuild.asm
generic-y += auxvec.h generic-y += auxvec.h
generic-y += ipcbuf.h generic-y += ipcbuf.h
header-y += bitfield.h
header-y += bitsperlong.h header-y += bitsperlong.h
header-y += break.h header-y += break.h
header-y += byteorder.h header-y += byteorder.h
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2014 by Ralf Baechle <ralf@linux-mips.org>
*/
#ifndef __UAPI_ASM_BITFIELD_H
#define __UAPI_ASM_BITFIELD_H
/*
 * Damn ... bitfields depend on byteorder :-(
 */
#ifdef __MIPSEB__
#define __BITFIELD_FIELD(field, more) \
field; \
more
#elif defined(__MIPSEL__)
#define __BITFIELD_FIELD(field, more) \
more \
field;
#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
#endif
#endif /* __UAPI_ASM_BITFIELD_H */
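Usage is visible in inst.h below: each instruction-format struct lists its fields from the most significant bit down and lets the macro reverse them on little-endian builds. A minimal sketch mirroring j_format; the struct name is illustrative:
struct demo_j_format {
	__BITFIELD_FIELD(unsigned int opcode : 6,	/* bits 31..26 */
	__BITFIELD_FIELD(unsigned int target : 26,	/* bits 25..0 */
	;))
};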
...@@ -13,6 +13,8 @@ ...@@ -13,6 +13,8 @@
#ifndef _UAPI_ASM_INST_H #ifndef _UAPI_ASM_INST_H
#define _UAPI_ASM_INST_H #define _UAPI_ASM_INST_H
#include <asm/bitfield.h>
/* /*
* Major opcodes; before MIPS IV cop1x was called cop3. * Major opcodes; before MIPS IV cop1x was called cop3.
*/ */
...@@ -74,16 +76,17 @@ enum spec2_op { ...@@ -74,16 +76,17 @@ enum spec2_op {
enum spec3_op { enum spec3_op {
ext_op, dextm_op, dextu_op, dext_op, ext_op, dextm_op, dextu_op, dext_op,
ins_op, dinsm_op, dinsu_op, dins_op, ins_op, dinsm_op, dinsu_op, dins_op,
lx_op = 0x0a, lwle_op = 0x19, yield_op = 0x09, lx_op = 0x0a,
lwre_op = 0x1a, cachee_op = 0x1b, lwle_op = 0x19, lwre_op = 0x1a,
sbe_op = 0x1c, she_op = 0x1d, cachee_op = 0x1b, sbe_op = 0x1c,
sce_op = 0x1e, swe_op = 0x1f, she_op = 0x1d, sce_op = 0x1e,
bshfl_op = 0x20, swle_op = 0x21, swe_op = 0x1f, bshfl_op = 0x20,
swre_op = 0x22, prefe_op = 0x23, swle_op = 0x21, swre_op = 0x22,
dbshfl_op = 0x24, lbue_op = 0x28, prefe_op = 0x23, dbshfl_op = 0x24,
lhue_op = 0x29, lbe_op = 0x2c, lbue_op = 0x28, lhue_op = 0x29,
lhe_op = 0x2d, lle_op = 0x2e, lbe_op = 0x2c, lhe_op = 0x2d,
lwe_op = 0x2f, rdhwr_op = 0x3b lle_op = 0x2e, lwe_op = 0x2f,
rdhwr_op = 0x3b
}; };
/* /*
...@@ -125,7 +128,8 @@ enum bcop_op { ...@@ -125,7 +128,8 @@ enum bcop_op {
enum cop0_coi_func { enum cop0_coi_func {
tlbr_op = 0x01, tlbwi_op = 0x02, tlbr_op = 0x01, tlbwi_op = 0x02,
tlbwr_op = 0x06, tlbp_op = 0x08, tlbwr_op = 0x06, tlbp_op = 0x08,
rfe_op = 0x10, eret_op = 0x18 rfe_op = 0x10, eret_op = 0x18,
wait_op = 0x20,
}; };
/* /*
...@@ -201,6 +205,16 @@ enum lx_func { ...@@ -201,6 +205,16 @@ enum lx_func {
lbx_op = 0x16, lbx_op = 0x16,
}; };
/*
* BSHFL opcodes
*/
enum bshfl_func {
wsbh_op = 0x2,
dshd_op = 0x5,
seb_op = 0x10,
seh_op = 0x18,
};
/* /*
* (microMIPS) Major opcodes. * (microMIPS) Major opcodes.
*/ */
...@@ -244,17 +258,22 @@ enum mm_32i_minor_op { ...@@ -244,17 +258,22 @@ enum mm_32i_minor_op {
enum mm_32a_minor_op { enum mm_32a_minor_op {
mm_sll32_op = 0x000, mm_sll32_op = 0x000,
mm_ins_op = 0x00c, mm_ins_op = 0x00c,
mm_sllv32_op = 0x010,
mm_ext_op = 0x02c, mm_ext_op = 0x02c,
mm_pool32axf_op = 0x03c, mm_pool32axf_op = 0x03c,
mm_srl32_op = 0x040, mm_srl32_op = 0x040,
mm_sra_op = 0x080, mm_sra_op = 0x080,
mm_srlv32_op = 0x090,
mm_rotr_op = 0x0c0, mm_rotr_op = 0x0c0,
mm_lwxs_op = 0x118, mm_lwxs_op = 0x118,
mm_addu32_op = 0x150, mm_addu32_op = 0x150,
mm_subu32_op = 0x1d0, mm_subu32_op = 0x1d0,
mm_wsbh_op = 0x1ec,
mm_mul_op = 0x210,
mm_and_op = 0x250, mm_and_op = 0x250,
mm_or32_op = 0x290, mm_or32_op = 0x290,
mm_xor32_op = 0x310, mm_xor32_op = 0x310,
mm_sltu_op = 0x390,
}; };
/* /*
...@@ -294,15 +313,20 @@ enum mm_32axf_minor_op { ...@@ -294,15 +313,20 @@ enum mm_32axf_minor_op {
mm_mfc0_op = 0x003, mm_mfc0_op = 0x003,
mm_mtc0_op = 0x00b, mm_mtc0_op = 0x00b,
mm_tlbp_op = 0x00d, mm_tlbp_op = 0x00d,
mm_mfhi32_op = 0x035,
mm_jalr_op = 0x03c, mm_jalr_op = 0x03c,
mm_tlbr_op = 0x04d, mm_tlbr_op = 0x04d,
mm_mflo32_op = 0x075,
mm_jalrhb_op = 0x07c, mm_jalrhb_op = 0x07c,
mm_tlbwi_op = 0x08d, mm_tlbwi_op = 0x08d,
mm_tlbwr_op = 0x0cd, mm_tlbwr_op = 0x0cd,
mm_jalrs_op = 0x13c, mm_jalrs_op = 0x13c,
mm_jalrshb_op = 0x17c, mm_jalrshb_op = 0x17c,
mm_sync_op = 0x1ad,
mm_syscall_op = 0x22d, mm_syscall_op = 0x22d,
mm_wait_op = 0x24d,
mm_eret_op = 0x3cd, mm_eret_op = 0x3cd,
mm_divu_op = 0x5dc,
}; };
/* /*
...@@ -480,24 +504,6 @@ enum MIPS6e_i8_func { ...@@ -480,24 +504,6 @@ enum MIPS6e_i8_func {
*/ */
#define MM_NOP16 0x0c00 #define MM_NOP16 0x0c00
/*
 * Damn ... bitfields depend on byteorder :-(
*/
#ifdef __MIPSEB__
#define __BITFIELD_FIELD(field, more) \
field; \
more
#elif defined(__MIPSEL__)
#define __BITFIELD_FIELD(field, more) \
more \
field;
#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
#endif
struct j_format { struct j_format {
__BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */ __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
__BITFIELD_FIELD(unsigned int target : 26, __BITFIELD_FIELD(unsigned int target : 26,
......
#include <asm-generic/kvm_para.h> #ifndef _UAPI_ASM_MIPS_KVM_PARA_H
#define _UAPI_ASM_MIPS_KVM_PARA_H
#endif /* _UAPI_ASM_MIPS_KVM_PARA_H */
...@@ -14,9 +14,12 @@ ...@@ -14,9 +14,12 @@
/* /*
* We don't use int-l64.h for the kernel anymore but still use it for * We don't use int-l64.h for the kernel anymore but still use it for
* userspace to avoid code changes. * userspace to avoid code changes.
*
* However, some user programs (e.g. perf) may not want this. They can
* flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
*/ */
#ifndef __KERNEL__ #ifndef __KERNEL__
# if _MIPS_SZLONG == 64 # if _MIPS_SZLONG == 64 && !defined(__SANE_USERSPACE_TYPES__)
# include <asm-generic/int-l64.h> # include <asm-generic/int-l64.h>
# else # else
# include <asm-generic/int-ll64.h> # include <asm-generic/int-ll64.h>
......
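A userspace sketch of the escape hatch described above: defining the macro before any include makes __u64 an unsigned long long even on 64-bit MIPS, so printf formats stop depending on _MIPS_SZLONG:
#define __SANE_USERSPACE_TYPES__	/* must precede the include */
#include <asm/types.h>
#include <stdio.h>

int main(void)
{
	__u64 v = 1ULL << 40;

	printf("%llu\n", (unsigned long long)v);
	return 0;
}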
...@@ -17,7 +17,6 @@ endif ...@@ -17,7 +17,6 @@ endif
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GIC) += cevt-gic.o obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
...@@ -42,7 +41,7 @@ obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o ...@@ -42,7 +41,7 @@ obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o
obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o obj-$(CONFIG_SMP_UP) += smp-up.o
...@@ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o ...@@ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
obj-$(CONFIG_MIPS_CMP) += smp-cmp.o obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
...@@ -107,6 +105,9 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o ...@@ -107,6 +105,9 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_MIPS_CM) += mips-cm.o obj-$(CONFIG_MIPS_CM) += mips-cm.o
obj-$(CONFIG_MIPS_CPC) += mips-cpc.o obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
obj-$(CONFIG_CPU_PM) += pm.o
obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
# #
# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches # safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/kbuild.h> #include <linux/kbuild.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <asm/pm.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/smp-cps.h> #include <asm/smp-cps.h>
...@@ -64,9 +65,6 @@ void output_ptreg_defines(void) ...@@ -64,9 +65,6 @@ void output_ptreg_defines(void)
OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
OFFSET(PT_STATUS, pt_regs, cp0_status); OFFSET(PT_STATUS, pt_regs, cp0_status);
OFFSET(PT_CAUSE, pt_regs, cp0_cause); OFFSET(PT_CAUSE, pt_regs, cp0_cause);
#ifdef CONFIG_MIPS_MT_SMTC
OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON #ifdef CONFIG_CPU_CAVIUM_OCTEON
OFFSET(PT_MPL, pt_regs, mpl); OFFSET(PT_MPL, pt_regs, mpl);
OFFSET(PT_MTP, pt_regs, mtp); OFFSET(PT_MTP, pt_regs, mtp);
...@@ -404,6 +402,20 @@ void output_pbe_defines(void) ...@@ -404,6 +402,20 @@ void output_pbe_defines(void)
} }
#endif #endif
#ifdef CONFIG_CPU_PM
void output_pm_defines(void)
{
COMMENT(" PM offsets. ");
#ifdef CONFIG_EVA
OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
#endif
OFFSET(SSS_SP, mips_static_suspend_state, sp);
BLANK();
}
#endif
void output_kvm_defines(void) void output_kvm_defines(void)
{ {
COMMENT(" KVM/MIPS Specfic offsets. "); COMMENT(" KVM/MIPS Specfic offsets. ");
...@@ -472,10 +484,14 @@ void output_kvm_defines(void) ...@@ -472,10 +484,14 @@ void output_kvm_defines(void)
void output_cps_defines(void) void output_cps_defines(void)
{ {
COMMENT(" MIPS CPS offsets. "); COMMENT(" MIPS CPS offsets. ");
OFFSET(BOOTCFG_CORE, boot_config, core);
OFFSET(BOOTCFG_VPE, boot_config, vpe); OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
OFFSET(BOOTCFG_PC, boot_config, pc); OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
OFFSET(BOOTCFG_SP, boot_config, sp); DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
OFFSET(BOOTCFG_GP, boot_config, gp);
OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
} }
#endif #endif
...@@ -48,6 +48,202 @@ int __isa_exception_epc(struct pt_regs *regs) ...@@ -48,6 +48,202 @@ int __isa_exception_epc(struct pt_regs *regs)
return epc; return epc;
} }
/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
int bc_false = 0;
unsigned int fcr31;
unsigned int bit;
if (!cpu_has_mmips)
return 0;
switch (insn.mm_i_format.opcode) {
case mm_pool32a_op:
if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
mm_pool32axf_op) {
switch (insn.mm_i_format.simmediate >>
MM_POOL32A_MINOR_SHIFT) {
case mm_jalr_op:
case mm_jalrhb_op:
case mm_jalrs_op:
case mm_jalrshb_op:
if (insn.mm_i_format.rt != 0) /* Not mm_jr */
regs->regs[insn.mm_i_format.rt] =
regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
*contpc = regs->regs[insn.mm_i_format.rs];
return 1;
}
}
break;
case mm_pool32i_op:
switch (insn.mm_i_format.rt) {
case mm_bltzals_op:
case mm_bltzal_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
/* Fall through */
case mm_bltz_op:
if ((long)regs->regs[insn.mm_i_format.rs] < 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bgezals_op:
case mm_bgezal_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
/* Fall through */
case mm_bgez_op:
if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_blez_op:
if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bgtz_op:
if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bc2f_op:
case mm_bc1f_op:
bc_false = 1;
/* Fall through */
case mm_bc2t_op:
case mm_bc1t_op:
preempt_disable();
if (is_fpu_owner())
asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
if (bc_false)
fcr31 = ~fcr31;
bit = (insn.mm_i_format.rs >> 2);
bit += (bit != 0);
bit += 23;
if (fcr31 & (1 << bit))
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
}
break;
case mm_pool16c_op:
switch (insn.mm_i_format.rt) {
case mm_jalr16_op:
case mm_jalrs16_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
/* Fall through */
case mm_jr16_op:
*contpc = regs->regs[insn.mm_i_format.rs];
return 1;
}
break;
case mm_beqz16_op:
if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_b1_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_bnez16_op:
if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_b1_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_b16_op:
*contpc = regs->cp0_epc + dec_insn.pc_inc +
(insn.mm_b0_format.simmediate << 1);
return 1;
case mm_beq32_op:
if (regs->regs[insn.mm_i_format.rs] ==
regs->regs[insn.mm_i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bne32_op:
if (regs->regs[insn.mm_i_format.rs] !=
regs->regs[insn.mm_i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_jalx32_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
*contpc = regs->cp0_epc + dec_insn.pc_inc;
*contpc >>= 28;
*contpc <<= 28;
*contpc |= (insn.j_format.target << 2);
return 1;
case mm_jals32_op:
case mm_jal32_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
/* Fall through */
case mm_j32_op:
*contpc = regs->cp0_epc + dec_insn.pc_inc;
*contpc >>= 27;
*contpc <<= 27;
*contpc |= (insn.j_format.target << 1);
set_isa16_mode(*contpc);
return 1;
}
return 0;
}
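Two of the computations above are easy to get wrong, so here is a standalone check of them: the mm_jalx32_op path keeps the top four PC bits (the 256 MB region) and splices in target << 2, and the bc1f/bc1t path maps FP condition code n to FCSR bit 23 for n == 0 and bit 24 + n otherwise:
#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* jalx target: region bits from the PC, the rest from the insn */
	uint32_t pc = 0x80012340u, target = 0x00123456u;
	uint32_t contpc = ((pc >> 28) << 28) | (target << 2);

	assert(contpc == 0x8048d158u);

	/* bit = cc; bit += (bit != 0); bit += 23; */
	for (unsigned int cc = 0; cc < 8; cc++) {
		unsigned int bit = cc;

		bit += (bit != 0);
		bit += 23;
		assert(bit == (cc == 0 ? 23u : 24u + cc));
	}
	return 0;
}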
/* /*
* Compute return address and emulate branch in microMIPS mode after an * Compute return address and emulate branch in microMIPS mode after an
* exception only. It does not handle compact branches/jumps and cannot * exception only. It does not handle compact branches/jumps and cannot
...@@ -366,7 +562,11 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, ...@@ -366,7 +562,11 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case cop1_op: case cop1_op:
preempt_disable(); preempt_disable();
if (is_fpu_owner()) if (is_fpu_owner())
asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); asm volatile(
".set push\n"
"\t.set mips1\n"
"\tcfc1\t%0,$31\n"
"\t.set pop" : "=r" (fcr31));
else else
fcr31 = current->thread.fpu.fcr31; fcr31 = current->thread.fpu.fcr31;
preempt_enable(); preempt_enable();
......
...@@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt) ...@@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
cnt = gic_read_count(); cnt = gic_read_count();
cnt += (u64)delta; cnt += (u64)delta;
gic_write_compare(cnt); gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
return res; return res;
} }
...@@ -73,7 +73,8 @@ int gic_clockevent_init(void) ...@@ -73,7 +73,8 @@ int gic_clockevent_init(void)
cd = &per_cpu(gic_clockevent_device, cpu); cd = &per_cpu(gic_clockevent_device, cpu);
cd->name = "MIPS GIC"; cd->name = "MIPS GIC";
cd->features = CLOCK_EVT_FEAT_ONESHOT; cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP;
clockevent_set_clock(cd, gic_frequency); clockevent_set_clock(cd, gic_frequency);
......
...@@ -12,17 +12,10 @@ ...@@ -12,17 +12,10 @@
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h> #include <asm/time.h>
#include <asm/cevt-r4k.h> #include <asm/cevt-r4k.h>
#include <asm/gic.h> #include <asm/gic.h>
/*
 * The SMTC Kernel for the 34K, 1004K, et al. replaces several
* of these routines with SMTC-specific variants.
*/
#ifndef CONFIG_MIPS_MT_SMTC
static int mips_next_event(unsigned long delta, static int mips_next_event(unsigned long delta,
struct clock_event_device *evt) struct clock_event_device *evt)
{ {
...@@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta, ...@@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta,
return res; return res;
} }
#endif /* CONFIG_MIPS_MT_SMTC */
void mips_set_clock_mode(enum clock_event_mode mode, void mips_set_clock_mode(enum clock_event_mode mode,
struct clock_event_device *evt) struct clock_event_device *evt)
{ {
...@@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode, ...@@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode,
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed; int cp0_timer_irq_installed;
#ifndef CONFIG_MIPS_MT_SMTC
irqreturn_t c0_compare_interrupt(int irq, void *dev_id) irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{ {
const int r2 = cpu_has_mips_r2; const int r2 = cpu_has_mips_r2;
...@@ -72,9 +62,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) ...@@ -72,9 +62,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
/* Clear Count/Compare Interrupt */ /* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare()); write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu); cd = &per_cpu(mips_clockevent_device, cpu);
#ifdef CONFIG_CEVT_GIC
if (!gic_present)
#endif
cd->event_handler(cd); cd->event_handler(cd);
} }
...@@ -82,8 +69,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) ...@@ -82,8 +69,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
#endif /* Not CONFIG_MIPS_MT_SMTC */
struct irqaction c0_compare_irqaction = { struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt, .handler = c0_compare_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER, .flags = IRQF_PERCPU | IRQF_TIMER,
...@@ -170,7 +155,6 @@ int c0_compare_int_usable(void) ...@@ -170,7 +155,6 @@ int c0_compare_int_usable(void)
return 1; return 1;
} }
#ifndef CONFIG_MIPS_MT_SMTC
int r4k_clockevent_init(void) int r4k_clockevent_init(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
...@@ -195,7 +179,9 @@ int r4k_clockevent_init(void) ...@@ -195,7 +179,9 @@ int r4k_clockevent_init(void)
cd = &per_cpu(mips_clockevent_device, cpu); cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS"; cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT; cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
clockevent_set_clock(cd, mips_hpt_frequency); clockevent_set_clock(cd, mips_hpt_frequency);
...@@ -210,9 +196,6 @@ int r4k_clockevent_init(void) ...@@ -210,9 +196,6 @@ int r4k_clockevent_init(void)
cd->set_mode = mips_set_clock_mode; cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler; cd->event_handler = mips_event_handler;
#ifdef CONFIG_CEVT_GIC
if (!gic_present)
#endif
clockevents_register_device(cd); clockevents_register_device(cd);
if (cp0_timer_irq_installed) if (cp0_timer_irq_installed)
...@@ -225,4 +208,3 @@ int r4k_clockevent_init(void) ...@@ -225,4 +208,3 @@ int r4k_clockevent_init(void)
return 0; return 0;
} }
#endif /* Not CONFIG_MIPS_MT_SMTC */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
/*
* Variant clock event timer support for SMTC on MIPS 34K, 1004K
* or other MIPS MT cores.
*
* Notes on SMTC Support:
*
* SMTC has multiple microthread TCs pretending to be Linux CPUs.
* But there's only one Count/Compare pair per VPE, and Compare
 * interrupts are taken opportunistically by available TCs
* bound to the VPE with the Count register. The new timer
* framework provides for global broadcasts, but we really
* want VPE-level multicasts for best behavior. So instead
* of invoking the high-level clock-event broadcast code,
* this version of SMTC support uses the historical SMTC
* multicast mechanisms "under the hood", appearing to the
* generic clock layer as if the interrupts are per-CPU.
*
* The approach taken here is to maintain a set of NR_CPUS
* virtual timers, and track which "CPU" needs to be alerted
* at each event.
*
* It's unlikely that we'll see a MIPS MT core with more than
* 2 VPEs, but we *know* that we won't need to handle more
* VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
* is always going to be overkill, but always going to be enough.
*/
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];
/*
* Timestamps stored are absolute values to be programmed
* into Count register. Valid timestamps will never be zero.
* If a Zero Count value is actually calculated, it is converted
 * to be a 1, which will introduce one or two CPU cycles of error
* roughly once every four billion events, which at 1000 HZ means
* about once every 50 days. If that's actually a problem, one
* could alternate squashing 0 to 1 and to -1.
*/
#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)
/*
* Time comparison is subtle, as it's really truncated
* modular arithmetic.
*/
#define IS_SOONER(a, b, reference) \
(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
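A standalone check of the comparison near a wraparound of the 32-bit Count register: computing both distances from the reference in unsigned arithmetic makes an already-wrapped timestamp sort later, even though it is numerically smaller. DEMO_IS_SOONER simply restates the macro above:
#include <assert.h>

#define DEMO_IS_SOONER(a, b, reference) \
	(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))

int main(void)
{
	unsigned long ref = 0xfffffff0UL;	/* Count, about to wrap */
	unsigned long a = 0xfffffff8UL;		/* 8 cycles ahead */
	unsigned long b = 0x00000010UL;		/* further ahead, post-wrap */

	assert(DEMO_IS_SOONER(a, b, ref));	/* a fires first... */
	assert(!DEMO_IS_SOONER(b, a, ref));	/* ...though b < a numerically */
	return 0;
}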
/*
* CATCHUP_INCREMENT, used when the function falls behind the counter.
 * Could be an increasing function instead of a constant.
*/
#define CATCHUP_INCREMENT 64
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned long flags;
unsigned int mtflags;
unsigned long timestamp, reference, previous;
unsigned long nextcomp = 0L;
int vpe = current_cpu_data.vpe_id;
int cpu = smp_processor_id();
local_irq_save(flags);
mtflags = dmt();
/*
* Maintain the per-TC virtual timer
* and program the per-VPE shared Count register
* as appropriate here...
*/
reference = (unsigned long)read_c0_count();
timestamp = MAKEVALID(reference + delta);
/*
* To really model the clock, we have to catch the case
* where the current next-in-VPE timestamp is the old
 * timestamp for the calling CPU, but the new value is
* in fact later. In that case, we have to do a full
* scan and discover the new next-in-VPE CPU id and
* timestamp.
*/
previous = smtc_nexttime[vpe][cpu];
if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
&& IS_SOONER(previous, timestamp, reference)) {
int i;
int soonest = cpu;
/*
* Update timestamp array here, so that new
* value gets considered along with those of
* other virtual CPUs on the VPE.
*/
smtc_nexttime[vpe][cpu] = timestamp;
for_each_online_cpu(i) {
if (ISVALID(smtc_nexttime[vpe][i])
&& IS_SOONER(smtc_nexttime[vpe][i],
smtc_nexttime[vpe][soonest], reference)) {
soonest = i;
}
}
smtc_nextinvpe[vpe] = soonest;
nextcomp = smtc_nexttime[vpe][soonest];
/*
* Otherwise, we don't have to process the whole array rank,
* we just have to see if the event horizon has gotten closer.
*/
} else {
if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
IS_SOONER(timestamp,
smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
smtc_nextinvpe[vpe] = cpu;
nextcomp = timestamp;
}
/*
 * Since next-in-VPE may be the same as the executing
* virtual CPU, we update the array *after* checking
* its value.
*/
smtc_nexttime[vpe][cpu] = timestamp;
}
/*
* It may be that, in fact, we don't need to update Compare,
* but if we do, we want to make sure we didn't fall into
* a crack just behind Count.
*/
if (ISVALID(nextcomp)) {
write_c0_compare(nextcomp);
ehb();
/*
* We never return an error, we just make sure
* that we trigger the handlers as quickly as
* we can if we fell behind.
*/
while ((nextcomp - (unsigned long)read_c0_count())
> (unsigned long)LONG_MAX) {
nextcomp += CATCHUP_INCREMENT;
write_c0_compare(nextcomp);
ehb();
}
}
emt(mtflags);
local_irq_restore(flags);
return 0;
}
void smtc_distribute_timer(int vpe)
{
unsigned long flags;
unsigned int mtflags;
int cpu;
struct clock_event_device *cd;
unsigned long nextstamp;
unsigned long reference;
repeat:
nextstamp = 0L;
for_each_online_cpu(cpu) {
/*
* Find virtual CPUs within the current VPE who have
* unserviced timer requests whose time is now past.
*/
local_irq_save(flags);
mtflags = dmt();
if (cpu_data[cpu].vpe_id == vpe &&
ISVALID(smtc_nexttime[vpe][cpu])) {
reference = (unsigned long)read_c0_count();
if ((smtc_nexttime[vpe][cpu] - reference)
> (unsigned long)LONG_MAX) {
smtc_nexttime[vpe][cpu] = 0L;
emt(mtflags);
local_irq_restore(flags);
/*
 * We don't send IPIs to ourselves.
*/
if (cpu != smp_processor_id()) {
smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
} else {
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
}
} else {
/* Local to VPE but Valid Time not yet reached. */
if (!ISVALID(nextstamp) ||
IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
reference)) {
smtc_nextinvpe[vpe] = cpu;
nextstamp = smtc_nexttime[vpe][cpu];
}
emt(mtflags);
local_irq_restore(flags);
}
} else {
emt(mtflags);
local_irq_restore(flags);
}
}
/* Reprogram for interrupt at next soonest timestamp for VPE */
if (ISVALID(nextstamp)) {
write_c0_compare(nextstamp);
ehb();
if ((nextstamp - (unsigned long)read_c0_count())
> (unsigned long)LONG_MAX)
goto repeat;
}
}
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
int cpu = smp_processor_id();
/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
handle_perf_irq(1);
if (read_c0_cause() & (1 << 30)) {
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
smtc_distribute_timer(cpu_data[cpu].vpe_id);
}
return IRQ_HANDLED;
}
int smtc_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
int i;
int j;
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
if (cpu == 0) {
for (i = 0; i < num_possible_cpus(); i++) {
smtc_nextinvpe[i] = 0;
for (j = 0; j < num_possible_cpus(); j++)
smtc_nexttime[i][j] = 0L;
}
/*
 * SMTC also can't have the usability test
* run by secondary TCs once Compare is in use.
*/
if (!c0_compare_int_usable())
return -ENXIO;
}
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
 * interrupt number of its liking.
*/
irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
if (get_c0_compare_int)
irq = get_c0_compare_int();
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
/* Calculate the min / max delta */
cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
cd->shift = 32;
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 300;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
/*
* On SMTC we only want to do the data structure
* initialization and IRQ setup once.
*/
if (cpu)
return 0;
/*
* And we need the hwmask associated with the c0_compare
* vector to be initialized.
*/
irq_hwmask[irq] = (0x100 << cp0_compare_irq);
if (cp0_timer_irq_installed)
return 0;
cp0_timer_irq_installed = 1;
setup_irq(irq, &c0_compare_irqaction);
return 0;
}
This diff is collapsed.