Commit 66188fb1 authored by Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
 "Another round of fixes:

   - CM: Fix mips_cm_max_vp_width for non-MT kernels on MT systems
   - CPS: Avoid BUG() when offlining pre-r6 CPUs
   - DEC: Avoid gas warnings due to suspicious instruction scheduling by
     manually expanding assembler macros
   - FTLB: Fix configuration by moving it after TLB probing
   - FTLB: clear execution hazard after changing FTLB enable
   - Highmem: Fix detection of unsupported highmem with cache aliases
   - I6400: Don't touch FTLBP chicken bits
   - microMIPS: Fix BUILD_ROLLBACK_PROLOGUE
   - Malta: Fix IOCU disable switch read for MIPS64
   - Octeon: Fix probing of devices attached to GPIO lines
   - uprobes: Misc small fixes"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: CM: Fix mips_cm_max_vp_width for non-MT kernels on MT systems
  MIPS: Fix detection of unsupported highmem with cache aliases
  MIPS: Malta: Fix IOCU disable switch read for MIPS64
  MIPS: Fix BUILD_ROLLBACK_PROLOGUE for microMIPS
  MIPS: clear execution hazard after changing FTLB enable
  MIPS: Configure FTLB after probing TLB sizes from config4
  MIPS: Stop setting I6400 FTLBP
  MIPS: DEC: Avoid la pseudo-instruction in delay slots
  MIPS: Octeon: mark GPIO controller node not populated after IRQ init.
  MIPS: uprobes: fix use of uninitialised variable
  MIPS: uprobes: remove incorrect set_orig_insn
  MIPS: fix uretprobe implementation
  MIPS: smp-cps: Avoid BUG() when offlining pre-r6 CPUs
parents 0c7fc30f 6605d156
@@ -1619,6 +1619,12 @@ static int __init octeon_irq_init_gpio(
 		return -ENOMEM;
 	}
 
+	/*
+	 * Clear the OF_POPULATED flag that was set by of_irq_init()
+	 * so that all GPIO devices will be probed.
+	 */
+	of_node_clear_flag(gpio_node, OF_POPULATED);
+
 	return 0;
 }
 
 /*
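A note on the hunk above: of_irq_init() marks each matched interrupt-controller node with OF_POPULATED, and the later platform-bus population pass skips nodes carrying that flag, so the GPIO controller's child devices were never probed. Below is a self-contained model of that flag round-trip (a sketch, not kernel code; the struct and populate() helper are stand-ins for struct device_node and the device creation done by of_platform_populate()):

#include <assert.h>

/*
 * Self-contained model; the kernel's real helpers are
 * of_node_set_flag()/of_node_clear_flag() acting on struct device_node.
 */
enum { OF_POPULATED_MODEL = 1 << 0 };

struct node_model { unsigned long flags; };

static int populate(struct node_model *n)
{
	/* nodes already marked populated are skipped */
	if (n->flags & OF_POPULATED_MODEL)
		return 0;			/* no device created */
	n->flags |= OF_POPULATED_MODEL;
	return 1;				/* device created */
}

int main(void)
{
	struct node_model gpio = { 0 };

	gpio.flags |= OF_POPULATED_MODEL;	/* of_irq_init() marks the node */
	assert(populate(&gpio) == 0);		/* bug: children never probed */

	gpio.flags &= ~OF_POPULATED_MODEL;	/* the fix clears the flag */
	assert(populate(&gpio) == 1);		/* device is created again */
	return 0;
}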
...
@@ -146,7 +146,25 @@
 		/*
 		 * Find irq with highest priority
 		 */
-		PTR_LA	t1,cpu_mask_nr_tbl
+		# open coded PTR_LA t1, cpu_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+		# open coded la t1, cpu_mask_nr_tbl
+		lui	t1, %hi(cpu_mask_nr_tbl)
+		addiu	t1, %lo(cpu_mask_nr_tbl)
+#endif
+#if (_MIPS_SZPTR == 64)
+		# open coded dla t1, cpu_mask_nr_tbl
+		.set	push
+		.set	noat
+		lui	t1, %highest(cpu_mask_nr_tbl)
+		lui	AT, %hi(cpu_mask_nr_tbl)
+		daddiu	t1, t1, %higher(cpu_mask_nr_tbl)
+		daddiu	AT, AT, %lo(cpu_mask_nr_tbl)
+		dsll	t1, 32
+		daddu	t1, t1, AT
+		.set	pop
+#endif
 1:		lw	t2,(t1)
 		nop
 		and	t2,t0
@@ -195,7 +213,25 @@
 		/*
 		 * Find irq with highest priority
 		 */
-		PTR_LA	t1,asic_mask_nr_tbl
+		# open coded PTR_LA t1,asic_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+		# open coded la t1, asic_mask_nr_tbl
+		lui	t1, %hi(asic_mask_nr_tbl)
+		addiu	t1, %lo(asic_mask_nr_tbl)
+#endif
+#if (_MIPS_SZPTR == 64)
+		# open coded dla t1, asic_mask_nr_tbl
+		.set	push
+		.set	noat
+		lui	t1, %highest(asic_mask_nr_tbl)
+		lui	AT, %hi(asic_mask_nr_tbl)
+		daddiu	t1, t1, %higher(asic_mask_nr_tbl)
+		daddiu	AT, AT, %lo(asic_mask_nr_tbl)
+		dsll	t1, 32
+		daddu	t1, t1, AT
+		.set	pop
+#endif
 2:		lw	t2,(t1)
 		nop
 		and	t2,t0
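Background for the two hunks above: la and dla are assembler macros that gas expands into multi-instruction sequences, and placing such a macro where a branch delay slot is in play triggers gas's suspicious-scheduling warnings, so the expansion is done by hand. As a sanity check on the 64-bit sequence, here is a self-contained C model of the %hi/%lo/%higher/%highest relocation arithmetic (the bias constants follow the standard binutils definitions; the example address is arbitrary):

#include <assert.h>
#include <stdint.h>

/* 16-bit relocation chunks, biased so that the sign-extending
 * lui/daddiu sequence reassembles the full address exactly. */
static uint16_t rel_lo(uint64_t x)      { return x & 0xffff; }
static uint16_t rel_hi(uint64_t x)      { return (x + 0x8000ull) >> 16; }
static uint16_t rel_higher(uint64_t x)  { return (x + 0x80008000ull) >> 32; }
static uint16_t rel_highest(uint64_t x) { return (x + 0x800080008000ull) >> 48; }

/* lui: load imm << 16, sign-extended to 64 bits */
static uint64_t lui64(uint16_t imm)
{
	return (uint64_t)(int64_t)(int32_t)((uint32_t)imm << 16);
}

/* daddiu sign-extends its 16-bit immediate */
static uint64_t sext16(uint16_t v) { return (uint64_t)(int64_t)(int16_t)v; }

int main(void)
{
	uint64_t addr = 0xffffffff80123456ull;	/* arbitrary example symbol */

	/* Mimic the open-coded dla sequence above:
	 *   lui t1,%highest; daddiu t1,t1,%higher; dsll t1,32;
	 *   lui AT,%hi;      daddiu AT,AT,%lo;     daddu t1,t1,AT */
	uint64_t t1 = lui64(rel_highest(addr));
	t1 += sext16(rel_higher(addr));
	t1 <<= 32;
	uint64_t at = lui64(rel_hi(addr));
	at += sext16(rel_lo(addr));
	t1 += at;

	assert(t1 == addr);
	return 0;
}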
...
@@ -458,10 +458,21 @@ static inline int mips_cm_revision(void)
 static inline unsigned int mips_cm_max_vp_width(void)
 {
 	extern int smp_num_siblings;
+	uint32_t cfg;
 
 	if (mips_cm_revision() >= CM_REV_CM3)
 		return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
 
+	if (mips_cm_present()) {
+		/*
+		 * We presume that all cores in the system will have the same
+		 * number of VP(E)s, and if that ever changes then this will
+		 * need revisiting.
+		 */
+		cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+		return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+	}
+
 	if (IS_ENABLED(CONFIG_SMP))
 		return smp_num_siblings;
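The new pre-CM3 branch reads the VP(E) count from the core-local GCR config register rather than relying on smp_num_siblings, which is only meaningful on MT-enabled kernels. A small model of the field decode, with placeholder shift/mask values rather than the real CM_GCR_Cx_CONFIG_PVPE_* definitions:

#include <assert.h>
#include <stdint.h>

/* Placeholder field parameters for illustration only. */
#define PVPE_SHF	0
#define PVPE_MSK	(0x3ffu << PVPE_SHF)

static unsigned int vps_per_core(uint32_t gcr_cl_config)
{
	/* the field encodes "VP(E)s per core minus one", hence the +1 */
	return ((gcr_cl_config & PVPE_MSK) >> PVPE_SHF) + 1;
}

int main(void)
{
	assert(vps_per_core(0x3) == 4);	/* e.g. a 4-VP(E) MT core */
	return 0;
}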
...
@@ -660,8 +660,6 @@
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
 
-/* FTLB probability bits for R6 */
-#define MIPS_CONF7_FTLBP_SHIFT	(18)
-
 /* WatchLo* register definitions */
 #define MIPS_WATCHLO_IRW	(_ULCAST_(0x7) << 0)
...
@@ -36,7 +36,6 @@ struct arch_uprobe {
 	unsigned long	resume_epc;
 	u32	insn[2];
 	u32	ixol[2];
-	union	mips_instruction orig_inst[MAX_UINSN_BYTES / 4];
 };
 
 struct arch_uprobe_task {
...
@@ -352,7 +352,12 @@ __setup("nohtw", htw_disable);
 static int mips_ftlb_disabled;
 static int mips_has_ftlb_configured;
 
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+enum ftlb_flags {
+	FTLB_EN		= 1 << 0,
+	FTLB_SET_PROB	= 1 << 1,
+};
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags);
 
 static int __init ftlb_disable(char *s)
 {
@@ -371,8 +376,6 @@ static int __init ftlb_disable(char *s)
 		return 1;
 	}
 
-	back_to_back_c0_hazard();
-
 	config4 = read_c0_config4();
 
 	/* Check that FTLB has been disabled */
@@ -531,7 +534,7 @@ static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
 		return 3;
 }
 
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
 {
 	unsigned int config;
@@ -542,33 +545,33 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 	case CPU_P6600:
 		/* proAptiv & related cores use Config6 to enable the FTLB */
 		config = read_c0_config6();
-		/* Clear the old probability value */
-		config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
-		if (enable)
-			/* Enable FTLB */
-			write_c0_config6(config |
-					 (calculate_ftlb_probability(c)
-					  << MIPS_CONF6_FTLBP_SHIFT)
-					 | MIPS_CONF6_FTLBEN);
+
+		if (flags & FTLB_EN)
+			config |= MIPS_CONF6_FTLBEN;
 		else
-			/* Disable FTLB */
-			write_c0_config6(config & ~MIPS_CONF6_FTLBEN);
+			config &= ~MIPS_CONF6_FTLBEN;
+
+		if (flags & FTLB_SET_PROB) {
+			config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+			config |= calculate_ftlb_probability(c)
+				  << MIPS_CONF6_FTLBP_SHIFT;
+		}
+
+		write_c0_config6(config);
+		back_to_back_c0_hazard();
 		break;
 	case CPU_I6400:
-		/* I6400 & related cores use Config7 to configure FTLB */
-		config = read_c0_config7();
-		/* Clear the old probability value */
-		config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
-		write_c0_config7(config | (calculate_ftlb_probability(c)
-					   << MIPS_CONF7_FTLBP_SHIFT));
-		break;
+		/* There's no way to disable the FTLB */
+		if (!(flags & FTLB_EN))
+			return 1;
+		return 0;
 	case CPU_LOONGSON3:
 		/* Flush ITLB, DTLB, VTLB and FTLB */
 		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
 			      LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
 		/* Loongson-3 cores use Config6 to enable the FTLB */
 		config = read_c0_config6();
-		if (enable)
+		if (flags & FTLB_EN)
 			/* Enable FTLB */
 			write_c0_config6(config & ~MIPS_CONF6_FTLBDIS);
 		else
@@ -788,6 +791,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
 			       PAGE_SIZE, config4);
 			/* Switch FTLB off */
 			set_ftlb_enable(c, 0);
+			mips_ftlb_disabled = 1;
 			break;
 		}
 		c->tlbsizeftlbsets = 1 <<
@@ -852,7 +856,7 @@ static void decode_configs(struct cpuinfo_mips *c)
 	c->scache.flags = MIPS_CACHE_NOT_PRESENT;
 
 	/* Enable FTLB if present and not disabled */
-	set_ftlb_enable(c, !mips_ftlb_disabled);
+	set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);
 
 	ok = decode_config0(c);		/* Read Config registers. */
 	BUG_ON(!ok);			/* Arch spec violation! */
@@ -902,6 +906,9 @@ static void decode_configs(struct cpuinfo_mips *c)
 		}
 	}
 
+	/* configure the FTLB write probability */
+	set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
+
 	mips_probe_watch_registers(c);
 
 #ifndef CONFIG_MIPS_CPS
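The flag-based interface above exists because calculate_ftlb_probability() depends on FTLB geometry that is only known once config4 has been decoded, so enabling the FTLB and programming its write probability must happen in two stages. A self-contained sketch of that ordering (names mirror the diff, but the struct and printf bodies are simplified stand-ins, not kernel code):

#include <stdio.h>

enum ftlb_flags {
	FTLB_EN		= 1 << 0,
	FTLB_SET_PROB	= 1 << 1,
};

struct cpu_model { unsigned int tlbsizeftlbsets; };	/* stand-in */

static void set_ftlb_enable_model(struct cpu_model *c, enum ftlb_flags flags)
{
	if (flags & FTLB_EN)
		printf("FTLB enabled\n");
	if (flags & FTLB_SET_PROB)
		/* the probability depends on geometry probed from config4 */
		printf("write probability set for %u FTLB sets\n",
		       c->tlbsizeftlbsets);
}

int main(void)
{
	struct cpu_model c = { 0 };

	set_ftlb_enable_model(&c, FTLB_EN);	/* stage 1: enable early */
	c.tlbsizeftlbsets = 128;		/* probe config4 */
	set_ftlb_enable_model(&c, FTLB_EN | FTLB_SET_PROB); /* stage 2 */
	return 0;
}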
...
@@ -142,9 +142,8 @@ LEAF(__r4k_wait)
 	PTR_LA	k1, __r4k_wait
 	ori	k0, 0x1f	/* 32 byte rollback region */
 	xori	k0, 0x1f
-	bne	k0, k1, 9f
+	bne	k0, k1, \handler
 	 MTC0	k0, CP0_EPC
-9:
 	.set	pop
 	.endm
...
@@ -764,7 +764,6 @@ static void __init arch_mem_init(char **cmdline_p)
 	device_tree_init();
 	sparse_init();
 	plat_swiotlb_setup();
-	paging_init();
 
 	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
 	/* Tell bootmem about cma reserved memblock section */
@@ -877,6 +876,7 @@ void __init setup_arch(char **cmdline_p)
 	prefill_possible_map();
 
 	cpu_cache_init();
+	paging_init();
 }
 
 unsigned long kernelsp[NR_CPUS];
...
@@ -513,7 +513,7 @@ static void cps_cpu_die(unsigned int cpu)
 		 * in which case the CPC will refuse to power down the core.
 		 */
 		do {
-			mips_cm_lock_other(core, vpe_id);
+			mips_cm_lock_other(core, 0);
 			mips_cpc_lock_other(core);
 			stat = read_cpc_co_stat_conf();
 			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
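Context for the change above, hedged as an assumption inferred from the fix's title rather than quoted kernel code: on pre-CM3 Coherence Managers the GCR "other" region can only target a whole core, so mips_cm_lock_other() rejects a nonzero VP there, which is the BUG() the patch avoids by always passing 0. A minimal model of that contract:

#include <assert.h>

static int cm_rev;	/* stand-in for mips_cm_revision() */

static void cm_lock_other_model(unsigned int core, unsigned int vp)
{
	(void)core;
	if (cm_rev < 3)
		assert(vp == 0);	/* the kernel BUG()s at this point */
}

int main(void)
{
	cm_rev = 2;			/* a pre-r6, pre-CM3 system */
	cm_lock_other_model(1, 0);	/* fixed call: fine */
	/* cm_lock_other_model(1, 1);	old call: would trip the assert */
	return 0;
}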
...
@@ -157,7 +157,6 @@ bool is_trap_insn(uprobe_opcode_t *insn)
 int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
 {
 	struct uprobe_task *utask = current->utask;
-	union mips_instruction insn;
 
 	/*
 	 * Now find the EPC where to resume after the breakpoint has been
@@ -168,10 +167,10 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
 		unsigned long epc;
 
 		epc = regs->cp0_epc;
-		__compute_return_epc_for_insn(regs, insn);
+		__compute_return_epc_for_insn(regs,
+			(union mips_instruction) aup->insn[0]);
 		aup->resume_epc = regs->cp0_epc;
 	}
 
 	utask->autask.saved_trap_nr = current->thread.trap_nr;
 	current->thread.trap_nr = UPROBE_TRAP_NR;
 	regs->cp0_epc = current->utask->xol_vaddr;
@@ -257,7 +256,7 @@ unsigned long arch_uretprobe_hijack_return_addr(
 	ra = regs->regs[31];
 
 	/* Replace the return address with the trampoline address */
-	regs->regs[31] = ra;
+	regs->regs[31] = trampoline_vaddr;
 
 	return ra;
 }
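The one-line fix above is easy to misread, so here is a self-contained model of the hijack contract: write the trampoline address into the ra slot and return the original address to the uprobes core. The old code wrote ra back into itself, so the trampoline was never entered. Register values below are arbitrary examples:

#include <assert.h>
#include <stdint.h>

/* Minimal model; the register file is a stand-in for struct pt_regs. */
struct regs_model { uint64_t regs[32]; };

static uint64_t hijack_return_addr(uint64_t trampoline_vaddr,
				   struct regs_model *regs)
{
	uint64_t ra = regs->regs[31];

	/* the bug: this assignment used to store ra back into itself */
	regs->regs[31] = trampoline_vaddr;

	return ra;	/* original address, kept by the uprobes core */
}

int main(void)
{
	struct regs_model r = { .regs[31] = 0x120001234ull };
	uint64_t orig = hijack_return_addr(0x7f00deadbeefull, &r);

	assert(orig == 0x120001234ull);
	assert(r.regs[31] == 0x7f00deadbeefull);
	return 0;
}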
@@ -280,24 +279,6 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
 }
 
-/**
- * set_orig_insn - Restore the original instruction.
- * @mm: the probed process address space.
- * @auprobe: arch specific probepoint information.
- * @vaddr: the virtual address to insert the opcode.
- *
- * For mm @mm, restore the original opcode (opcode) at @vaddr.
- * Return 0 (success) or a negative errno.
- *
- * This overrides the weak version in kernel/events/uprobes.c.
- */
-int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
-	unsigned long vaddr)
-{
-	return uprobe_write_opcode(mm, vaddr,
-			*(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
-}
-
 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 				  void *src, unsigned long len)
 {
...
@@ -440,6 +440,9 @@ static inline void mem_init_free_highmem(void)
 #ifdef CONFIG_HIGHMEM
 	unsigned long tmp;
 
+	if (cpu_has_dc_aliases)
+		return;
+
 	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
 		struct page *page = pfn_to_page(tmp);
...
@@ -39,6 +39,9 @@
 #include <linux/console.h>
 #endif
 
+#define ROCIT_CONFIG_GEN0		0x1f403000
+#define ROCIT_CONFIG_GEN0_PCI_IOCU	BIT(7)
+
 extern void malta_be_init(void);
 extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
@@ -107,6 +110,8 @@ static void __init fd_activate(void)
 static int __init plat_enable_iocoherency(void)
 {
 	int supported = 0;
+	u32 cfg;
+
 	if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
 		if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
 			BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
@@ -129,7 +134,8 @@ static int __init plat_enable_iocoherency(void)
 	} else if (mips_cm_numiocu() != 0) {
 		/* Nothing special needs to be done to enable coherency */
 		pr_info("CMP IOCU detected\n");
-		if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+		cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
+		if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
 			pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
 			return 0;
 		}
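The rewritten read is address-equivalent to the old magic pointer: KSEG1 is the unmapped, uncached MIPS window, so ROCIT_CONFIG_GEN0 (0x1f403000) viewed through it is exactly the old 0xbf403000. Note the check also narrows from the old 0x81 mask (bits 0 and 7) to the PCI_IOCU bit alone. A quick check of the arithmetic, using a simplified 32-bit form of CKSEG1ADDR():

#include <assert.h>
#include <stdint.h>

/* Simplified 32-bit form of the kernel's CKSEG1ADDR(): map a physical
 * address into the unmapped, uncached KSEG1 window at 0xa0000000. */
#define CKSEG1_BASE		0xa0000000u
#define CKSEG1ADDR_32(p)	(((uint32_t)(p) & 0x1fffffffu) | CKSEG1_BASE)

int main(void)
{
	/* the old magic pointer is ROCIT_CONFIG_GEN0 seen through KSEG1 */
	assert(CKSEG1ADDR_32(0x1f403000u) == 0xbf403000u);
	return 0;
}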
...