Commit 831e45d8 authored by Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
 "A round of 4.8 fixes:

  MIPS generic code:
   - Add a missing ".set pop" in an early commit
   - Fix memory regions reaching top of physical
   - MAAR: Fix address alignment
   - vDSO: Fix Malta EVA mapping to vDSO page structs
   - uprobes: fix incorrect uprobe brk handling
   - uprobes: select HAVE_REGS_AND_STACK_ACCESS_API
   - Avoid a BUG warning during PR_SET_FP_MODE prctl
   - SMP: Fix possibility of deadlock when bringing CPUs online
   - R6: Remove compact branch policy Kconfig entries
   - Fix size calc when avoiding IPIs for small icache flushes
   - Fix pre-r6 emulation FPU initialisation
   - Fix delay slot emulation count in debugfs

  ATH79:
   - Fix test for error return of clk_register_fixed_factor.

  Octeon:
   - Fix kernel header to work for VDSO build.
   - Fix initialization of platform device probing.

  paravirt:
   - Fix undefined reference to smp_bootstrap"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: Fix delay slot emulation count in debugfs
  MIPS: SMP: Fix possibility of deadlock when bringing CPUs online
  MIPS: Fix pre-r6 emulation FPU initialisation
  MIPS: vDSO: Fix Malta EVA mapping to vDSO page structs
  MIPS: Select HAVE_REGS_AND_STACK_ACCESS_API
  MIPS: Octeon: Fix platform bus probing
  MIPS: Octeon: mangle-port: fix build failure with VDSO code
  MIPS: Avoid a BUG warning during prctl(PR_SET_FP_MODE, ...)
  MIPS: c-r4k: Fix size calc when avoiding IPIs for small icache flushes
  MIPS: Add a missing ".set pop" in an early commit
  MIPS: paravirt: Fix undefined reference to smp_bootstrap
  MIPS: Remove compact branch policy Kconfig entries
  MIPS: MAAR: Fix address alignment
  MIPS: Fix memory regions reaching top of physical
  MIPS: uprobes: fix incorrect uprobe brk handling
  MIPS: ath79: Fix test for error return of clk_register_fixed_factor().
parents 751b9a5d 116e7111
@@ -65,6 +65,7 @@ config MIPS
 	select ARCH_CLOCKSOURCE_DATA
 	select HANDLE_DOMAIN_IRQ
 	select HAVE_EXIT_THREAD
+	select HAVE_REGS_AND_STACK_ACCESS_API
 
 menu "Machine selection"
......
@@ -113,42 +113,6 @@ config SPINLOCK_TEST
 	help
 	  Add several files to the debugfs to test spinlock speed.
 
-if CPU_MIPSR6
-
-choice
-	prompt "Compact branch policy"
-	default MIPS_COMPACT_BRANCHES_OPTIMAL
-
-config MIPS_COMPACT_BRANCHES_NEVER
-	bool "Never (force delay slot branches)"
-	help
-	  Pass the -mcompact-branches=never flag to the compiler in order to
-	  force it to always emit branches with delay slots, and make no use
-	  of the compact branch instructions introduced by MIPSr6. This is
-	  useful if you suspect there may be an issue with compact branches in
-	  either the compiler or the CPU.
-
-config MIPS_COMPACT_BRANCHES_OPTIMAL
-	bool "Optimal (use where beneficial)"
-	help
-	  Pass the -mcompact-branches=optimal flag to the compiler in order for
-	  it to make use of compact branch instructions where it deems them
-	  beneficial, and use branches with delay slots elsewhere. This is the
-	  default compiler behaviour, and should be used unless you have a
-	  reason to choose otherwise.
-
-config MIPS_COMPACT_BRANCHES_ALWAYS
-	bool "Always (force compact branches)"
-	help
-	  Pass the -mcompact-branches=always flag to the compiler in order to
-	  force it to always emit compact branches, making no use of branch
-	  instructions with delay slots. This can result in more compact code
-	  which may be beneficial in some scenarios.
-
-endchoice
-
-endif # CPU_MIPSR6
-
 config SCACHE_DEBUGFS
 	bool "L2 cache debugfs entries"
 	depends on DEBUG_FS
......
@@ -203,10 +203,6 @@ endif
 toolchain-virt := $(call cc-option-yn,$(mips-cflags) -mvirt)
 cflags-$(toolchain-virt) += -DTOOLCHAIN_SUPPORTS_VIRT
 
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always
-
 #
 # Firmware support
 #
......
@@ -96,7 +96,7 @@ static struct clk * __init ath79_reg_ffclk(const char *name,
 	struct clk *clk;
 
 	clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
-	if (!clk)
+	if (IS_ERR(clk))
 		panic("failed to allocate %s clock structure", name);
 
 	return clk;
......
@@ -1059,7 +1059,7 @@ static int __init octeon_publish_devices(void)
 {
 	return of_platform_bus_probe(NULL, octeon_ids, NULL);
 }
-device_initcall(octeon_publish_devices);
+arch_initcall(octeon_publish_devices);
 
 MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
 MODULE_LICENSE("GPL");
......
@@ -157,6 +157,7 @@
 	ldc1	$f28, THREAD_FPR28(\thread)
 	ldc1	$f30, THREAD_FPR30(\thread)
 	ctc1	\tmp, fcr31
+	.set	pop
 	.endm
 
 	.macro	fpu_restore_16odd thread
......
@@ -15,8 +15,8 @@
 static inline bool __should_swizzle_bits(volatile void *a)
 {
 	extern const bool octeon_should_swizzle_table[];
-	unsigned long did = ((unsigned long)a >> 40) & 0xff;
+	u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
 
 	return octeon_should_swizzle_table[did];
 }
@@ -29,7 +29,7 @@ static inline bool __should_swizzle_bits(volatile void *a)
 #define __should_swizzle_bits(a)	false
 
-static inline bool __should_swizzle_addr(unsigned long p)
+static inline bool __should_swizzle_addr(u64 p)
 {
 	/* boot bus? */
 	return ((p >> 40) & 0xff) == 0;
......
@@ -11,11 +11,13 @@
 #define CP0_EBASE $15, 1
 
 	.macro	kernel_entry_setup
+#ifdef CONFIG_SMP
 	mfc0	t0, CP0_EBASE
 	andi	t0, t0, 0x3ff		# CPUNum
 	beqz	t0, 1f
 	# CPUs other than zero goto smp_bootstrap
 	j	smp_bootstrap
+#endif /* CONFIG_SMP */
 
 1:
 	.endm
......
@@ -1164,7 +1164,9 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
 	regs->regs[31] = r31;
 	regs->cp0_epc = epc;
 	if (!used_math()) {	/* First time FPU user.  */
+		preempt_disable();
 		err = init_fpu();
+		preempt_enable();
 		set_used_math();
 	}
 	lose_fpu(1);	/* Save FPU state for the emulator.  */
......
@@ -605,14 +605,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 		return -EOPNOTSUPP;
 
 	/* Avoid inadvertently triggering emulation */
-	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
-	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
 		return -EOPNOTSUPP;
-	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
 		return -EOPNOTSUPP;
 
 	/* FR = 0 not supported in MIPS R6 */
-	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
 		return -EOPNOTSUPP;
 
 	/* Proceed with the mode switch */
......
@@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
 	int x = boot_mem_map.nr_map;
 	int i;
 
+	/*
+	 * If the region reaches the top of the physical address space, adjust
+	 * the size slightly so that (start + size) doesn't overflow
+	 */
+	if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
+		--size;
+
 	/* Sanity check */
 	if (start + size < start) {
 		pr_warn("Trying to add an invalid memory region, skipped\n");
......
@@ -322,6 +322,9 @@ asmlinkage void start_secondary(void)
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
+	cpumask_set_cpu(cpu, &cpu_callin_map);
+	synchronise_count_slave(cpu);
+
 	set_cpu_online(cpu, true);
 
 	set_cpu_sibling_map(cpu);
@@ -329,10 +332,6 @@ asmlinkage void start_secondary(void)
 
 	calculate_cpu_foreign_map();
 
-	cpumask_set_cpu(cpu, &cpu_callin_map);
-	synchronise_count_slave(cpu);
-
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
 	 * is dangerous.
......
@@ -222,7 +222,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
 		return NOTIFY_DONE;
 
 	switch (val) {
-	case DIE_BREAK:
+	case DIE_UPROBE:
 		if (uprobe_pre_sstep_notifier(regs))
 			return NOTIFY_STOP;
 		break;
......
@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
 static void __init init_vdso_image(struct mips_vdso_image *image)
 {
 	unsigned long num_pages, i;
+	unsigned long data_pfn;
 
 	BUG_ON(!PAGE_ALIGNED(image->data));
 	BUG_ON(!PAGE_ALIGNED(image->size));
 
 	num_pages = image->size / PAGE_SIZE;
 
-	for (i = 0; i < num_pages; i++) {
-		image->mapping.pages[i] =
-			virt_to_page(image->data + (i * PAGE_SIZE));
-	}
+	data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+	for (i = 0; i < num_pages; i++)
+		image->mapping.pages[i] = pfn_to_page(data_pfn + i);
 }
 
 static int __init init_vdso(void)
static int __init init_vdso(void) static int __init init_vdso(void)
......
@@ -298,5 +298,6 @@ bool do_dsemulret(struct pt_regs *xcp)
 	/* Set EPC to return to post-branch instruction */
 	xcp->cp0_epc = current->thread.bd_emu_cont_pc;
 	pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
+	MIPS_FPU_EMU_INC_STATS(ds_emul);
 	return true;
 }
@@ -800,7 +800,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	 * If address-based cache ops don't require an SMP call, then
 	 * use them exclusively for small flushes.
 	 */
-	size = start - end;
+	size = end - start;
 	cache_size = icache_size;
 	if (!cpu_has_ic_fills_f_dc) {
 		size *= 2;
......
@@ -261,7 +261,6 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
 {
 	struct maar_config cfg[BOOT_MEM_MAP_MAX];
 	unsigned i, num_configured, num_cfg = 0;
-	phys_addr_t skip;
 
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
 		switch (boot_mem_map.map[i].type) {
@@ -272,14 +271,14 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
 			continue;
 		}
 
-		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
+		/* Round lower up */
 		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-		cfg[num_cfg].lower += skip;
+		cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
 
-		cfg[num_cfg].upper = cfg[num_cfg].lower;
-		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-		cfg[num_cfg].upper -= skip;
+		/* Round upper down */
+		cfg[num_cfg].upper = boot_mem_map.map[i].addr +
+				     boot_mem_map.map[i].size;
+		cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
 
 		cfg[num_cfg].attrs = MIPS_MAAR_S;
 		num_cfg++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment