Commit 1a5304fe authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'parisc-for-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
 "Two important fixes in here:

   - The argument pointer register was wrong when calling 64-bit
     firmware functions, which may cause random memory corruption or
     crashes.

   - Ensure page alignment in cache flush functions, otherwise not all
     memory might get flushed.

  The rest are cleanups (mmap implementation, panic path) and usual
  smaller updates.

  Summary:

   - Calculate correct argument pointer in real64_call_asm()

   - Cleanup mmap implementation regarding color alignment (John David
     Anglin)

   - Spinlock fixes in panic path (Guilherme G. Piccoli)

   - Build doc update for parisc64 (Randy Dunlap)

   - Ensure page alignment in flush functions"

* tag 'parisc-for-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Fix argument pointer in real64_call_asm()
  parisc: Cleanup mmap implementation regarding color alignment
  parisc: Drop HP-UX constants and structs from grfioctl.h
  parisc: Ensure page alignment in flush functions
  parisc: Replace regular spinlock with spin_trylock on panic path
  parisc: update kbuild doc. aliases for parisc64
  parisc: Limit amount of kgdb breakpoints on parisc
parents b4082428 6e3220ba
...@@ -160,6 +160,7 @@ directory name found in the arch/ directory. ...@@ -160,6 +160,7 @@ directory name found in the arch/ directory.
But some architectures such as x86 and sparc have aliases. But some architectures such as x86 and sparc have aliases.
- x86: i386 for 32 bit, x86_64 for 64 bit - x86: i386 for 32 bit, x86_64 for 64 bit
- parisc: parisc64 for 64 bit
- sparc: sparc32 for 32 bit, sparc64 for 64 bit - sparc: sparc32 for 32 bit, sparc64 for 64 bit
CROSS_COMPILE CROSS_COMPILE
......
...@@ -59,42 +59,4 @@ ...@@ -59,42 +59,4 @@
#define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */ #define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */
#define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */ #define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */
/* Framebuffer description filled in by ioctl(GCDESCRIBE). */
#define gaddr_t unsigned long /* FIXME: PA2.0 (64bit) portable ? */
struct grf_fbinfo {
unsigned int id; /* upper 32 bits of graphics id */
unsigned int mapsize; /* mapped size of framebuffer */
unsigned int dwidth, dlength;/* x and y sizes */
unsigned int width, length; /* total x and total y size */
unsigned int xlen; /* x pitch size */
unsigned int bpp, bppu; /* bits per pixel and used bpp */
unsigned int npl, nplbytes; /* # of planes and bytes per plane */
char name[32]; /* name of the device (from ROM) */
unsigned int attr; /* attributes */
gaddr_t fbbase, regbase;/* framebuffer and register base addr */
gaddr_t regions[6]; /* region bases */
};
#define GCID _IOR('G', 0, int)
#define GCON _IO('G', 1)
#define GCOFF _IO('G', 2)
#define GCAON _IO('G', 3)
#define GCAOFF _IO('G', 4)
#define GCMAP _IOWR('G', 5, int)
#define GCUNMAP _IOWR('G', 6, int)
#define GCMAP_HPUX _IO('G', 5)
#define GCUNMAP_HPUX _IO('G', 6)
#define GCLOCK _IO('G', 7)
#define GCUNLOCK _IO('G', 8)
#define GCLOCK_MINIMUM _IO('G', 9)
#define GCUNLOCK_MINIMUM _IO('G', 10)
#define GCSTATIC_CMAP _IO('G', 11)
#define GCVARIABLE_CMAP _IO('G', 12)
#define GCTERM _IOWR('G',20,int) /* multi-headed Tomcat */
#define GCDESCRIBE _IOR('G', 21, struct grf_fbinfo)
#define GCFASTLOCK _IO('G', 26)
#endif /* __ASM_PARISC_GRFIOCTL_H */ #endif /* __ASM_PARISC_GRFIOCTL_H */
...@@ -17,6 +17,8 @@ ...@@ -17,6 +17,8 @@
#define NUMREGBYTES sizeof(struct parisc_gdb_regs) #define NUMREGBYTES sizeof(struct parisc_gdb_regs)
#define BUFMAX 4096 #define BUFMAX 4096
#define KGDB_MAX_BREAKPOINTS 40
#define CACHE_FLUSH_IS_SAFE 1 #define CACHE_FLUSH_IS_SAFE 1
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
......
...@@ -80,6 +80,7 @@ int pdc_do_firm_test_reset(unsigned long ftc_bitmap); ...@@ -80,6 +80,7 @@ int pdc_do_firm_test_reset(unsigned long ftc_bitmap);
int pdc_do_reset(void); int pdc_do_reset(void);
int pdc_soft_power_info(unsigned long *power_reg); int pdc_soft_power_info(unsigned long *power_reg);
int pdc_soft_power_button(int sw_control); int pdc_soft_power_button(int sw_control);
int pdc_soft_power_button_panic(int sw_control);
void pdc_io_reset(void); void pdc_io_reset(void);
void pdc_io_reset_devices(void); void pdc_io_reset_devices(void);
int pdc_iodc_getc(void); int pdc_iodc_getc(void);
......
...@@ -1232,15 +1232,18 @@ int __init pdc_soft_power_info(unsigned long *power_reg) ...@@ -1232,15 +1232,18 @@ int __init pdc_soft_power_info(unsigned long *power_reg)
} }
/* /*
* pdc_soft_power_button - Control the soft power button behaviour * pdc_soft_power_button{_panic} - Control the soft power button behaviour
* @sw_control: 0 for hardware control, 1 for software control * @sw_control: 0 for hardware control, 1 for software control
* *
* *
* This PDC function places the soft power button under software or * This PDC function places the soft power button under software or
* hardware control. * hardware control.
 * Under software control the OS may control when to allow shutting * Under software control the OS may control when to allow shutting
* down the system. Under hardware control pressing the power button * down the system. Under hardware control pressing the power button
* powers off the system immediately. * powers off the system immediately.
*
* The _panic version relies on spin_trylock to prevent deadlock
* on panic path.
*/ */
int pdc_soft_power_button(int sw_control) int pdc_soft_power_button(int sw_control)
{ {
...@@ -1254,6 +1257,22 @@ int pdc_soft_power_button(int sw_control) ...@@ -1254,6 +1257,22 @@ int pdc_soft_power_button(int sw_control)
return retval; return retval;
} }
/*
 * Panic-safe variant of pdc_soft_power_button().
 *
 * Uses spin_trylock_irqsave() instead of a blocking lock acquisition
 * so it can be called from the panic notifier without deadlocking if
 * pdc_lock is already held at panic time.
 */
int pdc_soft_power_button_panic(int sw_control)
{
int retval;
unsigned long flags;
if (!spin_trylock_irqsave(&pdc_lock, flags)) {
pr_emerg("Couldn't enable soft power button\n");
return -EBUSY; /* ignored by the panic notifier */
}
retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/* /*
* pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices. * pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices.
* Primarily a problem on T600 (which parisc-linux doesn't support) but * Primarily a problem on T600 (which parisc-linux doesn't support) but
......
...@@ -889,6 +889,7 @@ ENDPROC_CFI(flush_icache_page_asm) ...@@ -889,6 +889,7 @@ ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm) ENTRY_CFI(flush_kernel_dcache_page_asm)
88: ldil L%dcache_stride, %r1 88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23 ldw R%dcache_stride(%r1), %r23
depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25 depdi,z 1, 63-PAGE_SHIFT,1, %r25
...@@ -925,6 +926,7 @@ ENDPROC_CFI(flush_kernel_dcache_page_asm) ...@@ -925,6 +926,7 @@ ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm) ENTRY_CFI(purge_kernel_dcache_page_asm)
88: ldil L%dcache_stride, %r1 88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23 ldw R%dcache_stride(%r1), %r23
depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25 depdi,z 1, 63-PAGE_SHIFT,1, %r25
......
...@@ -235,9 +235,6 @@ ENTRY_CFI(real64_call_asm) ...@@ -235,9 +235,6 @@ ENTRY_CFI(real64_call_asm)
/* save fn */ /* save fn */
copy %arg2, %r31 copy %arg2, %r31
/* set up the new ap */
ldo 64(%arg1), %r29
/* load up the arg registers from the saved arg area */ /* load up the arg registers from the saved arg area */
/* 32-bit calling convention passes first 4 args in registers */ /* 32-bit calling convention passes first 4 args in registers */
ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */ ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
...@@ -249,7 +246,9 @@ ENTRY_CFI(real64_call_asm) ...@@ -249,7 +246,9 @@ ENTRY_CFI(real64_call_asm)
ldd 7*REG_SZ(%arg1), %r19 ldd 7*REG_SZ(%arg1), %r19
ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */ ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
/* set up real-mode stack and real-mode ap */
tophys_r1 %sp tophys_r1 %sp
ldo -16(%sp), %r29 /* Reference param save area */
b,l rfi_virt2real,%r2 b,l rfi_virt2real,%r2
nop nop
......
...@@ -25,31 +25,26 @@ ...@@ -25,31 +25,26 @@
#include <linux/random.h> #include <linux/random.h>
#include <linux/compat.h> #include <linux/compat.h>
/* we construct an artificial offset for the mapping based on the physical /*
* address of the kernel mapping variable */ * Construct an artificial page offset for the mapping based on the physical
#define GET_LAST_MMAP(filp) \ * address of the kernel file mapping variable.
(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL) */
#define SET_LAST_MMAP(filp, val) \ #define GET_FILP_PGOFF(filp) \
{ /* nothing */ } (filp ? (((unsigned long) filp->f_mapping) >> 8) \
& ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
static int get_offset(unsigned int last_mmap)
{
return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}
static unsigned long shared_align_offset(unsigned int last_mmap, static unsigned long shared_align_offset(unsigned long filp_pgoff,
unsigned long pgoff) unsigned long pgoff)
{ {
return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT; return (filp_pgoff + pgoff) << PAGE_SHIFT;
} }
static inline unsigned long COLOR_ALIGN(unsigned long addr, static inline unsigned long COLOR_ALIGN(unsigned long addr,
unsigned int last_mmap, unsigned long pgoff) unsigned long filp_pgoff, unsigned long pgoff)
{ {
unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1); unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
unsigned long off = (SHM_COLOUR-1) & unsigned long off = (SHM_COLOUR-1) &
(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT); shared_align_offset(filp_pgoff, pgoff);
return base + off; return base + off;
} }
...@@ -98,126 +93,91 @@ static unsigned long mmap_upper_limit(struct rlimit *rlim_stack) ...@@ -98,126 +93,91 @@ static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
return PAGE_ALIGN(STACK_TOP - stack_base); return PAGE_ALIGN(STACK_TOP - stack_base);
} }
enum mmap_allocation_direction {UP, DOWN};
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, static unsigned long arch_get_unmapped_area_common(struct file *filp,
unsigned long len, unsigned long pgoff, unsigned long flags) unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev; struct vm_area_struct *vma, *prev;
unsigned long task_size = TASK_SIZE; unsigned long filp_pgoff;
int do_color_align, last_mmap; int do_color_align;
struct vm_unmapped_area_info info; struct vm_unmapped_area_info info;
if (len > task_size) if (unlikely(len > TASK_SIZE))
return -ENOMEM; return -ENOMEM;
do_color_align = 0; do_color_align = 0;
if (filp || (flags & MAP_SHARED)) if (filp || (flags & MAP_SHARED))
do_color_align = 1; do_color_align = 1;
last_mmap = GET_LAST_MMAP(filp); filp_pgoff = GET_FILP_PGOFF(filp);
if (flags & MAP_FIXED) { if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap && /* Even MAP_FIXED mappings must reside within TASK_SIZE */
(addr - shared_align_offset(last_mmap, pgoff)) if (TASK_SIZE - len < addr)
return -EINVAL;
if ((flags & MAP_SHARED) && filp &&
(addr - shared_align_offset(filp_pgoff, pgoff))
& (SHM_COLOUR - 1)) & (SHM_COLOUR - 1))
return -EINVAL; return -EINVAL;
goto found_addr; return addr;
} }
if (addr) { if (addr) {
if (do_color_align && last_mmap) if (do_color_align)
addr = COLOR_ALIGN(addr, last_mmap, pgoff); addr = COLOR_ALIGN(addr, filp_pgoff, pgoff);
else else
addr = PAGE_ALIGN(addr); addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev); vma = find_vma_prev(mm, addr, &prev);
if (task_size - len >= addr && if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)) && (!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev))) (!prev || addr >= vm_end_gap(prev)))
goto found_addr; return addr;
} }
info.flags = 0;
info.length = len; info.length = len;
info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(filp_pgoff, pgoff);
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
return addr;
VM_BUG_ON(addr != -ENOMEM);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
}
info.flags = 0;
info.low_limit = mm->mmap_legacy_base; info.low_limit = mm->mmap_legacy_base;
info.high_limit = mmap_upper_limit(NULL); info.high_limit = mmap_upper_limit(NULL);
info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; return vm_unmapped_area(&info);
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);
found_addr:
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
return addr;
} }
unsigned long unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags)
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{ {
struct vm_area_struct *vma, *prev; return arch_get_unmapped_area_common(filp,
struct mm_struct *mm = current->mm; addr, len, pgoff, flags, UP);
unsigned long addr = addr0; }
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
last_mmap = GET_LAST_MMAP(filp);
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap &&
(addr - shared_align_offset(last_mmap, pgoff))
& (SHM_COLOUR - 1))
return -EINVAL;
goto found_addr;
}
/* requesting a specific address */
if (addr) {
if (do_color_align && last_mmap)
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
goto found_addr;
VM_BUG_ON(addr != -ENOMEM);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
found_addr:
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
return addr; unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
return arch_get_unmapped_area_common(filp,
addr, len, pgoff, flags, DOWN);
} }
static int mmap_is_legacy(void) static int mmap_is_legacy(void)
......
...@@ -37,7 +37,6 @@ ...@@ -37,7 +37,6 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h> #include <linux/panic_notifier.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/sched/signal.h> #include <linux/sched/signal.h>
...@@ -175,16 +174,21 @@ static void powerfail_interrupt(int code, void *x) ...@@ -175,16 +174,21 @@ static void powerfail_interrupt(int code, void *x)
/* parisc_panic_event() is called by the panic handler. /*
* As soon as a panic occurs, our tasklets above will not be * parisc_panic_event() is called by the panic handler.
* executed any longer. This function then re-enables the *
* soft-power switch and allows the user to switch off the system * As soon as a panic occurs, our tasklets above will not
* be executed any longer. This function then re-enables
* the soft-power switch and allows the user to switch off
 * the system. We rely on pdc_soft_power_button_panic() * hardware control.
* since this version spin_trylocks (instead of regular
* spinlock), preventing deadlocks on panic path.
*/ */
static int parisc_panic_event(struct notifier_block *this, static int parisc_panic_event(struct notifier_block *this,
unsigned long event, void *ptr) unsigned long event, void *ptr)
{ {
/* re-enable the soft-power switch */ /* re-enable the soft-power switch */
pdc_soft_power_button(0); pdc_soft_power_button_panic(0);
return NOTIFY_DONE; return NOTIFY_DONE;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment