Commit cf681c2e authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:
 "Three more bug fixes for 4.6

   - Due to a race in the dynamic page table code a multi-threaded
     program can cause a translation specification exception.  With
      panic_on_oops set, a user space program can crash the system.

   - An information leak with the /dev/sclp device.

   - A use after free in the s390 PCI code"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/sclp_ctl: fix potential information leak with /dev/sclp
  s390/mm: fix asce_bits handling with dynamic pagetable levels
  s390/pci: fix use after free in dma_init
parents b75a2bf8 532c34b5
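
The asce fix is easiest to read as a torn-read problem: the old code recombined mm->context.asce_bits with __pa(mm->pgd) at every use, while crst_table_upgrade() updated those two fields with separate stores. A concurrent switch_mm() on another CPU could therefore build an ASCE whose designation type did not match the table it pointed to, which the hardware rejects with a translation specification exception. A minimal user-space sketch of this bug class (illustrative names and values only, not kernel code; compile with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-ins for mm->context.asce_bits and __pa(mm->pgd). */
    static unsigned long asce_bits = 0x07;       /* old "3-level" type bits */
    static unsigned long pgd_origin = 0x1000;    /* old top-level table     */
    /* The fix: keep the combined value in one word, updated by one store. */
    static _Atomic unsigned long asce = 0x1007;

    static void *upgrade_thread(void *unused)
    {
            /* Two independent stores: a concurrent reader can observe the
             * new origin together with the old type bits, or vice versa. */
            pgd_origin = 0x2000;                 /* new top-level table */
            asce_bits = 0x0b;                    /* new "4-level" bits  */
            /* One store: readers see old or new, never a mixture. */
            atomic_store(&asce, 0x200b);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, upgrade_thread, NULL);
            /* Racy recombination, as the old set_user_asce() did: */
            unsigned long torn = asce_bits | pgd_origin;
            /* Race-free single-word read, as the fixed code does: */
            unsigned long whole = atomic_load(&asce);
            pthread_join(t, NULL);
            printf("recombined=%#lx single-word=%#lx\n", torn, whole);
            return 0;
    }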
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,7 @@ typedef struct {
 	spinlock_t list_lock;
 	struct list_head pgtable_list;
 	struct list_head gmap_list;
-	unsigned long asce_bits;
+	unsigned long asce;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
 	/* The mmu context allocates 4K page tables. */
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
 	mm->context.has_pgste = 0;
 	mm->context.use_skey = 0;
 #endif
-	if (mm->context.asce_limit == 0) {
+	switch (mm->context.asce_limit) {
+	case 1UL << 42:
+		/*
+		 * forked 3-level task, fall through to set new asce with new
+		 * mm->pgd
+		 */
+	case 0:
 		/* context created by exec, set asce limit to 4TB */
-		mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
 		mm->context.asce_limit = STACK_TOP_MAX;
-	} else if (mm->context.asce_limit == (1UL << 31)) {
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+		break;
+	case 1UL << 53:
+		/* forked 4-level task, set new asce with new mm->pgd */
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+		break;
+	case 1UL << 31:
+		/* forked 2-level compat task, set new asce with new mm->pgd */
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+		/* pgd_alloc() did not increase mm->nr_pmds */
 		mm_inc_nr_pmds(mm);
 	}
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
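
For reference, the switch above encodes a fixed mapping from asce_limit to page table depth and top-level designation type. A hypothetical helper (not part of the patch, reusing the same _ASCE_TYPE_* macros) restating that mapping:

    /* Hypothetical helper, not in the patch: the address space limit
     * determines the designation type of the top-level CRST table. */
    static unsigned long asce_type_for_limit(unsigned long asce_limit)
    {
            switch (asce_limit) {
            case 1UL << 31:         /* 2 levels, 31-bit compat task */
                    return _ASCE_TYPE_SEGMENT;
            case 1UL << 42:         /* 3 levels, 4TB (the exec default) */
                    return _ASCE_TYPE_REGION3;
            case 1UL << 53:         /* 4 levels, 8PB (after upgrade) */
                    return _ASCE_TYPE_REGION2;
            }
            return 0;               /* unreachable for an initialized mm */
    }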
@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+	S390_lowcore.user_asce = mm->context.asce;
 	if (current->thread.mm_segment.ar4)
 		__ctl_load(S390_lowcore.user_asce, 7, 7);
 	set_cpu_flag(CIF_ASCE);
@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
-	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
+	S390_lowcore.user_asce = next->context.asce;
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -52,8 +52,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 	return _REGION2_ENTRY_EMPTY;
 }
 
-int crst_table_upgrade(struct mm_struct *, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *, unsigned long limit);
+int crst_table_upgrade(struct mm_struct *);
+void crst_table_downgrade(struct mm_struct *);
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -175,7 +175,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
 	regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA;		\
 	regs->psw.addr = new_psw;				\
 	regs->gprs[15] = new_stackp;				\
-	crst_table_downgrade(current->mm, 1UL << 31);		\
+	crst_table_downgrade(current->mm);			\
 	execve_tail();						\
 } while (0)
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
 static inline void __tlb_flush_kernel(void)
 {
 	if (MACHINE_HAS_IDTE)
-		__tlb_flush_idte((unsigned long) init_mm.pgd |
-				 init_mm.context.asce_bits);
+		__tlb_flush_idte(init_mm.context.asce);
 	else
 		__tlb_flush_global();
 }
@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
 static inline void __tlb_flush_kernel(void)
 {
 	if (MACHINE_HAS_TLB_LC)
-		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
-				       init_mm.context.asce_bits);
+		__tlb_flush_idte_local(init_mm.context.asce);
 	else
 		__tlb_flush_local();
 }
@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
-				 mm->context.asce_bits);
+		__tlb_flush_asce(mm, mm->context.asce);
 	else
 		__tlb_flush_full(mm);
 }
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -89,7 +89,8 @@ void __init paging_init(void)
 		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION3_ENTRY_EMPTY;
 	}
-	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+	S390_lowcore.kernel_asce = init_mm.context.asce;
 	clear_table((unsigned long *) init_mm.pgd, pgd_type,
 		    sizeof(unsigned long)*2048);
 	vmem_map_init();
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 	if (!(flags & MAP_FIXED))
 		addr = 0;
 	if ((addr + len) >= TASK_SIZE)
-		return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
+		return crst_table_upgrade(current->mm);
 	return 0;
 }
 
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		return area;
 	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+		rc = crst_table_upgrade(mm);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 		return area;
 	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+		rc = crst_table_upgrade(mm);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -76,81 +76,52 @@ static void __crst_table_upgrade(void *arg)
 	__tlb_flush_local();
 }
 
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+int crst_table_upgrade(struct mm_struct *mm)
 {
 	unsigned long *table, *pgd;
-	unsigned long entry;
-	int flush;
 
-	BUG_ON(limit > TASK_MAX_SIZE);
-	flush = 0;
-repeat:
+	/* upgrade should only happen from 3 to 4 levels */
+	BUG_ON(mm->context.asce_limit != (1UL << 42));
+
 	table = crst_table_alloc(mm);
 	if (!table)
 		return -ENOMEM;
+
 	spin_lock_bh(&mm->page_table_lock);
-	if (mm->context.asce_limit < limit) {
-		pgd = (unsigned long *) mm->pgd;
-		if (mm->context.asce_limit <= (1UL << 31)) {
-			entry = _REGION3_ENTRY_EMPTY;
-			mm->context.asce_limit = 1UL << 42;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION3;
-		} else {
-			entry = _REGION2_ENTRY_EMPTY;
-			mm->context.asce_limit = 1UL << 53;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION2;
-		}
-		crst_table_init(table, entry);
-		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
-		mm->pgd = (pgd_t *) table;
-		mm->task_size = mm->context.asce_limit;
-		table = NULL;
-		flush = 1;
-	}
+	pgd = (unsigned long *) mm->pgd;
+	crst_table_init(table, _REGION2_ENTRY_EMPTY);
+	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+	mm->pgd = (pgd_t *) table;
+	mm->context.asce_limit = 1UL << 53;
+	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+	mm->task_size = mm->context.asce_limit;
 	spin_unlock_bh(&mm->page_table_lock);
-	if (table)
-		crst_table_free(mm, table);
-	if (mm->context.asce_limit < limit)
-		goto repeat;
-	if (flush)
-		on_each_cpu(__crst_table_upgrade, mm, 0);
+
+	on_each_cpu(__crst_table_upgrade, mm, 0);
 	return 0;
 }
 
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+void crst_table_downgrade(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 
+	/* downgrade should only happen from 3 to 2 levels (compat only) */
+	BUG_ON(mm->context.asce_limit != (1UL << 42));
+
 	if (current->active_mm == mm) {
 		clear_user_asce();
 		__tlb_flush_mm(mm);
 	}
-	while (mm->context.asce_limit > limit) {
-		pgd = mm->pgd;
-		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
-		case _REGION_ENTRY_TYPE_R2:
-			mm->context.asce_limit = 1UL << 42;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION3;
-			break;
-		case _REGION_ENTRY_TYPE_R3:
-			mm->context.asce_limit = 1UL << 31;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_SEGMENT;
-			break;
-		default:
-			BUG();
-		}
-		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
-		mm->task_size = mm->context.asce_limit;
-		crst_table_free(mm, (unsigned long *) pgd);
-	}
+
+	pgd = mm->pgd;
+	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+	mm->context.asce_limit = 1UL << 31;
+	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+	mm->task_size = mm->context.asce_limit;
+	crst_table_free(mm, (unsigned long *) pgd);
+
 	if (current->active_mm == mm)
 		set_user_asce(mm);
 }
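
With forked tasks handled in init_new_context(), crst_table_upgrade() only ever goes from 3 to 4 levels, which is what lets the repeat loop, the flush flag and the limit parameter disappear. The remaining shape is the usual one: allocate outside the lock, publish pgd, limit and the pre-combined asce under the lock, then broadcast. A rough user-space analogue (illustrative names; a pthread mutex stands in for mm->page_table_lock and 0x0b for the region-second ASCE bits):

    #include <pthread.h>
    #include <stdlib.h>

    struct fake_mm {
            pthread_mutex_t lock;         /* stands in for page_table_lock */
            unsigned long *pgd;
            unsigned long asce_limit;
            unsigned long asce;
    };

    static int upgrade_3_to_4(struct fake_mm *mm)
    {
            /* Allocate the new top-level table before taking the lock,
             * so the allocation is free to sleep. A CRST table holds
             * 2048 entries (four pages) on s390. */
            unsigned long *table = calloc(2048, sizeof(unsigned long));

            if (!table)
                    return -1;
            pthread_mutex_lock(&mm->lock);
            /* pgd_populate() would hook the old top level below the new
             * one here; pgd, limit and asce are then published together. */
            mm->pgd = table;
            mm->asce_limit = 1UL << 53;
            mm->asce = (unsigned long) table | 0x0b;
            pthread_mutex_unlock(&mm->lock);
            /* The patch then broadcasts the new asce to all CPUs with
             * on_each_cpu(__crst_table_upgrade, mm, 0). */
            return 0;
    }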
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -457,7 +457,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 	zdev->dma_table = dma_alloc_cpu_table();
 	if (!zdev->dma_table) {
 		rc = -ENOMEM;
-		goto out_clean;
+		goto out;
 	}
 
 	/*
@@ -477,18 +477,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
 	if (!zdev->iommu_bitmap) {
 		rc = -ENOMEM;
-		goto out_reg;
+		goto free_dma_table;
 	}
 
 	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 				(u64) zdev->dma_table);
 	if (rc)
-		goto out_reg;
-	return 0;
+		goto free_bitmap;
 
-out_reg:
+	return 0;
+free_bitmap:
+	vfree(zdev->iommu_bitmap);
+	zdev->iommu_bitmap = NULL;
+free_dma_table:
 	dma_free_cpu_table(zdev->dma_table);
-out_clean:
+	zdev->dma_table = NULL;
+out:
 	return rc;
 }
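
The use-after-free came from the error path freeing zdev->dma_table while leaving the pointer populated, so a later teardown could free or dereference it again. The reworked labels unwind in reverse allocation order and clear each pointer as they go; a minimal sketch of that pattern (illustrative names and sizes):

    #include <stdlib.h>

    struct fake_zdev {
            void *dma_table;
            void *iommu_bitmap;
    };

    static int init_device(struct fake_zdev *zdev)
    {
            int rc;

            zdev->dma_table = malloc(4096);
            if (!zdev->dma_table) {
                    rc = -1;
                    goto out;
            }
            zdev->iommu_bitmap = calloc(1, 4096);
            if (!zdev->iommu_bitmap) {
                    rc = -1;
                    goto free_dma_table;
            }
            return 0;

    free_dma_table:
            free(zdev->dma_table);
            /* Clearing the pointer is the point of the fix: nothing can
             * free or use the stale value afterwards. */
            zdev->dma_table = NULL;
    out:
            return rc;
    }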
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
 {
 	struct sclp_ctl_sccb ctl_sccb;
 	struct sccb_header *sccb;
+	unsigned long copied;
 	int rc;
 
 	if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!sccb)
 		return -ENOMEM;
-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+	copied = PAGE_SIZE -
+		copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+	if (offsetof(struct sccb_header, length) +
+	    sizeof(sccb->length) > copied || sccb->length > copied) {
 		rc = -EFAULT;
 		goto out_free;
 	}
-	if (sccb->length > PAGE_SIZE || sccb->length < 8)
-		return -EINVAL;
-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
-		rc = -EFAULT;
+	if (sccb->length < 8) {
+		rc = -EINVAL;
 		goto out_free;
 	}
 	rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
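
The information leak was a double fetch: the old code copied the SCCB header, validated its self-declared length, then copied from user space a second time with a length the caller could have changed in between, so the later copy back to user space could walk past the one-page buffer. The fix copies the page exactly once and validates sccb->length against the bytes actually obtained. A user-space sketch of that single-fetch rule (illustrative types; memcpy stands in for copy_from_user):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct hdr {
            uint16_t length;      /* self-described size, untrusted */
    };

    /* Copy once, then validate the embedded length against the bytes
     * actually obtained; never read the untrusted source again. */
    static int fetch_once(void *dst, const void *untrusted, size_t copied)
    {
            const struct hdr *h = dst;

            memcpy(dst, untrusted, copied);   /* the one and only fetch */
            if (offsetof(struct hdr, length) + sizeof(h->length) > copied ||
                h->length > copied)
                    return -1;    /* claims more than we hold: EFAULT */
            if (h->length < 8)
                    return -1;    /* shorter than a valid SCCB: EINVAL */
            return 0;             /* from here on, work only on dst */
    }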