Commit 0f6f281b authored by Martin Schwidefsky's avatar Martin Schwidefsky

s390/mm: downgrade page table after fork of a 31 bit process

The downgrade of the 4 level page table created by init_new_context is
currently done only in start_thread31. If a 31 bit process forks the
new mm uses a 4 level page table, including the task size of 2<<42
that goes along with it. This is incorrect as now a 31 bit process
can map memory beyond 2GB. Define arch_dup_mmap to do the downgrade
after fork.

Cc: stable@vger.kernel.org
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 8143adaf
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/ctl_reg.h> #include <asm/ctl_reg.h>
#include <asm-generic/mm_hooks.h>
static inline int init_new_context(struct task_struct *tsk, static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm) struct mm_struct *mm)
...@@ -91,4 +90,17 @@ static inline void activate_mm(struct mm_struct *prev, ...@@ -91,4 +90,17 @@ static inline void activate_mm(struct mm_struct *prev,
switch_mm(prev, next, current); switch_mm(prev, next, current);
} }
/*
 * Hook run after dup_mmap() has copied the parent mm into the child.
 * init_new_context() always builds a full 4 level page table, so a
 * child forked from a 31 bit parent would otherwise get the larger
 * task size that goes with it.  If the parent's address-space-control
 * element limit is below the child's, shrink the child's page table
 * back down so a 31 bit process cannot map memory beyond its limit.
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
/* asce_limit only varies on 64 bit kernels; 31 bit has one layout */
if (oldmm->context.asce_limit < mm->context.asce_limit)
crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}
/*
 * No architecture-specific teardown is needed when an mm is torn
 * down; this empty stub exists only to satisfy the mm-hooks
 * interface now that <asm-generic/mm_hooks.h> is no longer included.
 */
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __S390_MMU_CONTEXT_H */ #endif /* __S390_MMU_CONTEXT_H */
...@@ -120,7 +120,9 @@ struct stack_frame { ...@@ -120,7 +120,9 @@ struct stack_frame {
regs->psw.mask = psw_user_bits | PSW_MASK_BA; \ regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \ regs->gprs[15] = new_stackp; \
__tlb_flush_mm(current->mm); \
crst_table_downgrade(current->mm, 1UL << 31); \ crst_table_downgrade(current->mm, 1UL << 31); \
update_mm(current->mm, current); \
} while (0) } while (0)
/* Forward declaration, a strange C thing */ /* Forward declaration, a strange C thing */
......
...@@ -103,9 +103,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm) ...@@ -103,9 +103,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
int s390_mmap_check(unsigned long addr, unsigned long len) int s390_mmap_check(unsigned long addr, unsigned long len)
{ {
int rc;
if (!is_compat_task() && if (!is_compat_task() &&
len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
return crst_table_upgrade(current->mm, 1UL << 53); rc = crst_table_upgrade(current->mm, 1UL << 53);
if (rc)
return rc;
update_mm(current->mm, current);
}
return 0; return 0;
} }
...@@ -125,6 +131,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr, ...@@ -125,6 +131,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53); rc = crst_table_upgrade(mm, 1UL << 53);
if (rc) if (rc)
return (unsigned long) rc; return (unsigned long) rc;
update_mm(mm, current);
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
} }
return area; return area;
...@@ -147,6 +154,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, ...@@ -147,6 +154,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53); rc = crst_table_upgrade(mm, 1UL << 53);
if (rc) if (rc)
return (unsigned long) rc; return (unsigned long) rc;
update_mm(mm, current);
area = arch_get_unmapped_area_topdown(filp, addr, len, area = arch_get_unmapped_area_topdown(filp, addr, len,
pgoff, flags); pgoff, flags);
} }
......
...@@ -85,7 +85,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) ...@@ -85,7 +85,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
crst_table_free(mm, table); crst_table_free(mm, table);
if (mm->context.asce_limit < limit) if (mm->context.asce_limit < limit)
goto repeat; goto repeat;
update_mm(mm, current);
return 0; return 0;
} }
...@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) ...@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{ {
pgd_t *pgd; pgd_t *pgd;
if (mm->context.asce_limit <= limit)
return;
__tlb_flush_mm(mm);
while (mm->context.asce_limit > limit) { while (mm->context.asce_limit > limit) {
pgd = mm->pgd; pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
...@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) ...@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
mm->task_size = mm->context.asce_limit; mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd); crst_table_free(mm, (unsigned long *) pgd);
} }
update_mm(mm, current);
} }
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment