Commit f22027d0 authored by Martin Schwidefsky, committed by Ben Hutchings

s390/mm: downgrade page table after fork of a 31 bit process

commit 0f6f281b upstream.

The downgrade of the 4 level page table created by init_new_context is
currently done only in start_thread31. If a 31 bit process forks, the
new mm uses a 4 level page table, including the task size of 2<<42
that goes along with it. This is incorrect as now a 31 bit process
can map memory beyond 2GB. Define arch_dup_mmap to do the downgrade
after fork.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
[bwh: Backported to 3.2: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 60ed9e38
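
For context, the new arch_dup_mmap() hook is invoked from the generic fork path: dup_mmap() in kernel/fork.c copies the parent's VMAs into the child mm and then calls arch_dup_mmap(oldmm, mm). The sketch below is a heavily simplified illustration of that call site, not part of this patch; locking, error handling and the actual VMA copy are elided.

/*
 * Simplified sketch of the generic fork path (kernel/fork.c);
 * locking, error handling and the page table copy are elided.
 */
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		/* ... duplicate each VMA and copy its page tables ... */
	}

	/*
	 * Architecture hook at the end of the copy: with this patch,
	 * s390 implements arch_dup_mmap() to downgrade the child's
	 * 4 level page table to the parent's asce_limit, so a forked
	 * 31 bit process is again limited to 2GB.
	 */
	arch_dup_mmap(oldmm, mm);
	return 0;
}

With the hook defined, the downgrade no longer depends on start_thread31(), which runs only when a 31 bit binary is exec'ed, not when it forks.
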
arch/s390/include/asm/mmu_context.h
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
-#include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
@@ -92,4 +91,17 @@ static inline void activate_mm(struct mm_struct *prev,
 	switch_mm(prev, next, current);
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+#ifdef CONFIG_64BIT
+	if (oldmm->context.asce_limit < mm->context.asce_limit)
+		crst_table_downgrade(mm, oldmm->context.asce_limit);
+#endif
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
 #endif /* __S390_MMU_CONTEXT_H */
arch/s390/include/asm/processor.h
@@ -130,7 +130,9 @@ struct stack_frame {
 	regs->psw.mask	= psw_user_bits | PSW_MASK_BA;	\
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;	\
 	regs->gprs[15]	= new_stackp;			\
+	__tlb_flush_mm(current->mm);			\
 	crst_table_downgrade(current->mm, 1UL << 31);	\
+	update_mm(current->mm, current);		\
 } while (0)
 
 /* Forward declaration, a strange C thing */
arch/s390/mm/mmap.c
@@ -106,9 +106,15 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
+	int rc;
+
 	if (!is_compat_task() &&
-	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
-		return crst_table_upgrade(current->mm, 1UL << 53);
+	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+		rc = crst_table_upgrade(current->mm, 1UL << 53);
+		if (rc)
+			return rc;
+		update_mm(current->mm, current);
+	}
 	return 0;
 }
@@ -128,6 +134,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	}
 	return area;
@@ -150,6 +157,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
 						      pgoff, flags);
 	}
arch/s390/mm/pgtable.c
@@ -97,7 +97,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 		crst_table_free(mm, table);
 	if (mm->context.asce_limit < limit)
 		goto repeat;
-	update_mm(mm, current);
 	return 0;
 }
@@ -105,9 +104,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
 	pgd_t *pgd;
 
-	if (mm->context.asce_limit <= limit)
-		return;
-	__tlb_flush_mm(mm);
 	while (mm->context.asce_limit > limit) {
 		pgd = mm->pgd;
 		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -130,7 +126,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		mm->task_size = mm->context.asce_limit;
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
-	update_mm(mm, current);
 }
 
 #endif