Commit b3242dba authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "6 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/memblock.c: reversed logic in memblock_discard()
  fork: fix incorrect fput of ->exe_file causing use-after-free
  mm/madvise.c: fix freeing of locked page with MADV_FREE
  dax: fix deadlock due to misaligned PMD faults
  mm, shmem: fix handling /sys/kernel/mm/transparent_hugepage/shmem_enabled
  PM/hibernate: touch NMI watchdog when creating snapshot
parents 67a3b5cb 91b540f9
fs/dax.c
@@ -1383,6 +1383,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
 
+	/*
+	 * Make sure that the faulting address's PMD offset (color) matches
+	 * the PMD offset from the start of the file.  This is necessary so
+	 * that a PMD range in the page table overlaps exactly with a PMD
+	 * range in the radix tree.
+	 */
+	if ((vmf->pgoff & PG_PMD_COLOUR) !=
+	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
+		goto fallback;
+
 	/* Fall back to PTEs if we're going to COW */
 	if (write && !(vma->vm_flags & VM_SHARED))
 		goto fallback;
...
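The colour check above is easiest to see with concrete numbers. Here is a minimal userspace sketch, assuming x86-64 constants (4 KiB pages, 2 MiB PMDs); PAGE_SHIFT, PMD_SHIFT, and the sample pgoff/address values are illustrative stand-ins rather than the kernel's definitions:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_SHIFT       21
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)  /* 511 */

int main(void)
{
        unsigned long pgoff = 513;                      /* page index into the file */
        unsigned long address = 0x7f0000201000UL;       /* faulting virtual address */

        /* Same test as the patch: the low 9 bits of the file page index
         * and of the virtual page index must agree, or a 2 MiB mapping
         * cannot line up with a 2 MiB entry in the radix tree. */
        if ((pgoff & PG_PMD_COLOUR) !=
            ((address >> PAGE_SHIFT) & PG_PMD_COLOUR))
                printf("colour mismatch: fall back to PTE faults\n");
        else
                printf("colours match: PMD fault can proceed\n");
        return 0;
}

With these values both colours are 1, so the 2 MiB fault may proceed; moving the address by one 4 KiB page would make the colours differ and force the PTE fallback.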
kernel/fork.c
@@ -806,6 +806,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm_init_cpumask(mm);
 	mm_init_aio(mm);
 	mm_init_owner(mm, p);
+	RCU_INIT_POINTER(mm->exe_file, NULL);
 	mmu_notifier_mm_init(mm);
 	init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
...
mm/madvise.c
@@ -368,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 				pte_offset_map_lock(mm, pmd, addr, &ptl);
 				goto out;
 			}
-			put_page(page);
 			unlock_page(page);
+			put_page(page);
 			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 			pte--;
 			addr -= PAGE_SIZE;
...
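The swap matters because put_page() may drop the last reference and free the page, after which unlock_page() would touch freed memory. A userspace model of the same ordering rule, using a hypothetical refcounted object guarded by a mutex (struct obj and obj_put() are stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct obj {
        pthread_mutex_t lock;
        int refcount;           /* single-threaded sketch: plain int is fine */
};

/* Drop one reference; frees the object when the count reaches zero. */
static void obj_put(struct obj *o)
{
        if (--o->refcount == 0) {
                pthread_mutex_destroy(&o->lock);
                free(o);
        }
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        pthread_mutex_init(&o->lock, NULL);
        o->refcount = 1;

        pthread_mutex_lock(&o->lock);

        /* Buggy order (what the patch removes):
         *
         *      obj_put(o);                     // may free o
         *      pthread_mutex_unlock(&o->lock); // use-after-free
         *
         * Fixed order: unlock while the reference still pins the
         * object, then drop the reference.
         */
        pthread_mutex_unlock(&o->lock);
        obj_put(o);

        printf("done\n");
        return 0;
}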
mm/memblock.c
@@ -299,7 +299,7 @@ void __init memblock_discard(void)
 		__memblock_free_late(addr, size);
 	}
 
-	if (memblock.memory.regions == memblock_memory_init_regions) {
+	if (memblock.memory.regions != memblock_memory_init_regions) {
 		addr = __pa(memblock.memory.regions);
 		size = PAGE_ALIGN(sizeof(struct memblock_region) *
 				  memblock.memory.max);
...
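The corrected condition frees the regions array only when it was reallocated at runtime, never when it still points at the static bootstrap storage. A small userspace model with made-up names (struct memory, init_regions, discard()) illustrating the same test:

#include <stdio.h>
#include <stdlib.h>

struct region { unsigned long base, size; };

#define INIT_REGIONS 4
static struct region init_regions[INIT_REGIONS];

struct memory {
        struct region *regions;
        unsigned long max;
};

static void discard(struct memory *mem)
{
        /* The bug used '==', which freed the static array and leaked
         * the dynamic one; '!=' frees only runtime allocations. */
        if (mem->regions != init_regions) {
                free(mem->regions);
                mem->regions = NULL;
        }
}

int main(void)
{
        struct memory mem = { .regions = init_regions, .max = INIT_REGIONS };

        discard(&mem);          /* still the static array: nothing to free */

        mem.regions = calloc(8, sizeof(struct region));
        mem.max = 8;
        discard(&mem);          /* reallocated at runtime: freed */

        printf("ok\n");
        return 0;
}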
mm/page_alloc.c
@@ -66,6 +66,7 @@
 #include <linux/kthread.h>
 #include <linux/memcontrol.h>
 #include <linux/ftrace.h>
+#include <linux/nmi.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -2535,9 +2536,14 @@ void drain_all_pages(struct zone *zone)
 #ifdef CONFIG_HIBERNATION
 
+/*
+ * Touch the watchdog for every WD_PAGE_COUNT pages.
+ */
+#define WD_PAGE_COUNT	(128*1024)
+
 void mark_free_pages(struct zone *zone)
 {
-	unsigned long pfn, max_zone_pfn;
+	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
 	unsigned long flags;
 	unsigned int order, t;
 	struct page *page;
@@ -2552,6 +2558,11 @@ void mark_free_pages(struct zone *zone)
 		if (pfn_valid(pfn)) {
 			page = pfn_to_page(pfn);
 
+			if (!--page_count) {
+				touch_nmi_watchdog();
+				page_count = WD_PAGE_COUNT;
+			}
+
 			if (page_zone(page) != zone)
 				continue;
@@ -2565,8 +2576,13 @@ void mark_free_pages(struct zone *zone)
 			unsigned long i;
 
 			pfn = page_to_pfn(page);
-			for (i = 0; i < (1UL << order); i++)
+			for (i = 0; i < (1UL << order); i++) {
+				if (!--page_count) {
+					touch_nmi_watchdog();
+					page_count = WD_PAGE_COUNT;
+				}
 				swsusp_set_page_free(pfn_to_page(pfn + i));
+			}
 		}
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
...
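mark_free_pages() walks every pfn in the zone while holding the zone lock with interrupts off, which on large-memory machines can run long enough to trip the hard-lockup detector; the patch therefore pokes the watchdog every WD_PAGE_COUNT pages. A standalone sketch of that countdown pattern, with touch_watchdog() as a stub standing in for the kernel's touch_nmi_watchdog():

#include <stdio.h>

#define WD_PAGE_COUNT (128*1024)

/* Stub standing in for touch_nmi_watchdog(). */
static void touch_watchdog(void)
{
}

int main(void)
{
        unsigned long pfn, max_pfn = 1UL << 22; /* 16 GiB of 4 KiB pages */
        unsigned long page_count = WD_PAGE_COUNT;

        for (pfn = 0; pfn < max_pfn; pfn++) {
                /* Cheap countdown: one branch per page, one watchdog
                 * poke every WD_PAGE_COUNT pages. */
                if (!--page_count) {
                        touch_watchdog();
                        page_count = WD_PAGE_COUNT;
                }
                /* ... per-page work (swsusp_set_page_free() in the patch) ... */
        }
        printf("walked %lu pages\n", max_pfn);
        return 0;
}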
mm/shmem.c
@@ -3967,7 +3967,7 @@ int __init shmem_init(void)
 	}
 
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-	if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
+	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	else
 		shmem_huge = 0; /* just in case it was patched */
@@ -4028,7 +4028,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 		return -EINVAL;
 
 	shmem_huge = huge;
-	if (shmem_huge < SHMEM_HUGE_DENY)
+	if (shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	return count;
 }
...
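The flipped comparison works because the special states sit below zero in the SHMEM_HUGE_* value space: "deny" is -1 and "force" is -2, while the four mount policies are 0 through 3. The old '<' therefore matched only "force", and the fixed '>' matches exactly the normal policies. A sketch printing the truth table, with the constant values as defined in mm/shmem.c of this era:

#include <stdio.h>

#define SHMEM_HUGE_NEVER        0
#define SHMEM_HUGE_ALWAYS       1
#define SHMEM_HUGE_WITHIN_SIZE  2
#define SHMEM_HUGE_ADVISE       3
#define SHMEM_HUGE_DENY         (-1)
#define SHMEM_HUGE_FORCE        (-2)

int main(void)
{
        int values[] = { SHMEM_HUGE_NEVER, SHMEM_HUGE_ALWAYS,
                         SHMEM_HUGE_WITHIN_SIZE, SHMEM_HUGE_ADVISE,
                         SHMEM_HUGE_DENY, SHMEM_HUGE_FORCE };
        unsigned int i;

        /* The old test propagated the sysfs value to the mount only for
         * "force"; the fixed test does it for the four real policies. */
        for (i = 0; i < sizeof(values) / sizeof(values[0]); i++)
                printf("shmem_huge = %2d  old '<': %d  fixed '>': %d\n",
                       values[i],
                       values[i] < SHMEM_HUGE_DENY,
                       values[i] > SHMEM_HUGE_DENY);
        return 0;
}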