Commit d8887f1c authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "9 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mailmap: update Mark Yao's email address
  userfaultfd: clear the vma->vm_userfaultfd_ctx if UFFD_EVENT_FORK fails
  mm/sparse.c: wrong allocation for mem_section
  mm/zsmalloc.c: include fs.h
  mm/debug.c: provide useful debugging information for VM_BUG
  kernel/exit.c: export abort() to modules
  mm/mprotect: add a cond_resched() inside change_pmd_range()
  kernel/acct.c: fix the acct->needcheck check in check_free_space()
  mm: check pfn_valid first in zero_resv_unavail
parents 5866bec2 9a0e7120
...@@ -107,6 +107,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> ...@@ -107,6 +107,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com> Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Mark Brown <broonie@sirena.org.uk> Mark Brown <broonie@sirena.org.uk>
Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com> Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com> Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Matthieu CASTET <castet.matthieu@free.fr> Matthieu CASTET <castet.matthieu@free.fr>
......
...@@ -793,7 +793,6 @@ void abort(void) ...@@ -793,7 +793,6 @@ void abort(void)
/* if that doesn't kill us, halt */ /* if that doesn't kill us, halt */
panic("Oops failed to kill thread"); panic("Oops failed to kill thread");
} }
EXPORT_SYMBOL(abort);
void __init trap_init(void) void __init trap_init(void)
{ {
......
...@@ -122,7 +122,6 @@ void abort(void) ...@@ -122,7 +122,6 @@ void abort(void)
/* if that doesn't kill us, halt */ /* if that doesn't kill us, halt */
panic("Oops failed to kill thread"); panic("Oops failed to kill thread");
} }
EXPORT_SYMBOL(abort);
void __init trap_init(void) void __init trap_init(void)
{ {
......
...@@ -298,7 +298,6 @@ void abort(void) ...@@ -298,7 +298,6 @@ void abort(void)
/* if that doesn't kill us, halt */ /* if that doesn't kill us, halt */
panic("Oops failed to kill thread"); panic("Oops failed to kill thread");
} }
EXPORT_SYMBOL(abort);
void __init trap_init(void) void __init trap_init(void)
{ {
......
...@@ -570,11 +570,14 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) ...@@ -570,11 +570,14 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
struct userfaultfd_wait_queue *ewq) struct userfaultfd_wait_queue *ewq)
{ {
struct userfaultfd_ctx *release_new_ctx;
if (WARN_ON_ONCE(current->flags & PF_EXITING)) if (WARN_ON_ONCE(current->flags & PF_EXITING))
goto out; goto out;
ewq->ctx = ctx; ewq->ctx = ctx;
init_waitqueue_entry(&ewq->wq, current); init_waitqueue_entry(&ewq->wq, current);
release_new_ctx = NULL;
spin_lock(&ctx->event_wqh.lock); spin_lock(&ctx->event_wqh.lock);
/* /*
...@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, ...@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
new = (struct userfaultfd_ctx *) new = (struct userfaultfd_ctx *)
(unsigned long) (unsigned long)
ewq->msg.arg.reserved.reserved1; ewq->msg.arg.reserved.reserved1;
release_new_ctx = new;
userfaultfd_ctx_put(new);
} }
break; break;
} }
...@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, ...@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
spin_unlock(&ctx->event_wqh.lock); spin_unlock(&ctx->event_wqh.lock);
if (release_new_ctx) {
struct vm_area_struct *vma;
struct mm_struct *mm = release_new_ctx->mm;
/* the various vma->vm_userfaultfd_ctx still points to it */
down_write(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next)
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
up_write(&mm->mmap_sem);
userfaultfd_ctx_put(release_new_ctx);
}
/* /*
* ctx may go away after this if the userfault pseudo fd is * ctx may go away after this if the userfault pseudo fd is
* already released. * already released.
......
...@@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct) ...@@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
{ {
struct kstatfs sbuf; struct kstatfs sbuf;
if (time_is_before_jiffies(acct->needcheck)) if (time_is_after_jiffies(acct->needcheck))
goto out; goto out;
/* May block */ /* May block */
......
...@@ -1763,3 +1763,4 @@ __weak void abort(void) ...@@ -1763,3 +1763,4 @@ __weak void abort(void)
/* if that doesn't kill us, halt */ /* if that doesn't kill us, halt */
panic("Oops failed to kill thread"); panic("Oops failed to kill thread");
} }
EXPORT_SYMBOL(abort);
...@@ -50,7 +50,7 @@ void __dump_page(struct page *page, const char *reason) ...@@ -50,7 +50,7 @@ void __dump_page(struct page *page, const char *reason)
*/ */
int mapcount = PageSlab(page) ? 0 : page_mapcount(page); int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx", pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
page, page_ref_count(page), mapcount, page, page_ref_count(page), mapcount,
page->mapping, page_to_pgoff(page)); page->mapping, page_to_pgoff(page));
if (PageCompound(page)) if (PageCompound(page))
...@@ -69,7 +69,7 @@ void __dump_page(struct page *page, const char *reason) ...@@ -69,7 +69,7 @@ void __dump_page(struct page *page, const char *reason)
#ifdef CONFIG_MEMCG #ifdef CONFIG_MEMCG
if (page->mem_cgroup) if (page->mem_cgroup)
pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif #endif
} }
...@@ -84,10 +84,10 @@ EXPORT_SYMBOL(dump_page); ...@@ -84,10 +84,10 @@ EXPORT_SYMBOL(dump_page);
void dump_vma(const struct vm_area_struct *vma) void dump_vma(const struct vm_area_struct *vma)
{ {
pr_emerg("vma %p start %p end %p\n" pr_emerg("vma %px start %px end %px\n"
"next %p prev %p mm %p\n" "next %px prev %px mm %px\n"
"prot %lx anon_vma %p vm_ops %p\n" "prot %lx anon_vma %px vm_ops %px\n"
"pgoff %lx file %p private_data %p\n" "pgoff %lx file %px private_data %px\n"
"flags: %#lx(%pGv)\n", "flags: %#lx(%pGv)\n",
vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
vma->vm_prev, vma->vm_mm, vma->vm_prev, vma->vm_mm,
...@@ -100,27 +100,27 @@ EXPORT_SYMBOL(dump_vma); ...@@ -100,27 +100,27 @@ EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm) void dump_mm(const struct mm_struct *mm)
{ {
pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n" pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
"get_unmapped_area %p\n" "get_unmapped_area %px\n"
#endif #endif
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
"pgd %p mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n"
"start_brk %lx brk %lx start_stack %lx\n" "start_brk %lx brk %lx start_stack %lx\n"
"arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
"binfmt %p flags %lx core_state %p\n" "binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO #ifdef CONFIG_AIO
"ioctx_table %p\n" "ioctx_table %px\n"
#endif #endif
#ifdef CONFIG_MEMCG #ifdef CONFIG_MEMCG
"owner %p " "owner %px "
#endif #endif
"exe_file %p\n" "exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER #ifdef CONFIG_MMU_NOTIFIER
"mmu_notifier_mm %p\n" "mmu_notifier_mm %px\n"
#endif #endif
#ifdef CONFIG_NUMA_BALANCING #ifdef CONFIG_NUMA_BALANCING
"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
......
...@@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, ...@@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
next = pmd_addr_end(addr, end); next = pmd_addr_end(addr, end);
if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd) if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
&& pmd_none_or_clear_bad(pmd)) && pmd_none_or_clear_bad(pmd))
continue; goto next;
/* invoke the mmu notifier if the pmd is populated */ /* invoke the mmu notifier if the pmd is populated */
if (!mni_start) { if (!mni_start) {
...@@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, ...@@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
} }
/* huge pmd was handled */ /* huge pmd was handled */
continue; goto next;
} }
} }
/* fall through, the trans huge pmd just split */ /* fall through, the trans huge pmd just split */
...@@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, ...@@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
this_pages = change_pte_range(vma, pmd, addr, next, newprot, this_pages = change_pte_range(vma, pmd, addr, next, newprot,
dirty_accountable, prot_numa); dirty_accountable, prot_numa);
pages += this_pages; pages += this_pages;
next:
cond_resched();
} while (pmd++, addr = next, addr != end); } while (pmd++, addr = next, addr != end);
if (mni_start) if (mni_start)
......
...@@ -6260,6 +6260,8 @@ void __paginginit zero_resv_unavail(void) ...@@ -6260,6 +6260,8 @@ void __paginginit zero_resv_unavail(void)
pgcnt = 0; pgcnt = 0;
for_each_resv_unavail_range(i, &start, &end) { for_each_resv_unavail_range(i, &start, &end) {
for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) { for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
continue;
mm_zero_struct_page(pfn_to_page(pfn)); mm_zero_struct_page(pfn_to_page(pfn));
pgcnt++; pgcnt++;
} }
......
...@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) ...@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
if (unlikely(!mem_section)) { if (unlikely(!mem_section)) {
unsigned long size, align; unsigned long size, align;
size = sizeof(struct mem_section) * NR_SECTION_ROOTS; size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
align = 1 << (INTERNODE_CACHE_SHIFT); align = 1 << (INTERNODE_CACHE_SHIFT);
mem_section = memblock_virt_alloc(size, align); mem_section = memblock_virt_alloc(size, align);
} }
......
...@@ -53,6 +53,7 @@ ...@@ -53,6 +53,7 @@
#include <linux/mount.h> #include <linux/mount.h>
#include <linux/migrate.h> #include <linux/migrate.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/fs.h>
#define ZSPAGE_MAGIC 0x58 #define ZSPAGE_MAGIC 0x58
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment