Commit 968ef8de authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "15 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  lib/stackdepot: export save/fetch stack for drivers
  mm: kmemleak: scan .data.ro_after_init
  memcg: prevent memcg caches to be both OFF_SLAB & OBJFREELIST_SLAB
  coredump: fix unfreezable coredumping task
  mm/filemap: don't allow partially uptodate page for pipes
  mm/hugetlb: fix huge page reservation leak in private mapping error paths
  ocfs2: fix not enough credit panic
  Revert "console: don't prefer first registered if DT specifies stdout-path"
  mm: hwpoison: fix thp split handling in memory_failure()
  swapfile: fix memory corruption via malformed swapfile
  mm/cma.c: check the max limit for cma allocation
  scripts/bloat-o-meter: fix SIGPIPE
  shmem: fix pageflags after swapping DMA32 object
  mm, frontswap: make sure allocated frontswap map is assigned
  mm: remove extra newline from allocation stall warning
parents c5e4ca6d ae65a21f
@@ -62,9 +62,11 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__start_ro_after_init = .;
+	__start_data_ro_after_init = .;
 	.data..ro_after_init : {
 		*(.data..ro_after_init)
 	}
+	__end_data_ro_after_init = .;
 
 	EXCEPTION_TABLE(16)
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;
...
@@ -2077,8 +2077,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
 		name = of_get_property(of_aliases, "stdout", NULL);
 		if (name)
 			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
-		if (of_stdout)
-			console_set_by_of();
 	}
 
 	if (!of_aliases)
...
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/fdtable.h>
+#include <linux/freezer.h>
 #include <linux/mm.h>
 #include <linux/stat.h>
 #include <linux/fcntl.h>
@@ -423,7 +424,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 	if (core_waiters > 0) {
 		struct core_thread *ptr;
 
+		freezer_do_not_count();
 		wait_for_completion(&core_state->startup);
+		freezer_count();
 		/*
 		 * Wait for all the threads to become inactive, so that
 		 * all the thread context (extended register state, like
...
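The coredump fix above uses the standard freezer idiom: a task about to sleep for an unbounded time tells the freezer not to count it, so system suspend is not blocked while it waits. A minimal sketch of the pattern with a hypothetical caller (not code from this commit):

#include <linux/freezer.h>
#include <linux/completion.h>

/* Bracket any unbounded wait so the freezer treats the task as
 * already frozen while it sleeps, instead of failing to freeze it. */
static void wait_without_blocking_freezer(struct completion *done)
{
	freezer_do_not_count();		/* freezer skips this task */
	wait_for_completion(done);
	freezer_count();		/* rejoin accounting; may freeze here */
}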
@@ -3699,7 +3699,7 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
 static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
 					  struct ocfs2_dx_root_block *dx_root)
 {
-	int credits = ocfs2_clusters_to_blocks(osb->sb, 2);
+	int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
 
 	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
 	credits += ocfs2_quota_trans_credits(osb->sb);
...
@@ -14,6 +14,8 @@
  * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
  *                   and/or .init.* sections.
  * [__start_rodata, __end_rodata]: contains .rodata.* sections
+ * [__start_data_ro_after_init, __end_data_ro_after_init]:
+ *                   contains data.ro_after_init section
  * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
  *                   may be out of this range on some architectures.
  * [_sinittext, _einittext]: contains .init.text.* sections
@@ -31,6 +33,7 @@ extern char _data[], _sdata[], _edata[];
 extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
+extern char __start_data_ro_after_init[], __end_data_ro_after_init[];
 extern char _end[];
 extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
...
@@ -259,7 +259,10 @@
  * own by defining an empty RO_AFTER_INIT_DATA.
  */
 #ifndef RO_AFTER_INIT_DATA
-#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
+#define RO_AFTER_INIT_DATA				\
+	__start_data_ro_after_init = .;			\
+	*(.data..ro_after_init)				\
+	__end_data_ro_after_init = .;
 #endif
 
 /*
...
@@ -173,12 +173,6 @@ static inline void console_sysfs_notify(void)
 #endif
 extern bool console_suspend_enabled;
 
-#ifdef CONFIG_OF
-extern void console_set_by_of(void);
-#else
-static inline void console_set_by_of(void) {}
-#endif
-
 /* Suspend and resume console messages over PM events */
 extern void suspend_console(void);
 extern void resume_console(void);
...
@@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type)
 
 static inline void frontswap_init(unsigned type, unsigned long *map)
 {
-	if (frontswap_enabled())
-		__frontswap_init(type, map);
+#ifdef CONFIG_FRONTSWAP
+	__frontswap_init(type, map);
+#endif
 }
 
 #endif /* _LINUX_FRONTSWAP_H */
@@ -253,17 +253,6 @@ static int preferred_console = -1;
 int console_set_on_cmdline;
 EXPORT_SYMBOL(console_set_on_cmdline);
 
-#ifdef CONFIG_OF
-static bool of_specified_console;
-
-void console_set_by_of(void)
-{
-	of_specified_console = true;
-}
-#else
-# define of_specified_console false
-#endif
-
 /* Flag: console code may call schedule() */
 static int console_may_schedule;
 
@@ -2657,7 +2646,7 @@ void register_console(struct console *newcon)
 	 *	didn't select a console we take the first one
 	 *	that registers here.
 	 */
-	if (preferred_console < 0 && !of_specified_console) {
+	if (preferred_console < 0) {
 		if (newcon->index < 0)
 			newcon->index = 0;
 		if (newcon->setup == NULL ||
...
@@ -192,6 +192,7 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
 	trace->entries = stack->entries;
 	trace->skip = 0;
 }
+EXPORT_SYMBOL_GPL(depot_fetch_stack);
 
 /**
  * depot_save_stack - save stack in a stack depot.
@@ -283,3 +284,4 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 fast_exit:
 	return retval;
 }
+EXPORT_SYMBOL_GPL(depot_save_stack);
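With both symbols exported, modular drivers can use the depot the way KASAN does: save a trace once, keep only the compact handle, and expand it on demand. A rough sketch against the stacktrace API of this era; the helper names and sizes are made up for illustration:

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t save_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 2,	/* drop this helper's frames */
	};

	save_stack_trace(&trace);
	/* GFP_NOWAIT: caller may hold locks; on failure the handle is 0 */
	return depot_save_stack(&trace, GFP_NOWAIT);
}

static void print_saved_stack(depot_stack_handle_t handle)
{
	struct stack_trace trace;

	if (!handle)
		return;
	depot_fetch_stack(handle, &trace);
	print_stack_trace(&trace, 0);
}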
@@ -385,6 +385,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 	bitmap_maxno = cma_bitmap_maxno(cma);
 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
+	if (bitmap_count > bitmap_maxno)
+		return NULL;
+
 	for (;;) {
 		mutex_lock(&cma->lock);
 		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
...
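The new bound makes the failure mode for oversized requests explicit: asking for more pages than the whole area holds returns NULL up front instead of entering the retry loop. A hypothetical caller illustrating the boundary (the cma pointer is assumed to come from an earlier cma_declare_contiguous()):

#include <linux/cma.h>
#include <linux/mm.h>

/* One page more than the area can ever hold now fails fast with NULL
 * rather than scanning the bitmap for space that cannot exist. */
static struct page *try_oversized_alloc(struct cma *cma)
{
	size_t pages = cma_get_size(cma) >> PAGE_SHIFT;

	return cma_alloc(cma, pages + 1, 0);	/* returns NULL immediately */
}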
@@ -1826,11 +1826,17 @@ static void return_unused_surplus_pages(struct hstate *h,
  * is not the case is if a reserve map was changed between calls.  It
  * is the responsibility of the caller to notice the difference and
  * take appropriate action.
+ *
+ * vma_add_reservation is used in error paths where a reservation must
+ * be restored when a newly allocated huge page must be freed.  It is
+ * to be called after calling vma_needs_reservation to determine if a
+ * reservation exists.
  */
 enum vma_resv_mode {
 	VMA_NEEDS_RESV,
 	VMA_COMMIT_RESV,
 	VMA_END_RESV,
+	VMA_ADD_RESV,
 };
 static long __vma_reservation_common(struct hstate *h,
 				struct vm_area_struct *vma, unsigned long addr,
@@ -1856,6 +1862,14 @@ static long __vma_reservation_common(struct hstate *h,
 		region_abort(resv, idx, idx + 1);
 		ret = 0;
 		break;
+	case VMA_ADD_RESV:
+		if (vma->vm_flags & VM_MAYSHARE)
+			ret = region_add(resv, idx, idx + 1);
+		else {
+			region_abort(resv, idx, idx + 1);
+			ret = region_del(resv, idx, idx + 1);
+		}
+		break;
 	default:
 		BUG();
 	}
@@ -1903,6 +1917,56 @@ static void vma_end_reservation(struct hstate *h,
 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
 }
 
+static long vma_add_reservation(struct hstate *h,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
+}
+
+/*
+ * This routine is called to restore a reservation on error paths.  In the
+ * specific error paths, a huge page was allocated (via alloc_huge_page)
+ * and is about to be freed.  If a reservation for the page existed,
+ * alloc_huge_page would have consumed the reservation and set PagePrivate
+ * in the newly allocated page.  When the page is freed via free_huge_page,
+ * the global reservation count will be incremented if PagePrivate is set.
+ * However, free_huge_page can not adjust the reserve map.  Adjust the
+ * reserve map here to be consistent with global reserve count adjustments
+ * to be made by free_huge_page.
+ */
+static void restore_reserve_on_error(struct hstate *h,
+			struct vm_area_struct *vma, unsigned long address,
+			struct page *page)
+{
+	if (unlikely(PagePrivate(page))) {
+		long rc = vma_needs_reservation(h, vma, address);
+
+		if (unlikely(rc < 0)) {
+			/*
+			 * Rare out of memory condition in reserve map
+			 * manipulation.  Clear PagePrivate so that
+			 * global reserve count will not be incremented
+			 * by free_huge_page.  This will make it appear
+			 * as though the reservation for this page was
+			 * consumed.  This may prevent the task from
+			 * faulting in the page at a later time.  This
+			 * is better than inconsistent global huge page
+			 * accounting of reserve counts.
+			 */
+			ClearPagePrivate(page);
+		} else if (rc) {
+			rc = vma_add_reservation(h, vma, address);
+			if (unlikely(rc < 0))
+				/*
+				 * See above comment about rare out of
+				 * memory condition.
+				 */
+				ClearPagePrivate(page);
+		} else
+			vma_end_reservation(h, vma, address);
+	}
+}
+
 struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr, int avoid_reserve)
 {
@@ -3498,6 +3562,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out_release_all:
+	restore_reserve_on_error(h, vma, address, new_page);
 	put_page(new_page);
 out_release_old:
 	put_page(old_page);
@@ -3680,6 +3745,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_unlock(ptl);
 backout_unlocked:
 	unlock_page(page);
+	restore_reserve_on_error(h, vma, address, page);
 	put_page(page);
 	goto out;
 }
...
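The two call sites above follow the unwinding rule the new comment describes, and any future caller of alloc_huge_page() that frees the page on error is expected to do the same. A schematic error path, with a made-up helper standing in for the real page-table work:

/* Hypothetical sketch of the expected unwind order after this patch:
 * restore the reserve map entry before the page is freed, keeping the
 * map consistent with the PagePrivate-driven adjustment that
 * free_huge_page() makes to the global reserve count. */
static int hypothetical_map_huge_page(struct hstate *h,
		struct vm_area_struct *vma, unsigned long address)
{
	struct page *page = alloc_huge_page(vma, address, 0);

	if (IS_ERR(page))
		return PTR_ERR(page);

	if (install_huge_pte(vma, address, page)) {	/* made-up helper */
		restore_reserve_on_error(h, vma, address, page);
		put_page(page);
		return -ENOMEM;
	}
	return 0;
}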
@@ -1414,6 +1414,7 @@ static void kmemleak_scan(void)
 	/* data/bss scanning */
 	scan_large_block(_sdata, _edata);
 	scan_large_block(__bss_start, __bss_stop);
+	scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
 
 #ifdef CONFIG_SMP
 	/* per-cpu sections scanning */
...
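The extra scan matters because a __ro_after_init variable routinely holds the only live reference to a boot-time allocation; without scanning that section, kmemleak would flag such objects as leaks. A minimal example of data that now gets scanned (the variable and initcall are illustrative, not from the commit):

#include <linux/cache.h>
#include <linux/init.h>
#include <linux/workqueue.h>

/* Written once during init, write-protected afterwards.  The pointer
 * stored here may be the sole reference keeping the workqueue's
 * allocations alive, so kmemleak must be able to see it. */
static struct workqueue_struct *example_wq __ro_after_init;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example", 0, 0);
	return example_wq ? 0 : -ENOMEM;
}
late_initcall(example_init);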
@@ -1112,10 +1112,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	}
 
 	if (!PageHuge(p) && PageTransHuge(hpage)) {
-		lock_page(hpage);
-		if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
-			unlock_page(hpage);
-			if (!PageAnon(hpage))
+		lock_page(p);
+		if (!PageAnon(p) || unlikely(split_huge_page(p))) {
+			unlock_page(p);
+			if (!PageAnon(p))
 				pr_err("Memory failure: %#lx: non anonymous thp\n",
 				       pfn);
 			else
@@ -1126,9 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 			put_hwpoison_page(p);
 			return -EBUSY;
 		}
-		unlock_page(hpage);
-		get_hwpoison_page(p);
-		put_hwpoison_page(hpage);
+		unlock_page(p);
 		VM_BUG_ON_PAGE(!page_count(p), p);
 		hpage = compound_head(p);
 	}
...
@@ -3658,7 +3658,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	/* Make sure we know about allocations which stall for too long */
 	if (time_after(jiffies, alloc_start + stall_timeout)) {
 		warn_alloc(gfp_mask,
-			"page alloction stalls for %ums, order:%u\n",
+			"page allocation stalls for %ums, order:%u",
 			jiffies_to_msecs(jiffies-alloc_start), order);
 		stall_timeout += 10 * HZ;
 	}
...
@@ -1483,6 +1483,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	copy_highpage(newpage, oldpage);
 	flush_dcache_page(newpage);
 
+	__SetPageLocked(newpage);
+	__SetPageSwapBacked(newpage);
 	SetPageUptodate(newpage);
 	set_page_private(newpage, swap_index);
 	SetPageSwapCache(newpage);
...
@@ -533,8 +533,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
 	s = create_cache(cache_name, root_cache->object_size,
 			 root_cache->size, root_cache->align,
-			 root_cache->flags, root_cache->ctor,
-			 memcg, root_cache);
+			 root_cache->flags & CACHE_CREATE_MASK,
+			 root_cache->ctor, memcg, root_cache);
 	/*
 	 * If we could not create a memcg cache, do not complain, because
 	 * that's not critical at all as we can always proceed with the root
...
@@ -2224,6 +2224,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
 		swab32s(&swap_header->info.version);
 		swab32s(&swap_header->info.last_page);
 		swab32s(&swap_header->info.nr_badpages);
+		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+			return 0;
 		for (i = 0; i < swap_header->info.nr_badpages; i++)
 			swab32s(&swap_header->info.badpages[i]);
 	}
...
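The bound exists because badpages[] lives inside the single header page: a nr_badpages larger than the number of slots that fit there can only come from a forged image, and without the check the swab32s() loop would walk far past the header page, corrupting memory. A userspace sketch of the arithmetic, with the layout copied from the kernel's union swap_header and a 4 KiB page assumed:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096			/* assumption: 4 KiB pages */

struct swap_header_info {		/* mirrors union swap_header.info */
	char		bootbits[1024];	/* space for disklabel etc. */
	uint32_t	version;
	uint32_t	last_page;
	uint32_t	nr_badpages;
	unsigned char	sws_uuid[16];
	unsigned char	sws_volume[16];
	uint32_t	padding[117];
	uint32_t	badpages[1];	/* really badpages[nr_badpages] */
};

int main(void)
{
	/* MAX_SWAP_BADPAGES also excludes the 10-byte "SWAPSPACE2"
	 * magic string at the very end of the page */
	size_t slots = (PAGE_SIZE - 10 -
			offsetof(struct swap_header_info, badpages))
			/ sizeof(uint32_t);

	printf("max badpages entries in header: %zu\n", slots);
	return 0;
}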
@@ -8,6 +8,9 @@
 # of the GNU General Public License, incorporated herein by reference.
 
 import sys, os, re
+from signal import signal, SIGPIPE, SIG_DFL
+signal(SIGPIPE, SIG_DFL)
+
 if len(sys.argv) != 3:
 	sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0])
...
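Why these two lines fix the crash: CPython starts with SIGPIPE ignored and surfaces EPIPE as an exception, so piping bloat-o-meter's output to head produced a traceback once head exited. Restoring SIG_DFL lets the interpreter be killed quietly by the signal, like any other Unix filter. A standalone demonstration (not part of the script):

# Run as:  python3 demo.py | head -n 1
# Without the signal() call, the pipe closing raises BrokenPipeError;
# with it, the process dies silently on SIGPIPE like cat or grep.
from signal import signal, SIGPIPE, SIG_DFL

signal(SIGPIPE, SIG_DFL)

for i in range(1000000):
    print(i)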