Commit b7aea68a authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "17 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  drivers/acpi/scan.c: document why we don't need the device_hotplug_lock
  memremap: move from kernel/ to mm/
  lib/test_meminit.c: use GFP_ATOMIC in RCU critical section
  asm-generic: fix -Wtype-limits compiler warnings
  cgroup: kselftest: relax fs_spec checks
  mm/memory_hotplug.c: remove unneeded return for void function
  mm/migrate.c: initialize pud_entry in migrate_vma()
  coredump: split pipe command whitespace before expanding template
  page flags: prioritize kasan bits over last-cpuid
  ubsan: build ubsan.c more conservatively
  kasan: remove clang version check for KASAN_STACK
  mm: compaction: avoid 100% CPU usage during compaction when a task is killed
  mm: migrate: fix reference check race between __find_get_block() and migration
  mm: vmscan: check if mem cgroup is disabled or not before calling memcg slab shrinker
  ocfs2: remove set but not used variable 'last_hash'
  Revert "kmemleak: allow to coexist with fault injection"
  kernel/signal.c: fix a kernel-doc markup
parents 61672549 7291edca
@@ -9,6 +9,7 @@
 #if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT)
 /* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. */
+#define BUILD_VDSO32_64
 #undef CONFIG_64BIT
 #define CONFIG_32BIT 1
 
 #ifndef __ASSEMBLY__
......
@@ -2204,6 +2204,12 @@ int __init acpi_scan_init(void)
 	acpi_gpe_apply_masked_gpes();
 	acpi_update_all_gpes();
 
+	/*
+	 * Although we call __add_memory() that is documented to require the
+	 * device_hotplug_lock, it is not necessary here because this is an
+	 * early code when userspace or any other code path cannot trigger
+	 * hotplug/hotunplug operations.
+	 */
 	mutex_lock(&acpi_scan_lock);
 	/*
 	 * Enumerate devices in the ACPI namespace.
......
@@ -7,6 +7,7 @@
 #include <linux/stat.h>
 #include <linux/fcntl.h>
 #include <linux/swap.h>
+#include <linux/ctype.h>
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
@@ -187,11 +188,13 @@ static int cn_print_exe_file(struct core_name *cn)
 /* format_corename will inspect the pattern parameter, and output a
  * name into corename, which must have space for at least
  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
  */
-static int format_corename(struct core_name *cn, struct coredump_params *cprm)
+static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+			   size_t **argv, int *argc)
 {
 	const struct cred *cred = current_cred();
 	const char *pat_ptr = core_pattern;
 	int ispipe = (*pat_ptr == '|');
+	bool was_space = false;
 	int pid_in_pattern = 0;
 	int err = 0;
@@ -201,12 +204,35 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
 		return -ENOMEM;
 	cn->corename[0] = '\0';
 
-	if (ispipe)
+	if (ispipe) {
+		int argvs = sizeof(core_pattern) / 2;
+		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
+		if (!(*argv))
+			return -ENOMEM;
+		(*argv)[(*argc)++] = 0;
 		++pat_ptr;
+	}
 
 	/* Repeat as long as we have more pattern to process and more output
 	   space */
 	while (*pat_ptr) {
+		/*
+		 * Split on spaces before doing template expansion so that
+		 * %e and %E don't get split if they have spaces in them
+		 */
+		if (ispipe) {
+			if (isspace(*pat_ptr)) {
+				was_space = true;
+				pat_ptr++;
+				continue;
+			} else if (was_space) {
+				was_space = false;
+				err = cn_printf(cn, "%c", '\0');
+				if (err)
+					return err;
+				(*argv)[(*argc)++] = cn->used;
+			}
+		}
 		if (*pat_ptr != '%') {
 			err = cn_printf(cn, "%c", *pat_ptr++);
 		} else {
@@ -546,6 +572,8 @@ void do_coredump(const kernel_siginfo_t *siginfo)
 	struct cred *cred;
 	int retval = 0;
 	int ispipe;
+	size_t *argv = NULL;
+	int argc = 0;
 	struct files_struct *displaced;
 	/* require nonrelative corefile path and be extra careful */
 	bool need_suid_safe = false;
@@ -592,9 +620,10 @@ void do_coredump(const kernel_siginfo_t *siginfo)
 
 	old_cred = override_creds(cred);
 
-	ispipe = format_corename(&cn, &cprm);
+	ispipe = format_corename(&cn, &cprm, &argv, &argc);
 
 	if (ispipe) {
+		int argi;
 		int dump_count;
 		char **helper_argv;
 		struct subprocess_info *sub_info;
@@ -637,12 +666,16 @@ void do_coredump(const kernel_siginfo_t *siginfo)
 			goto fail_dropcount;
 		}
 
-		helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
+		helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
+					    GFP_KERNEL);
 		if (!helper_argv) {
 			printk(KERN_WARNING "%s failed to allocate memory\n",
 			       __func__);
 			goto fail_dropcount;
 		}
+		for (argi = 0; argi < argc; argi++)
+			helper_argv[argi] = cn.corename + argv[argi];
+		helper_argv[argi] = NULL;
 
 		retval = -ENOMEM;
 		sub_info = call_usermodehelper_setup(helper_argv[0],
@@ -652,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
 		retval = call_usermodehelper_exec(sub_info,
 					UMH_WAIT_EXEC);
 
-		argv_free(helper_argv);
+		kfree(helper_argv);
 		if (retval) {
 			printk(KERN_INFO "Core dump to |%s pipe failed\n",
 			       cn.corename);
@@ -766,6 +799,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
 	if (ispipe)
 		atomic_dec(&core_dump_count);
 fail_unlock:
+	kfree(argv);
 	kfree(cn.corename);
 	coredump_finish(mm, core_dumped);
 	revert_creds(old_cred);
......
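
The fs/coredump.c change above splits the pipe command on whitespace before template expansion, so an executable whose name contains spaces stays a single helper argument. A minimal userspace sketch of the difference (the helper path, pattern and task name below are made up for illustration):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *comm = "my app";	/* task name containing a space */
	char argv1[64];

	/* New behaviour: the pattern "|/usr/bin/dumper %e" is split into
	 * tokens first, then %e is expanded inside its own token, so the
	 * helper sees argv = { "/usr/bin/dumper", "my app" }. */
	snprintf(argv1, sizeof(argv1), "%s", comm);
	printf("split-then-expand: \"/usr/bin/dumper\" \"%s\"\n", argv1);

	/* Old behaviour: expand first, then split on spaces (argv_split),
	 * which yields argv = { "/usr/bin/dumper", "my", "app" }. */
	char expanded[128];
	snprintf(expanded, sizeof(expanded), "/usr/bin/dumper %s", comm);
	printf("expand-then-split:");
	for (char *tok = strtok(expanded, " "); tok; tok = strtok(NULL, " "))
		printf(" \"%s\"", tok);
	printf("\n");
	return 0;
}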
@@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
 	u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
 	int low_bucket = 0, bucket, high_bucket;
 	struct ocfs2_xattr_bucket *search;
-	u32 last_hash;
 	u64 blkno, lower_blkno = 0;
 
 	search = ocfs2_xattr_bucket_new(inode);
@@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
 		if (xh->xh_count)
 			xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
 
-		last_hash = le32_to_cpu(xe->xe_name_hash);
-
 		/* record lower_blkno which may be the insert place. */
 		lower_blkno = blkno;
 
......
@@ -7,24 +7,6 @@
 #include <linux/compiler.h>
 #include <linux/log2.h>
 
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-	int order;
-
-	size--;
-	size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-	order = fls(size);
-#else
-	order = fls64(size);
-#endif
-	return order;
-}
-
 /**
  * get_order - Determine the allocation order of a memory size
  * @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
  * to hold an object of the specified size.
  *
  * The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
  */
-#define get_order(n)						\
-(								\
-	__builtin_constant_p(n) ? (				\
-		((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :	\
-		(((n) < (1UL << PAGE_SHIFT)) ? 0 :		\
-			ilog2((n) - 1) - PAGE_SHIFT + 1)	\
-	) :							\
-	__get_order(n)						\
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	if (__builtin_constant_p(size)) {
+		if (!size)
+			return BITS_PER_LONG - PAGE_SHIFT;
+
+		if (size < (1UL << PAGE_SHIFT))
+			return 0;
+
+		return ilog2((size) - 1) - PAGE_SHIFT + 1;
+	}
+
+	size--;
+	size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+	return fls(size);
+#else
+	return fls64(size);
+#endif
+}
 
 #endif	/* __ASSEMBLY__ */
 
......
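
With both the constant-folded and runtime paths now in one inline get_order(), the arithmetic is easy to check by hand. A standalone userspace sketch (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12) that mirrors the runtime branch:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* Same math as the runtime branch: order = fls((size - 1) >> PAGE_SHIFT). */
static int get_order_sketch(unsigned long size)
{
	int order = 0;

	size--;
	size >>= PAGE_SHIFT;
	while (size) {		/* open-coded fls() */
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	printf("get_order(4096)  = %d\n", get_order_sketch(4096));	/* 0 */
	printf("get_order(4097)  = %d\n", get_order_sketch(4097));	/* 1 */
	printf("get_order(65536) = %d\n", get_order_sketch(65536));	/* 4 */
	return 0;
}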
@@ -32,6 +32,7 @@
 #endif /* CONFIG_SPARSEMEM */
 
+#ifndef BUILD_VDSO32_64
 /*
  * page->flags layout:
  *
@@ -76,20 +77,22 @@
 #define LAST_CPUPID_SHIFT 0
 #endif
 
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
+	<= BITS_PER_LONG - NR_PAGEFLAGS
 #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
 #else
 #define LAST_CPUPID_WIDTH 0
 #endif
 
-#ifdef CONFIG_KASAN_SW_TAGS
-#define KASAN_TAG_WIDTH 8
 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
 	> BITS_PER_LONG - NR_PAGEFLAGS
-#error "KASAN: not enough bits in page flags for tag"
-#endif
-#else
-#define KASAN_TAG_WIDTH 0
+#error "Not enough bits in page flags"
 #endif
 
 /*
@@ -104,4 +107,5 @@
 #define LAST_CPUPID_NOT_IN_PAGE_FLAGS
 #endif
 
+#endif
 #endif /* _LINUX_PAGE_FLAGS_LAYOUT */
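
The reordered page-flags-layout logic above budgets the 8 KASAN tag bits before deciding whether last_cpupid still fits in page->flags. A sketch of that arithmetic with assumed field widths (the numbers below are illustrative only, not taken from any particular config):

#include <stdio.h>

int main(void)
{
	/* Assumed widths, for illustration only. */
	const int bits_per_long = 64, nr_pageflags = 23;
	const int sections_w = 0, zones_w = 2, nodes_shift = 10, last_cpupid_shift = 22;
	const int kasan_tag_w = 8;	/* as if CONFIG_KASAN_SW_TAGS=y */

	int wanted = sections_w + zones_w + nodes_shift + last_cpupid_shift + kasan_tag_w;
	int avail = bits_per_long - nr_pageflags;
	int last_cpupid_w = (wanted <= avail) ? last_cpupid_shift : 0;

	printf("requested %d bits, %d available\n", wanted, avail);
	printf("LAST_CPUPID_WIDTH = %d (0 means last_cpupid lives outside page->flags)\n",
	       last_cpupid_w);
	return 0;
}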
@@ -111,7 +111,6 @@
 obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
 obj-$(CONFIG_TORTURE_TEST) += torture.o
 obj-$(CONFIG_HAS_IOMEM) += iomem.o
-obj-$(CONFIG_ZONE_DEVICE) += memremap.o
 obj-$(CONFIG_RSEQ) += rseq.o
 obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
......
@@ -349,7 +349,7 @@ void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
  * Group stop states are cleared and the group stop count is consumed if
  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
- * stop, the appropriate %SIGNAL_* flags are set.
+ * stop, the appropriate `SIGNAL_*` flags are set.
  *
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
......
@@ -106,7 +106,6 @@ endchoice
 
 config KASAN_STACK_ENABLE
 	bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
-	default !(CLANG_VERSION < 90000)
 	depends on KASAN
 	help
 	  The LLVM stack address sanitizer has a know problem that
@@ -115,11 +114,11 @@ config KASAN_STACK_ENABLE
 	  Disabling asan-stack makes it safe to run kernels build
 	  with clang-8 with KASAN enabled, though it loses some of
 	  the functionality.
-	  This feature is always disabled when compile-testing with clang-8
-	  or earlier to avoid cluttering the output in stack overflow
-	  warnings, but clang-8 users can still enable it for builds without
-	  CONFIG_COMPILE_TEST. On gcc and later clang versions it is
-	  assumed to always be safe to use and enabled by default.
+	  This feature is always disabled when compile-testing with clang
+	  to avoid cluttering the output in stack overflow warnings,
+	  but clang users can still enable it for builds without
+	  CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
+	  to use and enabled by default.
 
 config KASAN_STACK
 	int
......
@@ -279,7 +279,8 @@
 obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
 
 obj-$(CONFIG_UBSAN) += ubsan.o
 UBSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+KASAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o
......
@@ -222,7 +222,7 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor,
 			/*
 			 * Copy the buffer to check that it's not wiped on
 			 * free().
 			 */
-			buf_copy = kmalloc(size, GFP_KERNEL);
+			buf_copy = kmalloc(size, GFP_ATOMIC);
 			if (buf_copy)
 				memcpy(buf_copy, buf, size);
......
@@ -102,5 +102,6 @@ obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
 obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
 obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
 obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
+obj-$(CONFIG_ZONE_DEVICE) += memremap.o
 obj-$(CONFIG_HMM_MIRROR) += hmm.o
 obj-$(CONFIG_MEMFD_CREATE) += memfd.o
@@ -842,13 +842,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/*
 		 * Periodically drop the lock (if held) regardless of its
-		 * contention, to give chance to IRQs. Abort async compaction
-		 * if contended.
+		 * contention, to give chance to IRQs. Abort completely if
+		 * a fatal signal is pending.
 		 */
 		if (!(low_pfn % SWAP_CLUSTER_MAX)
 		    && compact_unlock_should_abort(&pgdat->lru_lock,
-					    flags, &locked, cc))
-			break;
+					    flags, &locked, cc)) {
+			low_pfn = 0;
+			goto fatal_pending;
+		}
 
 		if (!pfn_valid_within(low_pfn))
 			goto isolate_fail;
@@ -1060,6 +1062,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
 						nr_scanned, nr_isolated);
 
+fatal_pending:
 	cc->total_migrate_scanned += nr_scanned;
 	if (nr_isolated)
 		count_compact_events(COMPACTISOLATED, nr_isolated);
......
@@ -114,7 +114,7 @@
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN | __GFP_NOFAIL)
+				 __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
......
@@ -132,7 +132,6 @@ static void release_memory_resource(struct resource *res)
 		return;
 	release_resource(res);
 	kfree(res);
-	return;
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
@@ -979,7 +978,6 @@ static void rollback_node_hotadd(int nid)
 	arch_refresh_nodedata(nid, NULL);
 	free_percpu(pgdat->per_cpu_nodestats);
 	arch_free_nodedata(pgdat);
-	return;
 }
 
......
@@ -767,12 +767,12 @@ static int __buffer_migrate_page(struct address_space *mapping,
 		}
 		bh = bh->b_this_page;
 	} while (bh != head);
-	spin_unlock(&mapping->private_lock);
 	if (busy) {
 		if (invalidated) {
 			rc = -EAGAIN;
 			goto unlock_buffers;
 		}
+		spin_unlock(&mapping->private_lock);
 		invalidate_bh_lrus();
 		invalidated = true;
 		goto recheck_buffers;
@@ -805,6 +805,8 @@ static int __buffer_migrate_page(struct address_space *mapping,
 
 	rc = MIGRATEPAGE_SUCCESS;
 unlock_buffers:
+	if (check_refs)
+		spin_unlock(&mapping->private_lock);
 	bh = head;
 	do {
 		unlock_buffer(bh);
@@ -2338,16 +2340,13 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 static void migrate_vma_collect(struct migrate_vma *migrate)
 {
 	struct mmu_notifier_range range;
-	struct mm_walk mm_walk;
-
-	mm_walk.pmd_entry = migrate_vma_collect_pmd;
-	mm_walk.pte_entry = NULL;
-	mm_walk.pte_hole = migrate_vma_collect_hole;
-	mm_walk.hugetlb_entry = NULL;
-	mm_walk.test_walk = NULL;
-	mm_walk.vma = migrate->vma;
-	mm_walk.mm = migrate->vma->vm_mm;
-	mm_walk.private = migrate;
+	struct mm_walk mm_walk = {
+		.pmd_entry = migrate_vma_collect_pmd,
+		.pte_hole = migrate_vma_collect_hole,
+		.vma = migrate->vma,
+		.mm = migrate->vma->vm_mm,
+		.private = migrate,
+	};
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
 			migrate->start,
......
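
The mm_walk change above relies on a C guarantee: a designated initializer zeroes every member that is not explicitly named, so pud_entry (previously left as stack garbage by the member-by-member assignments) is now reliably NULL. A small userspace sketch of that behaviour (the struct and field names are illustrative, not the kernel's struct mm_walk):

#include <stdio.h>

struct walk_ops {
	void *pmd_entry;
	void *pud_entry;	/* never named in the initializer below */
	void *pte_hole;
};

int main(void)
{
	struct walk_ops w = {
		.pmd_entry = (void *)0x1,
		.pte_hole  = (void *)0x2,
	};

	/* Unnamed members are zero-initialized, so this always prints a null pointer. */
	printf("pud_entry = %p\n", w.pud_entry);
	return 0;
}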
@@ -699,7 +699,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 	unsigned long ret, freed = 0;
 	struct shrinker *shrinker;
 
-	if (!mem_cgroup_is_root(memcg))
+	/*
+	 * The root memcg might be allocated even though memcg is disabled
+	 * via "cgroup_disable=memory" boot parameter. This could make
+	 * mem_cgroup_is_root() return false, then just run memcg slab
+	 * shrink, but skip global shrink. This may result in premature
+	 * oom.
+	 */
+	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
 
 	if (!down_read_trylock(&shrinker_rwsem))
......
@@ -191,8 +191,7 @@ int cg_find_unified_root(char *root, size_t len)
 		strtok(NULL, delim);
 		strtok(NULL, delim);
 
-		if (strcmp(fs, "cgroup") == 0 &&
-		    strcmp(type, "cgroup2") == 0) {
+		if (strcmp(type, "cgroup2") == 0) {
 			strncpy(root, mount, len);
 			return 0;
 		}
......