Commit 7f5e47f7 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2024-01-12-16-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc hotfixes from Andrew Morton:
 "For once not mostly MM-related.

  17 hotfixes. 10 address post-6.7 issues and the other 7 are cc:stable"

* tag 'mm-hotfixes-stable-2024-01-12-16-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  userfaultfd: avoid huge_zero_page in UFFDIO_MOVE
  MAINTAINERS: add entry for shrinker
  selftests: mm: hugepage-vmemmap fails on 64K page size systems
  mm/memory_hotplug: fix memmap_on_memory sysfs value retrieval
  mailmap: switch email for Tanzir Hasan
  mailmap: add old address mappings for Randy
  kernel/crash_core.c: make __crash_hotplug_lock static
  efi: disable mirror feature during crashkernel
  kexec: do syscore_shutdown() in kernel_kexec
  mailmap: update entry for Manivannan Sadhasivam
  fs/proc/task_mmu: move mmu notification mechanism inside mm lock
  mm: zswap: switch maintainers to recently active developers and reviewers
  scripts/decode_stacktrace.sh: optionally use LLVM utilities
  kasan: avoid resetting aux_lock
  lib/Kconfig.debug: disable CONFIG_DEBUG_INFO_BTF for Hexagon
  MAINTAINERS: update LTP maintainers
  kdump: defer the insertion of crashkernel resources
parents 052d5343 5d4747a6
@@ -363,7 +363,6 @@ Maheshwar Ajja <quic_majja@quicinc.com> <majja@codeaurora.org>
Malathi Gottam <quic_mgottam@quicinc.com> <mgottam@codeaurora.org>
Manikanta Pubbisetty <quic_mpubbise@quicinc.com> <mpubbise@codeaurora.org>
Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
Manoj Basapathi <quic_manojbm@quicinc.com> <manojbm@codeaurora.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
@@ -504,6 +503,9 @@ Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
Ram Chandra Jangir <quic_rjangir@quicinc.com> <rjangir@codeaurora.org>
Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
Randy Dunlap <rdunlap@infradead.org> <randy.dunlap@oracle.com>
Randy Dunlap <rdunlap@infradead.org> <rddunlap@osdl.org>
Randy Dunlap <rdunlap@infradead.org> <randy.dunlap@intel.com>
Ravi Kumar Bokka <quic_rbokka@quicinc.com> <rbokka@codeaurora.org>
Ravi Kumar Siddojigari <quic_rsiddoji@quicinc.com> <rsiddoji@codeaurora.org>
Rémi Denis-Courmont <rdenis@simphalempin.com>
@@ -582,6 +584,7 @@ Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Tamizh Chelvam Raja <quic_tamizhr@quicinc.com> <tamizhr@codeaurora.org>
Taniya Das <quic_tdas@quicinc.com> <tdas@codeaurora.org>
Tanzir Hasan <tanzhasanwork@gmail.com> <tanzirh@google.com>
Tejun Heo <htejun@gmail.com>
Tomeu Vizoso <tomeu@tomeuvizoso.net> <tomeu.vizoso@collabora.com>
Thomas Graf <tgraf@suug.ch>

@@ -12724,12 +12724,11 @@ F: Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt
F: drivers/i2c/muxes/i2c-mux-ltc4306.c
LTP (Linux Test Project)
M: Mike Frysinger <vapier@gentoo.org>
M: Cyril Hrubis <chrubis@suse.cz>
M: Wanlong Gao <wanlong.gao@gmail.com>
M: Jan Stancek <jstancek@redhat.com>
M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
M: Alexey Kodanev <alexey.kodanev@oracle.com>
M: Petr Vorel <pvorel@suse.cz>
M: Li Wang <liwang@redhat.com>
M: Yang Xu <xuyang2018.jy@fujitsu.com>
L: ltp@lists.linux.it (subscribers-only)
S: Maintained
W: http://linux-test-project.github.io/
@@ -19738,6 +19737,19 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/rj54n1cb0c.c
F: include/media/i2c/rj54n1cb0c.h
SHRINKER
M: Andrew Morton <akpm@linux-foundation.org>
M: Dave Chinner <david@fromorbit.com>
R: Qi Zheng <zhengqi.arch@bytedance.com>
R: Roman Gushchin <roman.gushchin@linux.dev>
R: Muchun Song <muchun.song@linux.dev>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/admin-guide/mm/shrinker_debugfs.rst
F: include/linux/shrinker.h
F: mm/shrinker.c
F: mm/shrinker_debug.c
SH_VOU V4L2 OUTPUT DRIVER
L: linux-media@vger.kernel.org
S: Orphan
@@ -24263,11 +24275,13 @@ N: zstd
K: zstd
ZSWAP COMPRESSED SWAP CACHING
M: Seth Jennings <sjenning@redhat.com>
M: Dan Streetman <ddstreet@ieee.org>
M: Vitaly Wool <vitaly.wool@konsulko.com>
M: Johannes Weiner <hannes@cmpxchg.org>
M: Yosry Ahmed <yosryahmed@google.com>
M: Nhat Pham <nphamcs@gmail.com>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/admin-guide/mm/zswap.rst
F: include/linux/zswap.h
F: mm/zswap.c
THE REST

@@ -2432,7 +2432,6 @@ static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
{
struct mmu_notifier_range range;
struct pagemap_scan_private p = {0};
unsigned long walk_start;
size_t n_ranges_out = 0;
@@ -2448,15 +2447,9 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
if (ret)
return ret;
/* Protection change for the range is going to happen. */
if (p.arg.flags & PM_SCAN_WP_MATCHING) {
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
mm, p.arg.start, p.arg.end);
mmu_notifier_invalidate_range_start(&range);
}
for (walk_start = p.arg.start; walk_start < p.arg.end;
walk_start = p.arg.walk_end) {
struct mmu_notifier_range range;
long n_out;
if (fatal_signal_pending(current)) {
@@ -2467,8 +2460,20 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
ret = mmap_read_lock_killable(mm);
if (ret)
break;
/* Protection change for the range is going to happen. */
if (p.arg.flags & PM_SCAN_WP_MATCHING) {
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
mm, walk_start, p.arg.end);
mmu_notifier_invalidate_range_start(&range);
}
ret = walk_page_range(mm, walk_start, p.arg.end,
&pagemap_scan_ops, &p);
if (p.arg.flags & PM_SCAN_WP_MATCHING)
mmu_notifier_invalidate_range_end(&range);
mmap_read_unlock(mm);
n_out = pagemap_scan_flush_buffer(&p);
@@ -2494,9 +2499,6 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
if (pagemap_scan_writeback_args(&p.arg, uarg))
ret = -EFAULT;
if (p.arg.flags & PM_SCAN_WP_MATCHING)
mmu_notifier_invalidate_range_end(&range);
kfree(p.vec_buf);
return ret;
}

@@ -376,7 +376,6 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
crashk_low_res.start = low_base;
crashk_low_res.end = low_base + low_size - 1;
insert_resource(&iomem_resource, &crashk_low_res);
#endif
return 0;
}
@@ -458,8 +457,19 @@ void __init reserve_crashkernel_generic(char *cmdline,
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
insert_resource(&iomem_resource, &crashk_res);
}
static __init int insert_crashkernel_resources(void)
{
if (crashk_res.start < crashk_res.end)
insert_resource(&iomem_resource, &crashk_res);
if (crashk_low_res.start < crashk_low_res.end)
insert_resource(&iomem_resource, &crashk_low_res);
return 0;
}
early_initcall(insert_crashkernel_resources);
#endif
int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
@@ -867,7 +877,7 @@ subsys_initcall(crash_notes_memory_init);
* regions are online. So mutex lock __crash_hotplug_lock is used to
* serialize the crash hotplug handling specifically.
*/
DEFINE_MUTEX(__crash_hotplug_lock);
static DEFINE_MUTEX(__crash_hotplug_lock);
#define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
#define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)

@@ -1257,6 +1257,7 @@ int kernel_kexec(void)
kexec_in_progress = true;
kernel_restart_prepare("kexec reboot");
migrate_to_reboot_cpu();
syscore_shutdown();
/*
* migrate_to_reboot_cpu() disables CPU hotplug assuming that

@@ -378,6 +378,8 @@ config DEBUG_INFO_BTF
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
# pahole uses elfutils, which does not have support for Hexagon relocations
depends on !HEXAGON
help
Generate deduplicated BTF type information from DWARF debug info.
Turning this on expects presence of pahole tool, which will convert

@@ -487,6 +487,7 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
__memset(alloc_meta, 0, sizeof(*alloc_meta));
/*
* Prepare the lock for saving auxiliary stack traces.
* Temporarily disable KASAN bug reporting to allow instrumented
* raw_spin_lock_init to access aux_lock, which resides inside
* of a redzone.
@@ -510,8 +511,13 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
stack_depot_put(meta->aux_stack[0]);
stack_depot_put(meta->aux_stack[1]);
/* Zero out alloc meta to mark it as invalid. */
__memset(meta, 0, sizeof(*meta));
/*
* Zero out alloc meta to mark it as invalid but keep aux_lock
* initialized to avoid having to reinitialize it when another object
* is allocated in the same slot.
*/
__memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
__memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
}
static void release_free_meta(const void *object, struct kasan_free_meta *meta)

@@ -101,9 +101,11 @@ static int set_memmap_mode(const char *val, const struct kernel_param *kp)
static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
{
if (*((int *)kp->arg) == MEMMAP_ON_MEMORY_FORCE)
return sprintf(buffer, "force\n");
return param_get_bool(buffer, kp);
int mode = *((int *)kp->arg);
if (mode == MEMMAP_ON_MEMORY_FORCE)
return sprintf(buffer, "force\n");
return sprintf(buffer, "%c\n", mode ? 'Y' : 'N');
}
static const struct kernel_param_ops memmap_mode_ops = {

@@ -26,6 +26,7 @@
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/cma.h>
#include <linux/crash_dump.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"
@@ -381,6 +382,11 @@ static void __init find_zone_movable_pfns_for_nodes(void)
goto out;
}
if (is_kdump_kernel()) {
pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
goto out;
}
for_each_mem_region(r) {
if (memblock_is_mirror(r))
continue;

@@ -1393,6 +1393,12 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
err = -ENOENT;
break;
}
/* Avoid moving zeropages for now */
if (is_huge_zero_pmd(*src_pmd)) {
spin_unlock(ptl);
err = -EBUSY;
break;
}
/* Check if we can move the pmd without splitting it. */
if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||

@@ -16,6 +16,21 @@ elif type c++filt >/dev/null 2>&1 ; then
cppfilt_opts=-i
fi
UTIL_SUFFIX=
if [[ -z ${LLVM:-} ]]; then
UTIL_PREFIX=${CROSS_COMPILE:-}
else
UTIL_PREFIX=llvm-
if [[ ${LLVM} == */ ]]; then
UTIL_PREFIX=${LLVM}${UTIL_PREFIX}
elif [[ ${LLVM} == -* ]]; then
UTIL_SUFFIX=${LLVM}
fi
fi
READELF=${UTIL_PREFIX}readelf${UTIL_SUFFIX}
ADDR2LINE=${UTIL_PREFIX}addr2line${UTIL_SUFFIX}
if [[ $1 == "-r" ]] ; then
vmlinux=""
basepath="auto"
@@ -75,7 +90,7 @@ find_module() {
if [[ "$modpath" != "" ]] ; then
for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do
if readelf -WS "$fn" | grep -qwF .debug_line ; then
if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
echo $fn
return
fi
@@ -169,7 +184,7 @@ parse_symbol() {
if [[ $aarray_support == true && "${cache[$module,$address]+isset}" == "isset" ]]; then
local code=${cache[$module,$address]}
else
local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address" 2>/dev/null)
local code=$(${ADDR2LINE} -i -e "$objfile" "$address" 2>/dev/null)
if [[ $aarray_support == true ]]; then
cache[$module,$address]=$code
fi

@@ -10,10 +10,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#define MAP_LENGTH (2UL * 1024 * 1024)
#define PAGE_SIZE 4096
#include "vm_util.h"
#define PAGE_COMPOUND_HEAD (1UL << 15)
#define PAGE_COMPOUND_TAIL (1UL << 16)
@@ -39,6 +36,9 @@
#define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
#endif
static size_t pagesize;
static size_t maplength;
static void write_bytes(char *addr, size_t length)
{
unsigned long i;
@@ -56,7 +56,7 @@ static unsigned long virt_to_pfn(void *addr)
if (fd < 0)
return -1UL;
lseek(fd, (unsigned long)addr / PAGE_SIZE * sizeof(pagemap), SEEK_SET);
lseek(fd, (unsigned long)addr / pagesize * sizeof(pagemap), SEEK_SET);
read(fd, &pagemap, sizeof(pagemap));
close(fd);
@@ -86,7 +86,7 @@ static int check_page_flags(unsigned long pfn)
* this also verifies kernel has correctly set the fake page_head to tail
* while hugetlb_free_vmemmap is enabled.
*/
for (i = 1; i < MAP_LENGTH / PAGE_SIZE; i++) {
for (i = 1; i < maplength / pagesize; i++) {
read(fd, &pageflags, sizeof(pageflags));
if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
(pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
@@ -106,18 +106,25 @@ int main(int argc, char **argv)
void *addr;
unsigned long pfn;
addr = mmap(MAP_ADDR, MAP_LENGTH, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
pagesize = psize();
maplength = default_huge_page_size();
if (!maplength) {
printf("Unable to determine huge page size\n");
exit(1);
}
addr = mmap(MAP_ADDR, maplength, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
}
/* Trigger allocation of HugeTLB page. */
write_bytes(addr, MAP_LENGTH);
write_bytes(addr, maplength);
pfn = virt_to_pfn(addr);
if (pfn == -1UL) {
munmap(addr, MAP_LENGTH);
munmap(addr, maplength);
perror("virt_to_pfn");
exit(1);
}
@@ -125,13 +132,13 @@ int main(int argc, char **argv)
printf("Returned address is %p whose pfn is %lx\n", addr, pfn);
if (check_page_flags(pfn) < 0) {
munmap(addr, MAP_LENGTH);
munmap(addr, maplength);
perror("check_page_flags");
exit(1);
}
/* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
if (munmap(addr, MAP_LENGTH)) {
if (munmap(addr, maplength)) {
perror("munmap");
exit(1);
}