Commit 7b0acd91 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2024-07-26-14-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc hotfixes from Andrew Morton:
 "11 hotfixes, 7 of which are cc:stable.  7 are MM, 4 are other"

* tag 'mm-hotfixes-stable-2024-07-26-14-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nilfs2: handle inconsistent state in nilfs_btnode_create_block()
  selftests/mm: skip test for non-LPA2 and non-LVA systems
  mm/page_alloc: fix pcp->count race between drain_pages_zone() vs __rmqueue_pcplist()
  mm: memcg: add cacheline padding after lruvec in mem_cgroup_per_node
  alloc_tag: outline and export free_reserved_page()
  decompress_bunzip2: fix rare decompression failure
  mm/huge_memory: avoid PMD-size page cache if needed
  mm: huge_memory: use !CONFIG_64BIT to relax huge page alignment on 32 bit machines
  mm: fix old/young bit handling in the faulting path
  dt-bindings: arm: update James Clark's email address
  MAINTAINERS: mailmap: update James Clark's email address
parents 5256184b 4811f7af
@@ -260,6 +260,7 @@ Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
 Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
 James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
+James Clark <james.clark@linaro.org> <james.clark@arm.com>
 James E Wilson <wilson@specifix.com>
 James Hogan <jhogan@kernel.org> <james@albanarts.com>
 James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
...
@@ -30,7 +30,7 @@ description: |
 maintainers:
   - Mike Leach <mike.leach@linaro.org>
   - Suzuki K Poulose <suzuki.poulose@arm.com>
-  - James Clark <james.clark@arm.com>
+  - James Clark <james.clark@linaro.org>
   - Mao Jinlong <quic_jinlmao@quicinc.com>
   - Hao Zhang <quic_hazha@quicinc.com>
...
@@ -29,7 +29,7 @@ description: |
 maintainers:
   - Mike Leach <mike.leach@linaro.org>
   - Suzuki K Poulose <suzuki.poulose@arm.com>
-  - James Clark <james.clark@arm.com>
+  - James Clark <james.clark@linaro.org>
   - Mao Jinlong <quic_jinlmao@quicinc.com>
   - Hao Zhang <quic_hazha@quicinc.com>
...
@@ -2196,7 +2196,7 @@ N:	digicolor
 ARM/CORESIGHT FRAMEWORK AND DRIVERS
 M:	Suzuki K Poulose <suzuki.poulose@arm.com>
 R:	Mike Leach <mike.leach@linaro.org>
-R:	James Clark <james.clark@arm.com>
+R:	James Clark <james.clark@linaro.org>
 L:	coresight@lists.linaro.org (moderated for non-subscribers)
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -17894,7 +17894,7 @@ F:	tools/perf/
 PERFORMANCE EVENTS TOOLING ARM64
 R:	John Garry <john.g.garry@oracle.com>
 R:	Will Deacon <will@kernel.org>
-R:	James Clark <james.clark@arm.com>
+R:	James Clark <james.clark@linaro.org>
 R:	Mike Leach <mike.leach@linaro.org>
 R:	Leo Yan <leo.yan@linux.dev>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
...
@@ -51,12 +51,21 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 
 	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
 	if (unlikely(!bh))
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
 		     buffer_dirty(bh))) {
-		brelse(bh);
-		BUG();
+		/*
+		 * The block buffer at the specified new address was already
+		 * in use.  This can happen if it is a virtual block number
+		 * and has been reallocated due to corruption of the bitmap
+		 * used to manage its allocation state (if not, the buffer
+		 * clearing of an abandoned b-tree node is missing somewhere).
+		 */
+		nilfs_error(inode->i_sb,
+			    "state inconsistency probably due to duplicate use of b-tree node block address %llu (ino=%lu)",
+			    (unsigned long long)blocknr, inode->i_ino);
+		goto failed;
 	}
 	memset(bh->b_data, 0, i_blocksize(inode));
 	bh->b_bdev = inode->i_sb->s_bdev;
@@ -67,6 +76,12 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 	folio_unlock(bh->b_folio);
 	folio_put(bh->b_folio);
 	return bh;
+
+failed:
+	folio_unlock(bh->b_folio);
+	folio_put(bh->b_folio);
+	brelse(bh);
+	return ERR_PTR(-EIO);
 }
 
 int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
@@ -217,8 +232,8 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
 	}
 
 	nbh = nilfs_btnode_create_block(btnc, newkey);
-	if (!nbh)
-		return -ENOMEM;
+	if (IS_ERR(nbh))
+		return PTR_ERR(nbh);
 
 	BUG_ON(nbh == obh);
 	ctxt->newbh = nbh;
...
@@ -63,8 +63,8 @@ static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
 	struct buffer_head *bh;
 
 	bh = nilfs_btnode_create_block(btnc, ptr);
-	if (!bh)
-		return -ENOMEM;
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	set_buffer_nilfs_volatile(bh);
 	*bhp = bh;
...
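The nilfs2 hunks above switch nilfs_btnode_create_block() from returning NULL and calling BUG() to the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention, so callers can distinguish an allocation failure (-ENOMEM) from a detected on-disk inconsistency (-EIO). As a rough illustration of that calling pattern only, here is a standalone userspace sketch; the macros are simplified stand-ins for the kernel's err.h helpers and create_block() is a hypothetical placeholder, not the nilfs2 code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR macros. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Hypothetical allocator: encodes an errno in the pointer instead of NULL/BUG(). */
static void *create_block(int simulate)
{
	if (simulate == 1)
		return ERR_PTR(-ENOMEM);	/* buffer could not be grabbed */
	if (simulate == 2)
		return ERR_PTR(-EIO);		/* on-disk state is inconsistent */
	return malloc(64);			/* success path */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		void *bh = create_block(i);

		if (IS_ERR(bh)) {		/* caller just propagates the errno */
			printf("create_block: error %ld\n", PTR_ERR(bh));
			continue;
		}
		printf("create_block: ok %p\n", bh);
		free(bh);
	}
	return 0;
}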
@@ -74,14 +74,20 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
 #define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
 
 /*
- * Mask of all large folio orders supported for file THP.
+ * Mask of all large folio orders supported for file THP. Folios in a DAX
+ * file is never split and the MAX_PAGECACHE_ORDER limit does not apply to
+ * it.
  */
-#define THP_ORDERS_ALL_FILE	(BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_FILE_DAX		\
+	(BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_FILE_DEFAULT	\
+	((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
 
 /*
  * Mask of all large folio orders supported for THP.
  */
-#define THP_ORDERS_ALL	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
+#define THP_ORDERS_ALL	\
+	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT)
 
 #define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
 #define TVA_IN_PF		(1 << 1)	/* Page fault handler */
...
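The huge_mm.h hunk splits THP_ORDERS_ALL_FILE into a DAX mask (PMD and PUD orders only, since DAX folios are never split) and a default file mask capped at MAX_PAGECACHE_ORDER. To see which bits these macros actually produce, the small userspace program below evaluates them with assumed values PMD_ORDER = 9, PUD_ORDER = 18 and MAX_PAGECACHE_ORDER = 8 (plausible for 4 KiB base pages on x86-64); the constants are illustrative, not taken from the patch.

#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define PMD_ORDER		9	/* assumed */
#define PUD_ORDER		18	/* assumed */
#define MAX_PAGECACHE_ORDER	8	/* assumed */

#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
#define THP_ORDERS_ALL_FILE_DAX		(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT	((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))

int main(void)
{
	/* Each set bit k in a mask means "order-k folios are allowed". */
	printf("ANON         = %#lx (orders 2..%d)\n",
	       THP_ORDERS_ALL_ANON, PMD_ORDER);
	printf("FILE_DAX     = %#lx (orders %d and %d only)\n",
	       THP_ORDERS_ALL_FILE_DAX, PMD_ORDER, PUD_ORDER);
	printf("FILE_DEFAULT = %#lx (orders 1..%d)\n",
	       THP_ORDERS_ALL_FILE_DEFAULT, MAX_PAGECACHE_ORDER);
	return 0;
}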
@@ -109,6 +109,7 @@ struct mem_cgroup_per_node {
 
 	/* Fields which get updated often at the end. */
 	struct lruvec		lruvec;
+	CACHELINE_PADDING(_pad2_);
 
 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 	struct mem_cgroup_reclaim_iter	iter;
 };
...
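The memcontrol.h hunk inserts CACHELINE_PADDING(_pad2_) after the heavily updated lruvec so that the fields following it land on a different cache line, avoiding false sharing between writers of lruvec and readers of the rest of the struct. The userspace sketch below shows the same idea with a hand-rolled pad; the 64-byte line size and the struct are illustrative only.

#include <stdio.h>
#include <stddef.h>

#define CACHE_LINE	64	/* assumed line size, for illustration */

struct stats {
	/* written on every operation (hot for writers) */
	unsigned long counter;
	/* pad out to the next cache line, like CACHELINE_PADDING(_pad_) */
	char _pad_[CACHE_LINE - sizeof(unsigned long)];
	/* read-mostly data (hot for readers) now sits on its own line */
	unsigned long limit;
};

int main(void)
{
	printf("offsetof(counter) = %zu\n", offsetof(struct stats, counter));
	printf("offsetof(limit)   = %zu\n", offsetof(struct stats, limit));
	return 0;
}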
@@ -3137,21 +3137,7 @@ extern void reserve_bootmem_region(phys_addr_t start,
 				   phys_addr_t end, int nid);
 
 /* Free the reserved page into the buddy system, so it gets managed. */
-static inline void free_reserved_page(struct page *page)
-{
-	if (mem_alloc_profiling_enabled()) {
-		union codetag_ref *ref = get_page_tag_ref(page);
-
-		if (ref) {
-			set_codetag_empty(ref);
-			put_page_tag_ref(ref);
-		}
-	}
-	ClearPageReserved(page);
-	init_page_count(page);
-	__free_page(page);
-	adjust_managed_page_count(page, 1);
-}
+void free_reserved_page(struct page *page);
 #define free_highmem_page(page) free_reserved_page(page)
 
 static inline void mark_page_reserved(struct page *page)
...
@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd)
 		   RUNB) */
 	symCount = symTotal+2;
 	for (j = 0; j < groupCount; j++) {
-		unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
+		unsigned char length[MAX_SYMBOLS];
+		unsigned short temp[MAX_HUFCODE_BITS+1];
 		int	minLen,	maxLen, pp;
 		/* Read Huffman code lengths for each symbol.  They're
 		   stored in a way similar to mtf; record a starting
...
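The bunzip2 hunk widens temp[], which counts how many of the up-to-258 Huffman symbols use each code length, from unsigned char to unsigned short: if more than 255 symbols share one length, an 8-bit counter wraps around and the decode table is built from a bogus count, producing the rare decompression failure. A standalone illustration of that wraparound (not the bunzip2 code itself):

#include <stdio.h>

#define MAX_SYMBOLS 258		/* same limit as the decompressor */

int main(void)
{
	unsigned char  narrow = 0;	/* old counter type */
	unsigned short wide   = 0;	/* new counter type */

	/* worst case: every symbol ends up with the same code length */
	for (int i = 0; i < MAX_SYMBOLS; i++) {
		narrow++;
		wide++;
	}
	printf("8-bit count:  %u (wrapped)\n", narrow);	/* prints 2 */
	printf("16-bit count: %u\n", wide);		/* prints 258 */
	return 0;
}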
@@ -89,9 +89,17 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
 	bool smaps = tva_flags & TVA_SMAPS;
 	bool in_pf = tva_flags & TVA_IN_PF;
 	bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
+	unsigned long supported_orders;
 
 	/* Check the intersection of requested and supported orders. */
-	orders &= vma_is_anonymous(vma) ?
-			THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
+	if (vma_is_anonymous(vma))
+		supported_orders = THP_ORDERS_ALL_ANON;
+	else if (vma_is_dax(vma))
+		supported_orders = THP_ORDERS_ALL_FILE_DAX;
+	else
+		supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
+
+	orders &= supported_orders;
 	if (!orders)
 		return 0;
@@ -877,7 +885,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
 	loff_t off_align = round_up(off, size);
 	unsigned long len_pad, ret, off_sub;
 
-	if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
+	if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
 		return 0;
 
 	if (off_end <= off_align || (off_end - off_align) < size)
...
@@ -4780,7 +4780,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
-	bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
+	bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
 	pte_t entry;
 
 	flush_icache_pages(vma, page, nr);
...
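The memory.c hunk restores the intended meaning of prefault in set_pte_range(): it should be true only when the range being mapped does not contain the address that actually faulted, so that (on architectures that want it) purely prefaulted PTEs can be installed as old while the faulting page stays young. A standalone sketch of that decision, using illustrative addresses and a simplified in_range() helper rather than the kernel one:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* simplified in_range(x, start, len): is x within [start, start + len)? */
static bool in_range(unsigned long x, unsigned long start, unsigned long len)
{
	return x - start < len;
}

int main(void)
{
	unsigned long fault_address = 0x201000;	/* the page the CPU touched */

	/* two batches of 4 pages that a fault handler might install */
	struct { unsigned long addr; unsigned long nr; } batch[] = {
		{ 0x200000, 4 },	/* contains the faulting address */
		{ 0x300000, 4 },	/* read-around pages only */
	};

	for (int i = 0; i < 2; i++) {
		/* fixed logic: prefault only when the fault lies outside the batch */
		bool prefault = !in_range(fault_address, batch[i].addr,
					  batch[i].nr * PAGE_SIZE);

		printf("batch at %#lx: ptes mapped %s\n",
		       batch[i].addr, prefault ? "old" : "young");
	}
	return 0;
}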
@@ -2343,16 +2343,20 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 {
 	struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
-	int count = READ_ONCE(pcp->count);
-
-	while (count) {
-		int to_drain = min(count, pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
-		count -= to_drain;
+	int count;
 
+	do {
 		spin_lock(&pcp->lock);
-		free_pcppages_bulk(zone, to_drain, pcp, 0);
-		spin_unlock(&pcp->lock);
-	}
+		count = pcp->count;
+		if (count) {
+			int to_drain = min(count,
+				pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
+
+			free_pcppages_bulk(zone, to_drain, pcp, 0);
+			count -= to_drain;
+		}
+		spin_unlock(&pcp->lock);
+	} while (count);
 }
 
 /*
@@ -5815,6 +5819,23 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
 	return pages;
 }
 
+void free_reserved_page(struct page *page)
+{
+	if (mem_alloc_profiling_enabled()) {
+		union codetag_ref *ref = get_page_tag_ref(page);
+
+		if (ref) {
+			set_codetag_empty(ref);
+			put_page_tag_ref(ref);
+		}
+	}
+	ClearPageReserved(page);
+	init_page_count(page);
+	__free_page(page);
+	adjust_managed_page_count(page, 1);
+}
+EXPORT_SYMBOL(free_reserved_page);
+
 static int page_alloc_cpu_dead(unsigned int cpu)
 {
 	struct zone *zone;
...
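The first page_alloc.c hunk closes a race in drain_pages_zone(): the old code sampled pcp->count once, outside the lock, so a concurrent __rmqueue_pcplist() could shrink the list and free_pcppages_bulk() could be asked to drain more pages than were actually present. The fix re-reads the count under pcp->lock on every iteration and only drains what is really there. The userspace sketch below mimics that shape with a mutex-protected counter standing in for the per-cpu list; names and numbers are illustrative, not kernel code.

#include <pthread.h>
#include <stdio.h>

#define BATCH 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int count = 100;		/* items currently on the "list" */

static void drain(void)
{
	int remaining;

	do {
		pthread_mutex_lock(&lock);
		remaining = count;	/* reread under the lock, like pcp->count */
		if (remaining) {
			int to_drain = remaining < BATCH ? remaining : BATCH;

			count -= to_drain;	/* stand-in for free_pcppages_bulk() */
			remaining -= to_drain;
		}
		pthread_mutex_unlock(&lock);
	} while (remaining);
}

static void *allocator(void *arg)
{
	/* another CPU taking items off the list while we drain; a stale
	 * snapshot of count taken before these decrements could tell the
	 * drainer to free more than is present */
	for (int i = 0; i < 50; i++) {
		pthread_mutex_lock(&lock);
		if (count > 0)
			count--;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, allocator, NULL);
	drain();
	pthread_join(t, NULL);
	printf("left on list after drain: %d\n", count);
	return 0;
}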
@@ -293,6 +293,20 @@ static int run_test(struct testcase *test, int count)
 	return ret;
 }
 
+#ifdef __aarch64__
+/* Check if userspace VA > 48 bits */
+static int high_address_present(void)
+{
+	void *ptr = mmap((void *)(1UL << 50), 1, PROT_READ | PROT_WRITE,
+			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+	if (ptr == MAP_FAILED)
+		return 0;
+
+	munmap(ptr, 1);
+	return 1;
+}
+#endif
+
 static int supported_arch(void)
 {
 #if defined(__powerpc64__)
@@ -300,7 +314,7 @@ static int supported_arch(void)
 #elif defined(__x86_64__)
 	return 1;
 #elif defined(__aarch64__)
-	return 1;
+	return high_address_present();
 #else
 	return 0;
 #endif
...