Commit 4b16b656 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "14 patches.

  Subsystems affected by this patch series: mm (kasan, mremap, tmpfs,
  selftests, memcg, and slub), MAINTAINERS, squashfs, nilfs2, and
  firmware"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  nilfs2: make splice write available again
  mm, slub: better heuristic for number of cpus when calculating slab order
  Revert "mm: memcontrol: avoid workload stalls when lowering memory.high"
  MAINTAINERS: update Andrey Ryabinin's email address
  selftests/vm: rename file run_vmtests to run_vmtests.sh
  tmpfs: disallow CONFIG_TMPFS_INODE64 on alpha
  tmpfs: disallow CONFIG_TMPFS_INODE64 on s390
  mm/mremap: fix BUILD_BUG_ON() error in get_extent
  firmware_loader: align .builtin_fw to 8
  kasan: fix stack traces dependency for HW_TAGS
  squashfs: add more sanity checks in xattr id lookup
  squashfs: add more sanity checks in inode lookup
  squashfs: add more sanity checks in id lookup
  squashfs: avoid out of bounds writes in decompressors
parents ef7d0b59 a35d8f01
--- a/.mailmap
+++ b/.mailmap
@@ -37,6 +37,7 @@ Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andy Adamson <andros@citi.umich.edu>
 Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
 Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>

--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -163,8 +163,7 @@ particular KASAN features.
 - ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
 
 - ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
-  traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
-  ``off``).
+  traces collection (default: ``on``).
 
 - ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
   report or also panic the kernel (default: ``report``).
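
This documentation change mirrors the mm/kasan/hw_tags.c hunk later in this diff: stack trace collection for hardware tag-based KASAN now defaults to on for all kernels rather than only CONFIG_DEBUG_KERNEL=y builds, so booting with ``kasan.stacktrace=off`` remains the way to avoid the collection overhead.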

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9559,7 +9559,7 @@ F: Documentation/hwmon/k8temp.rst
 F:	drivers/hwmon/k8temp.c
 
 KASAN
-M:	Andrey Ryabinin <aryabinin@virtuozzo.com>
+M:	Andrey Ryabinin <ryabinin.a.a@gmail.com>
 R:	Alexander Potapenko <glider@google.com>
 R:	Dmitry Vyukov <dvyukov@google.com>
 L:	kasan-dev@googlegroups.com

--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -203,7 +203,7 @@ config TMPFS_XATTR
 
 config TMPFS_INODE64
 	bool "Use 64-bit ino_t by default in tmpfs"
-	depends on TMPFS && 64BIT
+	depends on TMPFS && 64BIT && !(S390 || ALPHA)
 	default n
 	help
 	  tmpfs has historically used only inode numbers as wide as an unsigned
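
The new dependency covers the two tmpfs patches in this series: alpha and s390 build 64-bit kernels but carry a 32-bit user-visible ino_t, so defaulting tmpfs to 64-bit inode numbers there could hand userspace values that its stat() interface cannot represent.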

--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -141,6 +141,7 @@ const struct file_operations nilfs_file_operations = {
 	/* .release	= nilfs_release_file, */
 	.fsync		= nilfs_sync_file,
 	.splice_read	= generic_file_splice_read,
+	.splice_write	= iter_file_splice_write,
 };
 
 const struct inode_operations nilfs_file_inode_operations = {
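
iter_file_splice_write is the stock helper most local filesystems use; wiring it up restores splice(2)- and sendfile(2)-based writes to nilfs2 files, which had been failing with EINVAL since the generic fallback path was removed. A minimal userspace check, assuming a nilfs2 mount at /mnt/nilfs2 (hypothetical paths):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/sendfile.h>
	#include <sys/stat.h>

	int main(void)
	{
		int in = open("/etc/hostname", O_RDONLY);
		int out = open("/mnt/nilfs2/copy.txt",
			       O_WRONLY | O_CREAT | O_TRUNC, 0644);
		struct stat st;

		if (in < 0 || out < 0 || fstat(in, &st) < 0) {
			perror("setup");
			return 1;
		}
		/* Fails when the filesystem lacks a ->splice_write op. */
		if (sendfile(out, in, NULL, st.st_size) < 0) {
			perror("sendfile");
			return 1;
		}
		return 0;
	}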

--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -196,9 +196,15 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
 		length = SQUASHFS_COMPRESSED_SIZE(length);
 		index += 2;
 
-		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
+		TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
 		      compressed ? "" : "un", length);
 	}
+
+	if (length < 0 || length > output->length ||
+			(index + length) > msblk->bytes_used) {
+		res = -EIO;
+		goto out;
+	}
 
 	if (next_index)
 		*next_index = index + length;
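
The effect of the added check, pulled out as a standalone predicate for illustration (hypothetical helper name and plain C types; in the kernel, output->length is the capacity of the page actor the block is decompressed into):

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical stand-alone version of the bounds check above. */
	static bool squashfs_block_in_bounds(int length, int output_length,
					     uint64_t index, uint64_t bytes_used)
	{
		/* Negative or oversized lengths come from crafted or corrupted
		 * superblocks; a block must also end within the filesystem. */
		return length >= 0 && length <= output_length &&
		       index + length <= bytes_used;
	}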

--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -41,12 +41,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
 	int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
 	int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
-	u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+	u64 start;
 	__le64 ino;
 	int err;
 
 	TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
 
+	if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
+		return -EINVAL;
+
+	start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+
 	err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
 	if (err < 0)
 		return err;
@@ -111,7 +116,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
 		u64 lookup_table_start, u64 next_table, unsigned int inodes)
 {
 	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
+	unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
+	int n;
 	__le64 *table;
+	u64 start, end;
 
 	TRACE("In read_inode_lookup_table, length %d\n", length);
@@ -121,20 +129,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
 	if (inodes == 0)
 		return ERR_PTR(-EINVAL);
 
-	/* length bytes should not extend into the next table - this check
-	 * also traps instances where lookup_table_start is incorrectly larger
-	 * than the next table start
+	/*
+	 * The computed size of the lookup table (length bytes) should exactly
+	 * match the table start and end points
 	 */
-	if (lookup_table_start + length > next_table)
+	if (length != (next_table - lookup_table_start))
 		return ERR_PTR(-EINVAL);
 
 	table = squashfs_read_table(sb, lookup_table_start, length);
+	if (IS_ERR(table))
+		return table;
 
 	/*
-	 * table[0] points to the first inode lookup table metadata block,
-	 * this should be less than lookup_table_start
+	 * table[0], table[1], ... table[indexes - 1] store the locations
+	 * of the compressed inode lookup blocks.  Each entry should be
+	 * less than the next (i.e. table[0] < table[1]), and the difference
+	 * between them should be SQUASHFS_METADATA_SIZE or less.
+	 * table[indexes - 1] should be less than lookup_table_start, and
+	 * again the difference should be SQUASHFS_METADATA_SIZE or less
 	 */
-	if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
+	for (n = 0; n < (indexes - 1); n++) {
+		start = le64_to_cpu(table[n]);
+		end = le64_to_cpu(table[n + 1]);
+
+		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+			kfree(table);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	start = le64_to_cpu(table[indexes - 1]);
+	if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
 		kfree(table);
 		return ERR_PTR(-EINVAL);
 	}
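
The same invariant is enforced three times in this series: the check just added to export.c above is repeated for the id and xattr id tables below. Entries must be strictly increasing, adjacent entries may differ by at most one metadata block, and the last entry must fall below the table's own start. A compact userspace sketch of that shared check (hypothetical helper; assumes indexes >= 1, entries already byte-swapped to CPU order):

	#include <stdbool.h>
	#include <stdint.h>

	#define SQUASHFS_METADATA_SIZE 8192	/* uncompressed metadata block size */

	/* Hypothetical distillation of the loop used in export.c, id.c
	 * and xattr_id.c: table[] holds the on-disk locations of the
	 * compressed metadata blocks. */
	static bool index_table_valid(const uint64_t *table, int indexes,
				      uint64_t table_start)
	{
		for (int n = 0; n < indexes - 1; n++)
			if (table[n] >= table[n + 1] ||
			    table[n + 1] - table[n] > SQUASHFS_METADATA_SIZE)
				return false;

		return table[indexes - 1] < table_start &&
		       table_start - table[indexes - 1] <= SQUASHFS_METADATA_SIZE;
	}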

--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -35,10 +35,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
 	int block = SQUASHFS_ID_BLOCK(index);
 	int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
-	u64 start_block = le64_to_cpu(msblk->id_table[block]);
+	u64 start_block;
 	__le32 disk_id;
 	int err;
 
+	if (index >= msblk->ids)
+		return -EINVAL;
+
+	start_block = le64_to_cpu(msblk->id_table[block]);
+
 	err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
 							sizeof(disk_id));
 	if (err < 0)
@@ -56,7 +61,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
 		u64 id_table_start, u64 next_table, unsigned short no_ids)
 {
 	unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
+	unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
+	int n;
 	__le64 *table;
+	u64 start, end;
 
 	TRACE("In read_id_index_table, length %d\n", length);
@@ -67,20 +75,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
 		return ERR_PTR(-EINVAL);
 
 	/*
-	 * length bytes should not extend into the next table - this check
-	 * also traps instances where id_table_start is incorrectly larger
-	 * than the next table start
+	 * The computed size of the index table (length bytes) should exactly
+	 * match the table start and end points
 	 */
-	if (id_table_start + length > next_table)
+	if (length != (next_table - id_table_start))
 		return ERR_PTR(-EINVAL);
 
 	table = squashfs_read_table(sb, id_table_start, length);
+	if (IS_ERR(table))
+		return table;
 
 	/*
-	 * table[0] points to the first id lookup table metadata block, this
-	 * should be less than id_table_start
+	 * table[0], table[1], ... table[indexes - 1] store the locations
+	 * of the compressed id blocks.  Each entry should be less than
+	 * the next (i.e. table[0] < table[1]), and the difference between them
+	 * should be SQUASHFS_METADATA_SIZE or less.  table[indexes - 1]
+	 * should be less than id_table_start, and again the difference
+	 * should be SQUASHFS_METADATA_SIZE or less
 	 */
-	if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
+	for (n = 0; n < (indexes - 1); n++) {
+		start = le64_to_cpu(table[n]);
+		end = le64_to_cpu(table[n + 1]);
+
+		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+			kfree(table);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	start = le64_to_cpu(table[indexes - 1]);
+	if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
 		kfree(table);
 		return ERR_PTR(-EINVAL);
 	}

--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -64,5 +64,6 @@ struct squashfs_sb_info {
 	unsigned int			inodes;
 	unsigned int			fragments;
 	int				xattr_ids;
+	unsigned int			ids;
 };
 #endif

--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -166,6 +166,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
 	msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
 	msblk->inodes = le32_to_cpu(sblk->inodes);
 	msblk->fragments = le32_to_cpu(sblk->fragments);
+	msblk->ids = le16_to_cpu(sblk->no_ids);
 	flags = le16_to_cpu(sblk->flags);
 
 	TRACE("Found valid superblock on %pg\n", sb->s_bdev);
@@ -177,7 +178,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
 	TRACE("Block size %d\n", msblk->block_size);
 	TRACE("Number of inodes %d\n", msblk->inodes);
 	TRACE("Number of fragments %d\n", msblk->fragments);
-	TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
+	TRACE("Number of ids %d\n", msblk->ids);
 	TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
 	TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
 	TRACE("sblk->fragment_table_start %llx\n",
@@ -236,8 +237,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
 allocate_id_index_table:
 	/* Allocate and read id index table */
 	msblk->id_table = squashfs_read_id_index_table(sb,
-		le64_to_cpu(sblk->id_table_start), next_table,
-		le16_to_cpu(sblk->no_ids));
+		le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
 	if (IS_ERR(msblk->id_table)) {
 		errorf(fc, "unable to read id index table");
 		err = PTR_ERR(msblk->id_table);

--- a/fs/squashfs/xattr.h
+++ b/fs/squashfs/xattr.h
@@ -17,8 +17,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
 static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
 		u64 start, u64 *xattr_table_start, int *xattr_ids)
 {
+	struct squashfs_xattr_id_table *id_table;
+
+	id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+	if (IS_ERR(id_table))
+		return (__le64 *) id_table;
+
+	*xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
+	kfree(id_table);
+
 	ERROR("Xattrs in filesystem, these will be ignored\n");
-	*xattr_table_start = start;
 	return ERR_PTR(-ENOTSUPP);
 }

--- a/fs/squashfs/xattr_id.c
+++ b/fs/squashfs/xattr_id.c
@@ -31,10 +31,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
 	int block = SQUASHFS_XATTR_BLOCK(index);
 	int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
-	u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
+	u64 start_block;
 	struct squashfs_xattr_id id;
 	int err;
 
+	if (index >= msblk->xattr_ids)
+		return -EINVAL;
+
+	start_block = le64_to_cpu(msblk->xattr_id_table[block]);
+
 	err = squashfs_read_metadata(sb, &id, &start_block, &offset,
 							sizeof(id));
 	if (err < 0)
@@ -50,13 +55,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
 /*
  * Read uncompressed xattr id lookup table indexes from disk into memory
  */
-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
+__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
 		u64 *xattr_table_start, int *xattr_ids)
 {
-	unsigned int len;
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	unsigned int len, indexes;
 	struct squashfs_xattr_id_table *id_table;
+	__le64 *table;
+	u64 start, end;
+	int n;
 
-	id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+	id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
 	if (IS_ERR(id_table))
 		return (__le64 *) id_table;
@@ -70,13 +79,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
 	if (*xattr_ids == 0)
 		return ERR_PTR(-EINVAL);
 
-	/* xattr_table should be less than start */
-	if (*xattr_table_start >= start)
+	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+	indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
+
+	/*
+	 * The computed size of the index table (len bytes) should exactly
+	 * match the table start and end points
+	 */
+	start = table_start + sizeof(*id_table);
+	end = msblk->bytes_used;
+
+	if (len != (end - start))
 		return ERR_PTR(-EINVAL);
 
-	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+	table = squashfs_read_table(sb, start, len);
+	if (IS_ERR(table))
+		return table;
 
-	TRACE("In read_xattr_index_table, length %d\n", len);
+	/* table[0], table[1], ... table[indexes - 1] store the locations
+	 * of the compressed xattr id blocks.  Each entry should be less than
+	 * the next (i.e. table[0] < table[1]), and the difference between them
+	 * should be SQUASHFS_METADATA_SIZE or less.  table[indexes - 1]
+	 * should be less than table_start, and again the difference
+	 * should be SQUASHFS_METADATA_SIZE or less.
+	 *
+	 * Finally xattr_table_start should be less than table[0].
+	 */
+	for (n = 0; n < (indexes - 1); n++) {
+		start = le64_to_cpu(table[n]);
+		end = le64_to_cpu(table[n + 1]);
+
+		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+			kfree(table);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	start = le64_to_cpu(table[indexes - 1]);
+	if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
+		kfree(table);
+		return ERR_PTR(-EINVAL);
+	}
 
-	return squashfs_read_table(sb, start + sizeof(*id_table), len);
+	if (*xattr_table_start >= le64_to_cpu(table[0])) {
+		kfree(table);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return table;
 }

--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -459,7 +459,7 @@
 	}							\
 								\
 	/* Built-in firmware blobs */				\
-	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
+	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) {	\
 		__start_builtin_fw = .;				\
 		KEEP(*(.builtin_fw))				\
 		__end_builtin_fw = .;				\
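
For context on why the alignment matters: the section is consumed as an array of struct builtin_fw records delimited by the two linker symbols, roughly as below (a simplified, kernel-style sketch of the firmware loader's walk from this era). If the section starts on a non-8-byte boundary, every field access in the loop is a misaligned pointer load:

	#include <string.h>

	struct builtin_fw {
		char *name;
		void *data;
		unsigned long size;
	};

	extern struct builtin_fw __start_builtin_fw[];
	extern struct builtin_fw __end_builtin_fw[];

	/* Simplified: scan the built-in blobs for a firmware image by name. */
	static const struct builtin_fw *find_builtin_fw(const char *name)
	{
		const struct builtin_fw *fw;

		for (fw = __start_builtin_fw; fw != __end_builtin_fw; fw++)
			if (strcmp(name, fw->name) == 0)
				return fw;
		return NULL;
	}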

--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -134,11 +134,7 @@ void __init kasan_init_hw_tags(void)
 	switch (kasan_arg_stacktrace) {
 	case KASAN_ARG_STACKTRACE_DEFAULT:
-		/*
-		 * Default to enabling stack trace collection for
-		 * debug kernels.
-		 */
-		if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
-			static_branch_enable(&kasan_flag_stacktrace);
+		/* Default to enabling stack trace collection. */
+		static_branch_enable(&kasan_flag_stacktrace);
 		break;
 	case KASAN_ARG_STACKTRACE_OFF:
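
kasan_flag_stacktrace is a static key, so the default only decides which way the branch at the collection sites is patched at boot; there is no flag load on the fast path either way. A minimal sketch of the jump-label pattern involved (illustrative names, not the actual KASAN declarations):

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(demo_stacktrace_key);

	void demo_init(bool enable)
	{
		if (enable)	/* now the DEFAULT case as well, not just =on */
			static_branch_enable(&demo_stacktrace_key);
	}

	void demo_alloc_hook(void)
	{
		/* Compiles to a patched jump/no-op, not a memory load. */
		if (static_branch_unlikely(&demo_stacktrace_key))
			; /* collect and save an alloc/free stack trace */
	}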

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6271,6 +6271,8 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 	if (err)
 		return err;
 
+	page_counter_set_high(&memcg->memory, high);
+
 	for (;;) {
 		unsigned long nr_pages = page_counter_read(&memcg->memory);
 		unsigned long reclaimed;
@@ -6294,10 +6296,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 		break;
 	}
 
-	page_counter_set_high(&memcg->memory, high);
-
 	memcg_wb_domain_size_changed(memcg);
 
 	return nbytes;
 }
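
The revert is about ordering: with page_counter_set_high() moved back above the reclaim loop, the tightened limit is already in force, and throttles concurrent allocations, while the writer reclaims down to it. The reverted patch applied the limit only after reclaim succeeded, leaving a window in which the workload could keep expanding against the old, higher limit.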

--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -336,8 +336,9 @@ enum pgt_entry {
  * valid. Else returns a smaller extent bounded by the end of the source and
  * destination pgt_entry.
  */
-static unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr,
-		unsigned long old_end, unsigned long new_addr)
+static __always_inline unsigned long get_extent(enum pgt_entry entry,
+			unsigned long old_addr, unsigned long old_end,
+			unsigned long new_addr)
 {
 	unsigned long next, extent, mask, size;
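
get_extent() puts BUILD_BUG_ON(1) in its switch default case, which is only legal while every call is inlined with a compile-time-constant entry so the branch is provably dead; configurations that keep an out-of-line copy otherwise trip the assertion, hence the forced inlining. A reduced illustration of the mechanism, with simplified stand-ins for the kernel macros:

	/* Simplified stand-ins: the kernel's BUILD_BUG_ON(1) likewise boils
	 * down to a call to a function marked with the "error" attribute,
	 * which fires only if the compiler cannot delete the call. */
	#define __always_inline	inline __attribute__((__always_inline__))
	extern void __attribute__((error("bad pgt_entry"))) bad_pgt_entry(void);

	enum pgt_entry { NORMAL_PMD, HPAGE_PMD };

	static __always_inline unsigned long entry_size(enum pgt_entry entry)
	{
		switch (entry) {
		case NORMAL_PMD:
		case HPAGE_PMD:
			return 1UL << 21;	/* PMD granularity on x86-64 */
		default:
			bad_pgt_entry();	/* must be eliminated as dead code */
			return 0;
		}
	}

	/* entry_size(HPAGE_PMD) constant-folds, the default branch is deleted,
	 * and bad_pgt_entry() never trips. Without __always_inline, a real
	 * out-of-line copy can survive with the default branch intact and
	 * break the build, which is what happened to get_extent(). */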

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3423,6 +3423,7 @@ static inline int calculate_order(unsigned int size)
 	unsigned int order;
 	unsigned int min_objects;
 	unsigned int max_objects;
+	unsigned int nr_cpus;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -3433,8 +3434,21 @@ static inline int calculate_order(unsigned int size)
 	 * we reduce the minimum objects required in a slab.
 	 */
 	min_objects = slub_min_objects;
-	if (!min_objects)
-		min_objects = 4 * (fls(num_online_cpus()) + 1);
+	if (!min_objects) {
+		/*
+		 * Some architectures will only update present cpus when
+		 * onlining them, so don't trust the number if it's just 1. But
+		 * we also don't want to use nr_cpu_ids always, as on some other
+		 * architectures, there can be many possible cpus, but never
+		 * onlined. Here we compromise between trying to avoid too high
+		 * order on systems that appear larger than they are, and too
+		 * low order on systems that appear smaller than they are.
+		 */
+		nr_cpus = num_present_cpus();
+		if (nr_cpus <= 1)
+			nr_cpus = nr_cpu_ids;
+		min_objects = 4 * (fls(nr_cpus) + 1);
+	}
 	max_objects = order_objects(slub_max_order, size);
 	min_objects = min(min_objects, max_objects);
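
To put numbers on the heuristic: fls() returns the position of the highest set bit (fls(1) = 1, fls(64) = 7), so a machine that presents only the boot CPU early on but has nr_cpu_ids = 64 now sizes slabs for min_objects = 4 * (7 + 1) = 32 instead of 8. A userspace rehearsal of the calculation, with fls modeled on __builtin_clz:

	#include <stdio.h>

	static int fls(unsigned int x)	/* kernel semantics: fls(0) == 0 */
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	static unsigned int slub_min_objects_guess(unsigned int present,
						   unsigned int nr_cpu_ids)
	{
		unsigned int nr_cpus = present;

		if (nr_cpus <= 1)	/* don't trust a lone present cpu */
			nr_cpus = nr_cpu_ids;
		return 4 * (fls(nr_cpus) + 1);
	}

	int main(void)
	{
		printf("%u\n", slub_min_objects_guess(1, 64));	/* 32, was 8 */
		printf("%u\n", slub_min_objects_guess(16, 64));	/* 4 * (5 + 1) = 24 */
		return 0;
	}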