Commit 00232240 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "14 patches.

  Subsystems affected by this patch series: mm (hugetlb, kasan, gup,
  selftests, z3fold, kfence, memblock, and highmem), squashfs, ia64,
  gcov, and mailmap"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mailmap: update Andrey Konovalov's email address
  mm/highmem: fix CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
  mm: memblock: fix section mismatch warning again
  kfence: make compatible with kmemleak
  gcov: fix clang-11+ support
  ia64: fix format strings for err_inject
  ia64: mca: allocate early mca with GFP_ATOMIC
  squashfs: fix xattr id and id lookup sanity checks
  squashfs: fix inode lookup sanity checks
  z3fold: prevent reclaim/free race for headless pages
  selftests/vm: fix out-of-tree build
  mm/mmu_notifiers: ensure range_end() is paired with range_start()
  kasan: fix per-page tags for non-page_alloc pages
  hugetlb_cgroup: fix imbalanced css_get and css_put pair for shared mappings
parents 2ba9bea2 d3e2ff28
@@ -36,6 +36,7 @@ Andrew Morton <akpm@linux-foundation.org>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andy Adamson <andros@citi.umich.edu>
...
@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr,	\
 		char *buf)						\
 {									\
 	u32 cpu=dev->id;						\
-	return sprintf(buf, "%lx\n", name[cpu]);			\
+	return sprintf(buf, "%llx\n", name[cpu]);			\
 }
 #define store(name)							\
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
-	printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
-	printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
-	printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+	printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+	printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+	printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
 	       err_data_buffer[cpu].data1,
 	       err_data_buffer[cpu].data2,
 	       err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
-	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+	printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+	printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
 #endif
 	return size;
 }
@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
 	unsigned int cpu=dev->id;
-	return sprintf(buf, "%lx\n", phys_addr[cpu]);
+	return sprintf(buf, "%llx\n", phys_addr[cpu]);
 }
 static ssize_t
@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
-		printk("Virtual address %lx is not existing.\n",virt_addr);
+		printk("Virtual address %llx is not existing.\n", virt_addr);
 #endif
 		return -EINVAL;
 	}
@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
 {
 	unsigned int cpu=dev->id;
-	return sprintf(buf, "%lx, %lx, %lx\n",
+	return sprintf(buf, "%llx, %llx, %llx\n",
 	       err_data_buffer[cpu].data1,
 	       err_data_buffer[cpu].data2,
 	       err_data_buffer[cpu].data3);
@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
 	int ret;
 #ifdef ERR_INJ_DEBUG
-	printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+	printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
 	       err_data_buffer[cpu].data1,
 	       err_data_buffer[cpu].data2,
 	       err_data_buffer[cpu].data3,
 	       cpu);
 #endif
-	ret=sscanf(buf, "%lx, %lx, %lx",
+	ret = sscanf(buf, "%llx, %llx, %llx",
 	       &err_data_buffer[cpu].data1,
 	       &err_data_buffer[cpu].data2,
 	       &err_data_buffer[cpu].data3);
...
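For context on the err_inject change above: these per-cpu fields are u64, and u64 maps to unsigned long long in the kernel's common integer types (asm-generic/int-ll64.h), so "%lx" draws -Wformat warnings while "%llx" matches. A minimal standalone sketch of the rule, not taken from this patch (the variable name is illustrative):

	/* Userspace sketch: printing a 64-bit value with the matching length
	 * modifier.  Assumes u64 is "unsigned long long", as in the kernel's
	 * asm-generic/int-ll64.h typedefs. */
	#include <stdio.h>

	typedef unsigned long long u64;

	int main(void)
	{
		u64 err_type_info = 0xdeadbeefcafef00dULL;

		/* "%lx" expects unsigned long and warns under -Wformat here;
		 * "%llx" matches unsigned long long. */
		printf("err_type_info=%llx\n", err_type_info);
		return 0;
	}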
@@ -1824,7 +1824,7 @@ ia64_mca_cpu_init(void *cpu_data)
 			data = mca_bootmem();
 			first_time = 0;
 		} else
-			data = (void *)__get_free_pages(GFP_KERNEL,
+			data = (void *)__get_free_pages(GFP_ATOMIC,
 							get_order(sz));
 		if (!data)
 			panic("Could not allocate MCA memory for cpu %d\n",
...
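The MCA change above swaps GFP_KERNEL for GFP_ATOMIC because this allocation happens during early CPU bring-up, where sleeping is not allowed, and GFP_KERNEL allocations may sleep to reclaim memory. A hedged sketch of the general rule, with an illustrative helper that is not part of this patch:

	/* Illustrative sketch of the gfp choice: GFP_KERNEL may sleep and is
	 * only valid where sleeping is allowed; in atomic context (IRQs off,
	 * spinlock held, early CPU bring-up) use GFP_ATOMIC, which never
	 * sleeps but fails more readily. */
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static void *alloc_buffer(size_t sz, bool can_sleep)
	{
		gfp_t flags = can_sleep ? GFP_KERNEL : GFP_ATOMIC;

		return (void *)__get_free_pages(flags, get_order(sz));
	}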
@@ -152,14 +152,18 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
 		start = le64_to_cpu(table[n]);
 		end = le64_to_cpu(table[n + 1]);
-		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+		if (start >= end
+		    || (end - start) >
+		    (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 			kfree(table);
 			return ERR_PTR(-EINVAL);
 		}
 	}
 	start = le64_to_cpu(table[indexes - 1]);
-	if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
+	if (start >= lookup_table_start ||
+	    (lookup_table_start - start) >
+	    (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 		kfree(table);
 		return ERR_PTR(-EINVAL);
 	}
...
@@ -97,14 +97,16 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
 		start = le64_to_cpu(table[n]);
 		end = le64_to_cpu(table[n + 1]);
-		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+		if (start >= end || (end - start) >
+				(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 			kfree(table);
 			return ERR_PTR(-EINVAL);
 		}
 	}
 	start = le64_to_cpu(table[indexes - 1]);
-	if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
+	if (start >= id_table_start || (id_table_start - start) >
+			(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 		kfree(table);
 		return ERR_PTR(-EINVAL);
 	}
...
@@ -17,6 +17,7 @@
 /* size of metadata (inode and directory) blocks */
 #define SQUASHFS_METADATA_SIZE		8192
+#define SQUASHFS_BLOCK_OFFSET		2
 /* default size of block device I/O */
 #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
...
@@ -109,14 +109,16 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
 		start = le64_to_cpu(table[n]);
 		end = le64_to_cpu(table[n + 1]);
-		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+		if (start >= end || (end - start) >
+				(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 			kfree(table);
 			return ERR_PTR(-EINVAL);
 		}
 	}
 	start = le64_to_cpu(table[indexes - 1]);
-	if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
+	if (start >= table_start || (table_start - start) >
+			(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 		kfree(table);
 		return ERR_PTR(-EINVAL);
 	}
...
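The three squashfs hunks above widen the same sanity check: consecutive index entries point at the start of consecutive on-disk metadata blocks, and each block holds at most SQUASHFS_METADATA_SIZE bytes of data preceded by a 2-byte length header (SQUASHFS_BLOCK_OFFSET), so the gap between entries may legitimately be up to the sum of the two. A hedged sketch of that bound, with a hypothetical helper name (the constants mirror the patch):

	/* Illustrative sketch of the tightened sanity check: a gap larger than
	 * one on-disk metadata block (data + 2-byte header) means the index
	 * table is corrupted. */
	#define SQUASHFS_METADATA_SIZE	8192
	#define SQUASHFS_BLOCK_OFFSET	2

	static int index_entry_valid(unsigned long long start, unsigned long long end)
	{
		if (start >= end)
			return 0;	/* entries must be strictly increasing */
		if (end - start > SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)
			return 0;	/* spans more than one metadata block */
		return 1;
	}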
@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void)
 	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
 }
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+	css_put(&h_cg->css);
+}
 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 					struct hugetlb_cgroup **ptr);
 extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
 extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
 						struct file_region *rg,
-						unsigned long nr_pages);
+						unsigned long nr_pages,
+						bool region_del);
 extern void hugetlb_cgroup_file_init(void) __init;
 extern void hugetlb_cgroup_migrate(struct page *oldhpage,
@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
 #else
 static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
 						       struct file_region *rg,
-						       unsigned long nr_pages)
+						       unsigned long nr_pages,
+						       bool region_del)
 {
 }
@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void)
 	return true;
 }
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
 static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 					       struct hugetlb_cgroup **ptr)
 {
...
@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
 /*
  * Set the allocation direction to bottom-up or top-down.
  */
-static inline __init void memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
 {
 	memblock.bottom_up = enable;
 }
@@ -470,7 +470,7 @@ static inline __init void memblock_set_bottom_up(bool enable)
  * if this is true, that said, memblock will allocate memory
  * in bottom-up direction.
  */
-static inline __init bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
 {
 	return memblock.bottom_up;
 }
...
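The memblock change above re-annotates two helpers with __init_memblock so their section matches the rest of the memblock code and its callers; modpost warns when resident code references symbols that live in discarded init sections. A generic illustrative sketch of how such a mismatch arises (hypothetical functions, not from this patch):

	/* Hypothetical sketch of a section mismatch: early_setup() lives in
	 * .init.text and is discarded after boot, but runtime_path() stays
	 * resident and still references it, so modpost warns. */
	#include <linux/init.h>

	static int __init early_setup(void)
	{
		return 0;
	}

	int runtime_path(void)
	{
		return early_setup();	/* WARNING: section mismatch */
	}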
@@ -1461,16 +1461,28 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
+ * setting tags for all pages to native kernel tag value 0xff, as the default
+ * value 0x00 maps to 0xff.
+ */
 static inline u8 page_kasan_tag(const struct page *page)
 {
-	if (kasan_enabled())
-		return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
-	return 0xff;
+	u8 tag = 0xff;
+	if (kasan_enabled()) {
+		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+		tag ^= 0xff;
+	}
+	return tag;
 }
 static inline void page_kasan_tag_set(struct page *page, u8 tag)
 {
 	if (kasan_enabled()) {
+		tag ^= 0xff;
 		page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
 		page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
 	}
...
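The scheme above stores each page's tag xor'ed with 0xff so that freshly zeroed page->flags decode to the native kernel tag 0xff without having to write a tag into every page at boot. A small standalone sketch of the encode/decode round trip (field layout and names are illustrative, not the real page->flags layout):

	/* Standalone sketch of the xor-with-0xff storage scheme: zero-initialized
	 * flags decode to the native tag 0xff, and set/get round-trip any tag. */
	#include <assert.h>
	#include <stdint.h>

	#define TAG_SHIFT 8
	#define TAG_MASK  0xffu

	static uint64_t flags;	/* stands in for page->flags */

	static void tag_set(uint8_t tag)
	{
		tag ^= 0xff;				/* encode */
		flags &= ~((uint64_t)TAG_MASK << TAG_SHIFT);
		flags |= (uint64_t)(tag & TAG_MASK) << TAG_SHIFT;
	}

	static uint8_t tag_get(void)
	{
		return ((flags >> TAG_SHIFT) & TAG_MASK) ^ 0xff;	/* decode */
	}

	int main(void)
	{
		assert(tag_get() == 0xff);	/* zeroed flags -> native tag */
		tag_set(0x3a);
		assert(tag_get() == 0x3a);	/* round trip preserved */
		return 0;
	}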
@@ -169,11 +169,11 @@ struct mmu_notifier_ops {
 	 * the last refcount is dropped.
 	 *
 	 * If blockable argument is set to false then the callback cannot
-	 * sleep and has to return with -EAGAIN. 0 should be returned
-	 * otherwise. Please note that if invalidate_range_start approves
-	 * a non-blocking behavior then the same applies to
-	 * invalidate_range_end.
-	 *
+	 * sleep and has to return with -EAGAIN if sleeping would be required.
+	 * 0 should be returned otherwise. Please note that notifiers that can
+	 * fail invalidate_range_start are not allowed to implement
+	 * invalidate_range_end, as there is no mechanism for informing the
+	 * notifier that its start failed.
 	 */
 	int (*invalidate_range_start)(struct mmu_notifier *subscription,
 				      const struct mmu_notifier_range *range);
...
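Under the rule documented above, a notifier whose start callback can fail in non-blocking mode must not provide an end callback, because a failed start will never get a paired end. A hedged sketch of an ops table that follows the rule (the driver names are hypothetical):

	/* Hypothetical sketch: the start callback may return -EAGAIN in
	 * non-blocking mode, therefore no invalidate_range_end is provided. */
	#include <linux/mmu_notifier.h>

	static int demo_invalidate_range_start(struct mmu_notifier *sub,
					       const struct mmu_notifier_range *range)
	{
		if (!mmu_notifier_range_blockable(range))
			return -EAGAIN;		/* cannot sleep: refuse the invalidation */

		/* ... sleepable invalidation work ... */
		return 0;
	}

	static const struct mmu_notifier_ops demo_ops = {
		.invalidate_range_start	= demo_invalidate_range_start,
		/* .invalidate_range_end deliberately not implemented */
	};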
@@ -75,7 +75,9 @@ struct gcov_fn_info {
 	u32 num_counters;
 	u64 *counters;
+#if CONFIG_CLANG_VERSION < 110000
 	const char *function_name;
+#endif
 };
 static struct gcov_info *current_info;
@@ -105,6 +107,7 @@ void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush)
 }
 EXPORT_SYMBOL(llvm_gcov_init);
+#if CONFIG_CLANG_VERSION < 110000
 void llvm_gcda_start_file(const char *orig_filename, const char version[4],
 		u32 checksum)
 {
@@ -113,7 +116,17 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4],
 	current_info->checksum = checksum;
 }
 EXPORT_SYMBOL(llvm_gcda_start_file);
+#else
+void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum)
+{
+	current_info->filename = orig_filename;
+	current_info->version = version;
+	current_info->checksum = checksum;
+}
+EXPORT_SYMBOL(llvm_gcda_start_file);
+#endif
+#if CONFIG_CLANG_VERSION < 110000
 void llvm_gcda_emit_function(u32 ident, const char *function_name,
 		u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum)
 {
@@ -133,6 +146,24 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
 	list_add_tail(&info->head, &current_info->functions);
 }
 EXPORT_SYMBOL(llvm_gcda_emit_function);
+#else
+void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
+		u8 use_extra_checksum, u32 cfg_checksum)
+{
+	struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return;
+	INIT_LIST_HEAD(&info->head);
+	info->ident = ident;
+	info->checksum = func_checksum;
+	info->use_extra_checksum = use_extra_checksum;
+	info->cfg_checksum = cfg_checksum;
+	list_add_tail(&info->head, &current_info->functions);
+}
+EXPORT_SYMBOL(llvm_gcda_emit_function);
+#endif
 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
 {
@@ -295,6 +326,7 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
 	}
 }
+#if CONFIG_CLANG_VERSION < 110000
 static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
 {
 	size_t cv_size; /* counter values size */
@@ -322,6 +354,28 @@ static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
 	kfree(fn_dup);
 	return NULL;
 }
+#else
+static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
+{
+	size_t cv_size; /* counter values size */
+	struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn),
+			GFP_KERNEL);
+	if (!fn_dup)
+		return NULL;
+	INIT_LIST_HEAD(&fn_dup->head);
+	cv_size = fn->num_counters * sizeof(fn->counters[0]);
+	fn_dup->counters = vmalloc(cv_size);
+	if (!fn_dup->counters) {
+		kfree(fn_dup);
+		return NULL;
+	}
+	memcpy(fn_dup->counters, fn->counters, cv_size);
+	return fn_dup;
+}
+#endif
 /**
  * gcov_info_dup - duplicate profiling data set
@@ -362,6 +416,7 @@ struct gcov_info *gcov_info_dup(struct gcov_info *info)
  * gcov_info_free - release memory for profiling data set duplicate
  * @info: profiling data set duplicate to free
  */
+#if CONFIG_CLANG_VERSION < 110000
 void gcov_info_free(struct gcov_info *info)
 {
 	struct gcov_fn_info *fn, *tmp;
@@ -375,6 +430,20 @@ void gcov_info_free(struct gcov_info *info)
 	kfree(info->filename);
 	kfree(info);
 }
+#else
+void gcov_info_free(struct gcov_info *info)
+{
+	struct gcov_fn_info *fn, *tmp;
+	list_for_each_entry_safe(fn, tmp, &info->functions, head) {
+		vfree(fn->counters);
+		list_del(&fn->head);
+		kfree(fn);
+	}
+	kfree(info->filename);
+	kfree(info);
+}
+#endif
 #define ITER_STRIDE		PAGE_SIZE
...
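The guards above compare CONFIG_CLANG_VERSION against 110000 because, to my understanding (worth verifying against scripts/clang-version.sh), the kernel encodes the compiler version as major * 10000 + minor * 100 + patch, so 110000 corresponds to clang 11.0.0, the release that changed the profiling hook signatures handled here. An illustrative macro spelling out that assumed encoding:

	/* Illustrative only: the assumed CONFIG_CLANG_VERSION encoding, so the
	 * guard boundary 110000 reads as clang 11.0.0. */
	#define ENCODE_CLANG_VERSION(major, minor, patch) \
		((major) * 10000 + (minor) * 100 + (patch))

	#if ENCODE_CLANG_VERSION(11, 0, 0) == 110000
	/* boundary used by the clang-11+ gcov support above */
	#endif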
@@ -618,7 +618,7 @@ void __kmap_local_sched_out(void)
 		int idx;
 		/* With debug all even slots are unmapped and act as guard */
-		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
 			WARN_ON_ONCE(!pte_none(pteval));
 			continue;
 		}
@@ -654,7 +654,7 @@ void __kmap_local_sched_in(void)
 		int idx;
 		/* With debug all even slots are unmapped and act as guard */
-		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
 			WARN_ON_ONCE(!pte_none(pteval));
 			continue;
 		}
...
@@ -280,6 +280,17 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
 		nrg->reservation_counter =
 			&h_cg->rsvd_hugepage[hstate_index(h)];
 		nrg->css = &h_cg->css;
+		/*
+		 * The caller will hold exactly one h_cg->css reference for the
+		 * whole contiguous reservation region. But this area might be
+		 * scattered when there are already some file_regions reside in
+		 * it. As a result, many file_regions may share only one css
+		 * reference. In order to ensure that one file_region must hold
+		 * exactly one h_cg->css reference, we should do css_get for
+		 * each file_region and leave the reference held by caller
+		 * untouched.
+		 */
+		css_get(&h_cg->css);
 		if (!resv->pages_per_hpage)
 			resv->pages_per_hpage = pages_per_huge_page(h);
 		/* pages_per_hpage should be the same for all entries in
@@ -293,6 +304,14 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
 #endif
 }
+static void put_uncharge_info(struct file_region *rg)
+{
+#ifdef CONFIG_CGROUP_HUGETLB
+	if (rg->css)
+		css_put(rg->css);
+#endif
+}
 static bool has_same_uncharge_info(struct file_region *rg,
 				   struct file_region *org)
 {
@@ -316,6 +335,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
 		prg->to = rg->to;
 		list_del(&rg->link);
+		put_uncharge_info(rg);
 		kfree(rg);
 		rg = prg;
@@ -327,6 +347,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
 		nrg->from = rg->from;
 		list_del(&rg->link);
+		put_uncharge_info(rg);
 		kfree(rg);
 	}
 }
@@ -662,7 +683,7 @@ static long region_del(struct resv_map *resv, long f, long t)
 			del += t - f;
 			hugetlb_cgroup_uncharge_file_region(
-				resv, rg, t - f);
+				resv, rg, t - f, false);
 			/* New entry for end of split region */
 			nrg->from = t;
@@ -683,7 +704,7 @@ static long region_del(struct resv_map *resv, long f, long t)
 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
 			del += rg->to - rg->from;
 			hugetlb_cgroup_uncharge_file_region(resv, rg,
-							    rg->to - rg->from);
+							    rg->to - rg->from, true);
 			list_del(&rg->link);
 			kfree(rg);
 			continue;
@@ -691,13 +712,13 @@ static long region_del(struct resv_map *resv, long f, long t)
 		if (f <= rg->from) {	/* Trim beginning of region */
 			hugetlb_cgroup_uncharge_file_region(resv, rg,
-							    t - rg->from);
+							    t - rg->from, false);
 			del += t - rg->from;
 			rg->from = t;
 		} else {		/* Trim end of region */
 			hugetlb_cgroup_uncharge_file_region(resv, rg,
-							    rg->to - f);
+							    rg->to - f, false);
 			del += rg->to - f;
 			rg->to = f;
@@ -5187,6 +5208,10 @@ bool hugetlb_reserve_pages(struct inode *inode,
 		 */
 		long rsv_adjust;
+		/*
+		 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
+		 * reference to h_cg->css. See comment below for detail.
+		 */
 		hugetlb_cgroup_uncharge_cgroup_rsvd(
 			hstate_index(h),
 			(chg - add) * pages_per_huge_page(h), h_cg);
@@ -5194,6 +5219,14 @@ bool hugetlb_reserve_pages(struct inode *inode,
 		rsv_adjust = hugepage_subpool_put_pages(spool,
 							chg - add);
 		hugetlb_acct_memory(h, -rsv_adjust);
+	} else if (h_cg) {
+		/*
+		 * The file_regions will hold their own reference to
+		 * h_cg->css. So we should release the reference held
+		 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
+		 * done.
+		 */
+		hugetlb_cgroup_put_rsvd_cgroup(h_cg);
 	}
 }
 	return true;
...
@@ -391,7 +391,8 @@ void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
 void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
 					 struct file_region *rg,
-					 unsigned long nr_pages)
+					 unsigned long nr_pages,
+					 bool region_del)
 {
 	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
 		return;
@@ -400,7 +401,12 @@ void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
 	    !resv->reservation_counter) {
 		page_counter_uncharge(rg->reservation_counter,
 				      nr_pages * resv->pages_per_hpage);
-		css_put(rg->css);
+		/*
+		 * Only do css_put(rg->css) when we delete the entire region
+		 * because one file_region must hold exactly one css reference.
+		 */
+		if (region_del)
+			css_put(rg->css);
 	}
 }
...
@@ -12,6 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/kcsan-checks.h>
 #include <linux/kfence.h>
+#include <linux/kmemleak.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>
@@ -480,6 +481,14 @@ static bool __init kfence_init_pool(void)
 		addr += 2 * PAGE_SIZE;
 	}
+	/*
+	 * The pool is live and will never be deallocated from this point on.
+	 * Remove the pool object from the kmemleak object tree, as it would
+	 * otherwise overlap with allocations returned by kfence_alloc(), which
+	 * are registered with kmemleak through the slab post-alloc hook.
+	 */
+	kmemleak_free(__kfence_pool);
 	return true;
 err:
...
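The kfence hunk above applies a general pattern: when a large region is carved up by an allocator whose individual objects are themselves tracked by kmemleak, the enclosing region is dropped from kmemleak's tree so the per-object registrations do not overlap with it. A hedged sketch of that pattern with hypothetical names (it assumes, as I understand it, that memblock_alloc() registers the returned region with kmemleak):

	/* Hypothetical sketch: drop a long-lived backing pool from kmemleak so
	 * that objects later handed out from inside it can be tracked on their
	 * own without overlapping the pool object. */
	#include <linux/init.h>
	#include <linux/kmemleak.h>
	#include <linux/memblock.h>
	#include <linux/mm.h>

	static char *pool_base;

	static void __init pool_init(size_t pool_size)
	{
		pool_base = memblock_alloc(pool_size, PAGE_SIZE);
		if (!pool_base)
			return;

		/* assumed: memblock_alloc() registered pool_base with kmemleak;
		 * remove it, since per-object allocations from the pool are
		 * registered individually later. */
		kmemleak_free(pool_base);
	}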
@@ -97,6 +97,7 @@
 #include <linux/atomic.h>
 #include <linux/kasan.h>
+#include <linux/kfence.h>
 #include <linux/kmemleak.h>
 #include <linux/memory_hotplug.h>
@@ -589,7 +590,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	atomic_set(&object->use_count, 1);
 	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
-	object->size = size;
+	object->size = kfence_ksize((void *)ptr) ?: size;
 	object->excess_ref = 0;
 	object->min_count = min_count;
 	object->count = 0;	/* white color initially */
...
@@ -501,10 +501,33 @@ static int mn_hlist_invalidate_range_start(
 						"");
 				WARN_ON(mmu_notifier_range_blockable(range) ||
 					_ret != -EAGAIN);
+				/*
+				 * We call all the notifiers on any EAGAIN,
+				 * there is no way for a notifier to know if
+				 * its start method failed, thus a start that
+				 * does EAGAIN can't also do end.
+				 */
+				WARN_ON(ops->invalidate_range_end);
 				ret = _ret;
 			}
 		}
 	}
+	if (ret) {
+		/*
+		 * Must be non-blocking to get here. If there are multiple
+		 * notifiers and one or more failed start, any that succeeded
+		 * start are expecting their end to be called. Do so now.
+		 */
+		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
+					 hlist, srcu_read_lock_held(&srcu)) {
+			if (!subscription->ops->invalidate_range_end)
+				continue;
+			subscription->ops->invalidate_range_end(subscription,
+								range);
+		}
+	}
 	srcu_read_unlock(&srcu, id);
 	return ret;
...
@@ -1346,8 +1346,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			page = list_entry(pos, struct page, lru);
 			zhdr = page_address(page);
-			if (test_bit(PAGE_HEADLESS, &page->private))
+			if (test_bit(PAGE_HEADLESS, &page->private)) {
+				/*
+				 * For non-headless pages, we wait to do this
+				 * until we have the page lock to avoid racing
+				 * with __z3fold_alloc(). Headless pages don't
+				 * have a lock (and __z3fold_alloc() will never
+				 * see them), but we still need to test and set
+				 * PAGE_CLAIMED to avoid racing with
+				 * z3fold_free(), so just do it now before
+				 * leaving the loop.
+				 */
+				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+					continue;
 				break;
+			}
 			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
 				zhdr = NULL;
...
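PAGE_CLAIMED acts as a per-page ownership flag here: test_and_set_bit() atomically sets the bit and returns its previous value, so exactly one of two racing paths (reclaim above, z3fold_free() elsewhere) wins the page. A small sketch of that claim pattern with hypothetical names:

	/* Hypothetical sketch of the atomic claim pattern: only the caller that
	 * flips the bit from 0 to 1 proceeds; everyone else backs off. */
	#include <linux/bitops.h>

	#define MY_CLAIMED_BIT	0

	static int try_claim(unsigned long *flags)
	{
		/* test_and_set_bit() returns the old bit value atomically */
		if (test_and_set_bit(MY_CLAIMED_BIT, flags))
			return 0;	/* someone else already owns it */

		return 1;		/* we won the race and now own the object */
	}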
@@ -101,7 +101,7 @@ endef
 ifeq ($(CAN_BUILD_I386),1)
 $(BINARIES_32): CFLAGS += -m32
 $(BINARIES_32): LDLIBS += -lrt -ldl -lm
-$(BINARIES_32): %_32: %.c
+$(BINARIES_32): $(OUTPUT)/%_32: %.c
 	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
 endif
@@ -109,7 +109,7 @@ endif
 ifeq ($(CAN_BUILD_X86_64),1)
 $(BINARIES_64): CFLAGS += -m64
 $(BINARIES_64): LDLIBS += -lrt -ldl
-$(BINARIES_64): %_64: %.c
+$(BINARIES_64): $(OUTPUT)/%_64: %.c
 	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
 endif
...