Commit 7c903253 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "23 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (23 commits)
  mm, memory_hotplug: fix off-by-one in is_pageblock_removable
  mm: don't let userspace spam allocations warnings
  slub: fix a crash with SLUB_DEBUG + KASAN_SW_TAGS
  kasan, slab: remove redundant kasan_slab_alloc hooks
  kasan, slab: make freelist stored without tags
  kasan, slab: fix conflicts with CONFIG_HARDENED_USERCOPY
  kasan: prevent tracing of tags.c
  kasan: fix random seed generation for tag-based mode
  tmpfs: fix link accounting when a tmpfile is linked in
  psi: avoid divide-by-zero crash inside virtual machines
  mm: handle lru_add_drain_all for UP properly
  mm, page_alloc: fix a division by zero error when boosting watermarks v2
  mm/debug.c: fix __dump_page() for poisoned pages
  proc, oom: do not report alien mms when setting oom_score_adj
  slub: fix SLAB_CONSISTENCY_CHECKS + KASAN_SW_TAGS
  kasan, slub: fix more conflicts with CONFIG_SLAB_FREELIST_HARDENED
  kasan, slub: fix conflicts with CONFIG_SLAB_FREELIST_HARDENED
  kasan, slub: move kasan_poison_slab hook before page_address
  kmemleak: account for tagged pointers when calculating pointer range
  kasan, kmemleak: pass tagged pointers to kmemleak
  ...
parents f6163d67 891cb2a7
@@ -339,6 +339,9 @@ void __init setup_arch(char **cmdline_p)
 	smp_init_cpus();
 	smp_build_mpidr_hash();
 
+	/* Init percpu seeds for random tags after cpus are set up. */
+	kasan_init_tags();
+
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	/*
 	 * Make sure init_thread_info.ttbr0 always generates translation
......
@@ -252,8 +252,6 @@ void __init kasan_init(void)
 	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
-	kasan_init_tags();
-
 	/* At this point kasan is fully initialized. Enable error messages */
 	init_task.kasan_depth = 0;
 	pr_info("KernelAddressSanitizer initialized\n");
......
@@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
 			task_lock(p);
 			if (!p->vfork_done && process_shares_mm(p, mm)) {
-				pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
-					task_pid_nr(p), p->comm,
-					p->signal->oom_score_adj, oom_adj,
-					task_pid_nr(task), task->comm);
 				p->signal->oom_score_adj = oom_adj;
 				if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
 					p->signal->oom_score_adj_min = (short)oom_adj;
......
@@ -550,6 +550,7 @@ static void __init free_initrd(void)
 	initrd_end = 0;
 }
 
+#ifdef CONFIG_BLK_DEV_RAM
 #define BUF_SIZE 1024
 static void __init clean_rootfs(void)
 {
@@ -596,6 +597,7 @@ static void __init clean_rootfs(void)
 	ksys_close(fd);
 	kfree(buf);
 }
+#endif
 
 static int __init populate_rootfs(void)
 {
@@ -638,10 +640,8 @@ static int __init populate_rootfs(void)
 		printk(KERN_INFO "Unpacking initramfs...\n");
 		err = unpack_to_rootfs((char *)initrd_start,
 			initrd_end - initrd_start);
-		if (err) {
+		if (err)
 			printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
-			clean_rootfs();
-		}
 		free_initrd();
 #endif
 	}
......
@@ -322,7 +322,7 @@ static bool update_stats(struct psi_group *group)
 	expires = group->next_update;
 	if (now < expires)
 		goto out;
-	if (now - expires > psi_period)
+	if (now - expires >= psi_period)
 		missed_periods = div_u64(now - expires, psi_period);
 
 	/*
......
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = {
 void __dump_page(struct page *page, const char *reason)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping;
 	bool page_poisoned = PagePoisoned(page);
 	int mapcount;
@@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason)
 		goto hex_only;
 	}
 
+	mapping = page_mapping(page);
+
 	/*
 	 * Avoid VM_BUG_ON() in page_mapcount().
 	 * page->_mapcount space in struct page is used by sl[aou]b pages to
......
@@ -7,6 +7,8 @@ KCOV_INSTRUMENT := n
 
 CFLAGS_REMOVE_common.o = -pg
 CFLAGS_REMOVE_generic.o = -pg
+CFLAGS_REMOVE_tags.o = -pg
 
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
......
@@ -361,10 +361,15 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
  * get different tags.
  */
 static u8 assign_tag(struct kmem_cache *cache, const void *object,
-			bool init, bool krealloc)
+			bool init, bool keep_tag)
 {
-	/* Reuse the same tag for krealloc'ed objects. */
-	if (krealloc)
+	/*
+	 * 1. When an object is kmalloc()'ed, two hooks are called:
+	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
+	 *    tag only in the first one.
+	 * 2. We reuse the same tag for krealloc'ed objects.
+	 */
+	if (keep_tag)
 		return get_tag(object);
 
 	/*
@@ -405,12 +410,6 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
 	return (void *)object;
 }
 
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
-					gfp_t flags)
-{
-	return kasan_kmalloc(cache, object, cache->object_size, flags);
-}
-
 static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 {
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
@@ -467,7 +466,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 }
 
 static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
-				size_t size, gfp_t flags, bool krealloc)
+				size_t size, gfp_t flags, bool keep_tag)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
@@ -485,7 +484,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
 				KASAN_SHADOW_SCALE_SIZE);
 
 	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		tag = assign_tag(cache, object, false, krealloc);
+		tag = assign_tag(cache, object, false, keep_tag);
 
 	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
 	kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -498,10 +497,16 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
 	return set_tag(object, tag);
 }
 
+void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
+					gfp_t flags)
+{
+	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
+}
+
 void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
 					size_t size, gfp_t flags)
 {
-	return __kasan_kmalloc(cache, object, size, flags, false);
+	return __kasan_kmalloc(cache, object, size, flags, true);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
......
@@ -46,7 +46,7 @@ void kasan_init_tags(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		per_cpu(prng_state, cpu) = get_random_u32();
+		per_cpu(prng_state, cpu) = (u32)get_cycles();
 }
 
 /*
......
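The two arm64 hunks near the top of this series move kasan_init_tags() into setup_arch(), after the CPU masks have been populated, and the hunk above switches the per-CPU seed source to the cycle counter, which is available that early in boot. Below is a minimal userspace sketch of the same idea, not kernel code: the CPU count, the rdtsc read and the LCG step are illustrative stand-ins for the kernel's per-CPU tag generator.

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>			/* __rdtsc(), x86-only */

#define NUM_CPUS 4			/* illustrative */

static uint32_t prng_state[NUM_CPUS];

static void init_tags(void)
{
	int cpu;

	/* Mirror of the loop above: one independent seed per CPU. */
	for (cpu = 0; cpu < NUM_CPUS; cpu++)
		prng_state[cpu] = (uint32_t)__rdtsc();
}

static uint8_t random_tag(int cpu)
{
	/* Simple LCG step; a stand-in for the kernel's generator. */
	prng_state[cpu] = prng_state[cpu] * 1664525u + 1013904223u;
	return (uint8_t)(prng_state[cpu] >> 24);
}

int main(void)
{
	int cpu;

	init_tags();
	for (cpu = 0; cpu < NUM_CPUS; cpu++)
		printf("cpu%d: first tag 0x%02x\n", cpu, random_tag(cpu));
	return 0;
}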
@@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	unsigned long flags;
 	struct kmemleak_object *object, *parent;
 	struct rb_node **link, *rb_parent;
+	unsigned long untagged_ptr;
 
 	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 	if (!object) {
@@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	write_lock_irqsave(&kmemleak_lock, flags);
 
-	min_addr = min(min_addr, ptr);
-	max_addr = max(max_addr, ptr + size);
+	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+	min_addr = min(min_addr, untagged_ptr);
+	max_addr = max(max_addr, untagged_ptr + size);
 	link = &object_tree_root.rb_node;
 	rb_parent = NULL;
 	while (*link) {
@@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end,
 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 	unsigned long flags;
+	unsigned long untagged_ptr;
 
 	read_lock_irqsave(&kmemleak_lock, flags);
 	for (ptr = start; ptr < end; ptr++) {
@@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end,
 		pointer = *ptr;
 		kasan_enable_current();
 
-		if (pointer < min_addr || pointer >= max_addr)
+		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
 			continue;
 
 		/*
......
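For reference, a small userspace sketch, not kernel code, of why the kmemleak hunks above strip the software tag before the min_addr/max_addr comparisons: with tag-based KASAN the top byte of a pointer carries a tag, so a raw comparison against an untagged address range can misclassify a valid pointer and make the scanner skip it. The 0xff "no tag" byte and the 56-bit shift mirror the arm64 top-byte-ignore layout assumed by software tag-based KASAN; the helper names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56
#define TAG_MASK  (0xffULL << TAG_SHIFT)

static uint64_t set_tag(uint64_t addr, uint8_t tag)
{
	return (addr & ~TAG_MASK) | ((uint64_t)tag << TAG_SHIFT);
}

static uint64_t reset_tag(uint64_t addr)
{
	return set_tag(addr, 0xff);	/* 0xff == "untagged" kernel pointer */
}

int main(void)
{
	uint64_t min_addr = 0xffff000012340000ULL;
	uint64_t max_addr = 0xffff000012350000ULL;
	uint64_t obj      = set_tag(0xffff000012341000ULL, 0x2a);
	uint64_t untagged = reset_tag(obj);

	/* Raw comparison misclassifies the tagged pointer... */
	printf("raw:      in range? %d\n", obj >= min_addr && obj < max_addr);
	/* ...stripping the tag first restores the expected result. */
	printf("untagged: in range? %d\n",
	       untagged >= min_addr && untagged < max_addr);
	return 0;
}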
@@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page)
 	return PageBuddy(page) && page_order(page) >= pageblock_order;
 }
 
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
+
 	/* Ensure the starting page is pageblock-aligned */
-	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+	BUG_ON(pfn & (pageblock_nr_pages - 1));
 
 	/* If the entire pageblock is free, move to the end of free page */
 	if (pageblock_free(page)) {
@@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page)
 		/* be careful. we don't have locks, page_order can be changed.*/
 		order = page_order(page);
 		if ((order < MAX_ORDER) && (order >= pageblock_order))
-			return page + (1 << order);
+			return pfn + (1 << order);
 	}
 
-	return page + pageblock_nr_pages;
+	return pfn + pageblock_nr_pages;
 }
 
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
 	struct zone *zone;
-	unsigned long pfn;
 
 	/*
 	 * We have to be careful here because we are iterating over memory
@@ -1232,13 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
 /* Checks if this range of memory is likely to be hot-removable. */
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-	struct page *page = pfn_to_page(start_pfn);
-	unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
-	struct page *end_page = pfn_to_page(end_pfn);
+	unsigned long end_pfn, pfn;
+
+	end_pfn = min(start_pfn + nr_pages,
+			zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
 
 	/* Check the starting page of each pageblock within the range */
-	for (; page < end_page; page = next_active_pageblock(page)) {
-		if (!is_pageblock_removable_nolock(page))
+	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+		if (!is_pageblock_removable_nolock(pfn))
 			return false;
 		cond_resched();
 	}
......
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
 			      nodemask_t *nodes)
 {
 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
-	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
 
 	if (copy > nbytes) {
 		if (copy > PAGE_SIZE)
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
 	int uninitialized_var(pval);
 	nodemask_t nodes;
 
-	if (nmask != NULL && maxnode < MAX_NUMNODES)
+	if (nmask != NULL && maxnode < nr_node_ids)
 		return -EINVAL;
 
 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
 	unsigned long nr_bits, alloc_size;
 	DECLARE_BITMAP(bm, MAX_NUMNODES);
 
-	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask)
......
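A worked example of the sizing change above, assuming a kernel built with MAX_NUMNODES = 1024 running on a machine where only 4 node IDs are possible: the old code sized and validated the user's node mask against the compile-time maximum, the new code against nr_node_ids. A tiny C sketch of the arithmetic, with BITS_TO_LONGS reimplemented locally for the example.

#include <stdio.h>

#define BITS_PER_LONG	 64
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long max_numnodes = 1024;	/* compile-time maximum (assumed) */
	unsigned long nr_node_ids  = 4;		/* node IDs possible on this box */

	/* 16 longs * 8 bytes = 128 bytes before the change... */
	printf("old nbytes: %lu\n", BITS_TO_LONGS(max_numnodes) * sizeof(long));
	/* ...1 long * 8 bytes = 8 bytes after it. */
	printf("new nbytes: %lu\n", BITS_TO_LONGS(nr_node_ids) * sizeof(long));
	return 0;
}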
@@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone)
 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
 			watermark_boost_factor, 10000);
+
+	/*
+	 * high watermark may be uninitialised if fragmentation occurs
+	 * very early in boot so do not boost. We do not fall
+	 * through and boost by pageblock_nr_pages as failing
+	 * allocations that early means that reclaim is not going
+	 * to help and it may even be impossible to reclaim the
+	 * boosted watermark resulting in a hang.
+	 */
+	if (!max_boost)
+		return;
+
 	max_boost = max(pageblock_nr_pages, max_boost);
 
 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
......
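A small arithmetic sketch of the guard added above: mult_frac(x, num, den) computes x * num / den, so if the high watermark is still zero this early in boot the computed cap is zero and boost_watermark() now returns instead of clamping the cap up to pageblock_nr_pages and boosting anyway. The numbers below (512-page pageblocks, a 15000 boost factor) are illustrative.

#include <stdio.h>

/* x * num / den; the kernel macro also avoids intermediate overflow. */
static unsigned long mult_frac(unsigned long x, unsigned long num,
			       unsigned long den)
{
	return x * num / den;
}

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned long pageblock_nr_pages = 512;		/* 2MB blocks of 4KB pages */
	unsigned long watermark_boost_factor = 15000;	/* i.e. 150% */
	unsigned long wmark_high[] = { 0, 12288 };	/* uninitialised vs. normal */

	for (int i = 0; i < 2; i++) {
		unsigned long max_boost = mult_frac(wmark_high[i],
					watermark_boost_factor, 10000);

		if (!max_boost) {			/* the new early return */
			printf("wmark_high=%lu -> no boost\n", wmark_high[i]);
			continue;
		}
		max_boost = max_ul(pageblock_nr_pages, max_boost);
		printf("wmark_high=%lu -> max_boost=%lu pages\n",
		       wmark_high[i], max_boost);
	}
	return 0;
}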
@@ -2854,10 +2854,14 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
 	 * No ordinary (disk based) filesystem counts links as inodes;
 	 * but each new link needs a new dentry, pinning lowmem, and
 	 * tmpfs dentries cannot be pruned until they are unlinked.
+	 * But if an O_TMPFILE file is linked into the tmpfs, the
+	 * first link must skip that, to get the accounting right.
 	 */
-	ret = shmem_reserve_inode(inode->i_sb);
-	if (ret)
-		goto out;
+	if (inode->i_nlink) {
+		ret = shmem_reserve_inode(inode->i_sb);
+		if (ret)
+			goto out;
+	}
 
 	dir->i_size += BOGO_DIRENT_SIZE;
 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
......
@@ -2359,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 	void *freelist;
 	void *addr = page_address(page);
 
-	page->s_mem = kasan_reset_tag(addr) + colour_off;
+	page->s_mem = addr + colour_off;
 	page->active = 0;
 
 	if (OBJFREELIST_SLAB(cachep))
@@ -2368,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 		/* Slab management obj is off-slab. */
 		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
 					      local_flags, nodeid);
+		freelist = kasan_reset_tag(freelist);
 		if (!freelist)
 			return NULL;
 	} else {
@@ -2681,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 
 	offset *= cachep->colour_off;
 
+	/*
+	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
+	 * page_address() in the latter returns a non-tagged pointer,
+	 * as it should be for slab pages.
+	 */
+	kasan_poison_slab(page);
+
 	/* Get slab management. */
 	freelist = alloc_slabmgmt(cachep, page, offset,
 			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
@@ -2689,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 
 	slab_map_pages(cachep, page, freelist);
 
-	kasan_poison_slab(page);
 	cache_init_objs(cachep, page);
 
 	if (gfpflags_allow_blocking(local_flags))
@@ -3540,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	ret = kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
@@ -3630,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-	ret = kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -4408,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	unsigned int objnr;
 	unsigned long offset;
 
+	ptr = kasan_reset_tag(ptr);
+
 	/* Find and validate object. */
 	cachep = page->slab_cache;
 	objnr = obj_to_index(cachep, page, (void *)ptr);
......
@@ -437,11 +437,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 
 	flags &= gfp_allowed_mask;
 	for (i = 0; i < size; i++) {
-		void *object = p[i];
-
-		kmemleak_alloc_recursive(object, s->object_size, 1,
+		p[i] = kasan_slab_alloc(s, p[i], flags);
+		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
+		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);
-		p[i] = kasan_slab_alloc(s, object, flags);
 	}
 
 	if (memcg_kmem_enabled())
......
@@ -1228,8 +1228,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	flags |= __GFP_COMP;
 	page = alloc_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
-	kmemleak_alloc(ret, size, 1, flags);
 	ret = kasan_kmalloc_large(ret, size, flags);
+	/* As ret might get tagged, call kmemleak hook after KASAN. */
+	kmemleak_alloc(ret, size, 1, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
......
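The slab.h and slab_common.c hunks above reorder the allocation hooks so that kmemleak is handed the pointer KASAN actually returns to callers. A toy userspace model, not the real kmemleak API, of why the old order went wrong: if the leak tracker records the pre-tag pointer value, a later free with the tagged pointer that callers really hold no longer matches any tracked object.

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56
#define TAG_MASK  (0xffULL << TAG_SHIFT)
#define MAX_OBJS  8

static uint64_t tracked[MAX_OBJS];	/* toy object table keyed by pointer value */

static void track_alloc(uint64_t ptr)
{
	for (int i = 0; i < MAX_OBJS; i++)
		if (!tracked[i]) {
			tracked[i] = ptr;
			return;
		}
}

static int track_free(uint64_t ptr)
{
	for (int i = 0; i < MAX_OBJS; i++)
		if (tracked[i] == ptr) {
			tracked[i] = 0;
			return 1;
		}
	return 0;			/* "freeing unknown object" */
}

static uint64_t assign_tag(uint64_t ptr, uint8_t tag)
{
	return (ptr & ~TAG_MASK) | ((uint64_t)tag << TAG_SHIFT);
}

int main(void)
{
	uint64_t obj = 0xffff000012345000ULL;

	/* Old ordering: the tracker records the pre-tag pointer... */
	track_alloc(obj);
	uint64_t tagged = assign_tag(obj, 0x3c);	/* what the caller gets */
	/* ...so freeing with the pointer the caller holds does not match. */
	printf("old order: free matched? %d\n", track_free(tagged));

	/* New ordering: tag first, then give the final pointer to the tracker. */
	tagged = assign_tag(obj, 0x3c);
	track_alloc(tagged);
	printf("new order: free matched? %d\n", track_free(tagged));
	return 0;
}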
@@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 				 unsigned long ptr_addr)
 {
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
-	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
+	/*
+	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+	 * Normally, this doesn't cause any issues, as both set_freepointer()
+	 * and get_freepointer() are called with a pointer with the same tag.
+	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
+	 * example, when __free_slub() iterates over objects in a cache, it
+	 * passes untagged pointers to check_object(). check_object() in turns
+	 * calls get_freepointer() with an untagged pointer, which causes the
+	 * freepointer to be restored incorrectly.
+	 */
+	return (void *)((unsigned long)ptr ^ s->random ^
+			(unsigned long)kasan_reset_tag((void *)ptr_addr));
 #else
 	return ptr;
 #endif
@@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 			__p < (__addr) + (__objects) * (__s)->size; \
 			__p += (__s)->size)
 
-#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
-		__idx <= __objects; \
-		__p += (__s)->size, __idx++)
-
 /* Determine object index from a given position */
 static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
-	return (p - addr) / s->size;
+	return (kasan_reset_tag(p) - addr) / s->size;
 }
 
 static inline unsigned int order_objects(unsigned int order, unsigned int size)
@@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
 		return 1;
 
 	base = page_address(page);
+	object = kasan_reset_tag(object);
 	object = restore_red_left(s, object);
 	if (object < base || object >= base + page->objects * s->size ||
 		(object - base) % s->size) {
@@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
+static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+{
+	if (!(s->flags & SLAB_POISON))
+		return;
+
+	metadata_access_enable();
+	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	metadata_access_disable();
+}
+
 static inline int alloc_consistency_checks(struct kmem_cache *s,
 					struct page *page,
 					void *object, unsigned long addr)
@@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
+static inline void setup_page_debug(struct kmem_cache *s,
+			void *addr, int order) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1374,8 +1393,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
  */
 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
+	ptr = kasan_kmalloc_large(ptr, size, flags);
+	/* As ptr might get tagged, call kmemleak hook after KASAN. */
 	kmemleak_alloc(ptr, size, 1, flags);
-	return kasan_kmalloc_large(ptr, size, flags);
+	return ptr;
 }
 
 static __always_inline void kfree_hook(void *x)
@@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (page_is_pfmemalloc(page))
 		SetPageSlabPfmemalloc(page);
 
+	kasan_poison_slab(page);
+
 	start = page_address(page);
 
-	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << order);
-
-	kasan_poison_slab(page);
+	setup_page_debug(s, start, order);
 
 	shuffle = shuffle_freelist(s, page);
 
 	if (!shuffle) {
-		for_each_object_idx(p, idx, s, start, page->objects) {
-			if (likely(idx < page->objects)) {
-				next = p + s->size;
-				next = setup_object(s, page, next);
-				set_freepointer(s, p, next);
-			} else
-				set_freepointer(s, p, NULL);
-		}
 		start = fixup_red_left(s, start);
 		start = setup_object(s, page, start);
 		page->freelist = start;
+		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
+			next = p + s->size;
+			next = setup_object(s, page, next);
+			set_freepointer(s, p, next);
+			p = next;
+		}
+		set_freepointer(s, p, NULL);
 	}
 
 	page->inuse = page->objects;
......
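The freelist_ptr() comment at the top of the slub.c changes above describes an encode/decode asymmetry: the hardened freelist stores each free pointer XORed with a per-cache secret and with the address the pointer lives at, so encoding with a tagged slot address but decoding with the untagged one corrupts the pointer unless the tag is stripped on both sides. A standalone userspace sketch of that failure mode and of the fix; the tag layout, addresses and random value are illustrative.

#include <stdint.h>
#include <stdio.h>

#define TAG_MASK (0xffULL << 56)

static uint64_t reset_tag(uint64_t addr)
{
	return addr | TAG_MASK;		/* force the "untagged" top byte */
}

/* Pre-fix behaviour: XOR with the (possibly tagged) storage address. */
static uint64_t freelist_ptr_old(uint64_t ptr, uint64_t random, uint64_t ptr_addr)
{
	return ptr ^ random ^ ptr_addr;
}

/* Post-fix behaviour: strip the tag from the storage address first. */
static uint64_t freelist_ptr_new(uint64_t ptr, uint64_t random, uint64_t ptr_addr)
{
	return ptr ^ random ^ reset_tag(ptr_addr);
}

int main(void)
{
	uint64_t random   = 0x1234abcd5678ef01ULL;	/* s->random stand-in */
	uint64_t ptr      = 0xffff000000001000ULL;	/* next free object */
	uint64_t tagged   = 0x2bff000000002000ULL;	/* slot address, tag 0x2b */
	uint64_t untagged = reset_tag(tagged);

	/* Encode with the tagged slot address, decode with the untagged one,
	 * which is effectively what the SLUB debug paths end up doing. */
	printf("old: %#llx\n", (unsigned long long)
	       freelist_ptr_old(freelist_ptr_old(ptr, random, tagged),
				random, untagged));
	printf("new: %#llx\n", (unsigned long long)
	       freelist_ptr_new(freelist_ptr_new(ptr, random, tagged),
				random, untagged));
	printf("expected: %#llx\n", (unsigned long long)ptr);
	return 0;
}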
@@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
-static bool need_activate_page_drain(int cpu)
-{
-	return false;
-}
-
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
@@ -653,13 +648,15 @@ void lru_add_drain(void)
 	put_cpu();
 }
 
+#ifdef CONFIG_SMP
+
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
 
-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -702,6 +699,12 @@ void lru_add_drain_all(void)
 
 	mutex_unlock(&lock);
 }
+#else
+void lru_add_drain_all(void)
+{
+	lru_add_drain();
+}
+#endif
 
 /**
  * release_pages - batched put_page()
......
...@@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len) ...@@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len)
{ {
void *p; void *p;
p = kmalloc_track_caller(len, GFP_USER); p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
if (!p) if (!p)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
......