Commit 59250f8a authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "14 patches.

  Subsystems affected by this patch series: MAINTAINERS, binfmt, and
  mm (tmpfs, secretmem, kasan, kfence, pagealloc, zram, compaction,
  hugetlb, vmalloc, and kmemleak)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: kmemleak: take a full lowmem check in kmemleak_*_phys()
  mm/vmalloc: fix spinning drain_vmap_work after reading from /proc/vmcore
  revert "fs/binfmt_elf: use PT_LOAD p_align values for static PIE"
  revert "fs/binfmt_elf: fix PT_LOAD p_align values for loaders"
  hugetlb: do not demote poisoned hugetlb pages
  mm: compaction: fix compiler warning when CONFIG_COMPACTION=n
  mm: fix unexpected zeroed page mapping with zram swap
  mm, page_alloc: fix build_zonerefs_node()
  mm, kfence: support kmem_dump_obj() for KFENCE objects
  kasan: fix hw tags enablement when KUNIT tests are disabled
  irq_work: use kasan_record_aux_stack_noalloc() record callstack
  mm/secretmem: fix panic when growing a memfd_secret
  tmpfs: fix regressions from wider use of ZERO_PAGE
  MAINTAINERS: Broadcom internal lists aren't maintainers
parents ce673f63 23c2d497
arch/x86/include/asm/io.h
@@ -210,8 +210,6 @@ void __iomem *ioremap(resource_size_t offset, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
 #define iounmap iounmap
-extern void set_iounmap_nonlazy(void);
 #ifdef __KERNEL__
 void memcpy_fromio(void *, const volatile void __iomem *, size_t);
...
arch/x86/kernel/crash_dump_64.c
@@ -37,7 +37,6 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
 	} else
 		memcpy(buf, vaddr + offset, csize);
-	set_iounmap_nonlazy();
 	iounmap((void __iomem *)vaddr);
 	return csize;
 }
...
fs/binfmt_elf.c
@@ -1117,11 +1117,11 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		 * independently randomized mmap region (0 load_bias
 		 * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
 		 */
-		alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
-		if (interpreter || alignment > ELF_MIN_ALIGN) {
+		if (interpreter) {
 			load_bias = ELF_ET_DYN_BASE;
 			if (current->flags & PF_RANDOMIZE)
 				load_bias += arch_mmap_rnd();
+			alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
 			if (alignment)
 				load_bias &= ~(alignment - 1);
 			elf_flags |= MAP_FIXED_NOREPLACE;
...
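The two binfmt_elf reverts above restore the previous behaviour: a static PIE's own PT_LOAD p_align no longer influences where the kernel places it, and the alignment mask is only applied when an interpreter is present. For readers who want to see what alignment a given binary actually requests, here is a small userspace sketch (not part of the patch; 64-bit ELF only, file and identifier names are illustrative) that computes the same "largest power-of-two PT_LOAD p_align" that the kernel's maximum_alignment() helper looks at:

/* maxalign.c - print the largest power-of-two PT_LOAD p_align of a 64-bit ELF. */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Xword max_align = 0;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || pread(fd, &eh, sizeof(eh), 0) != (ssize_t)sizeof(eh)) {
		perror(argv[1]);
		return 1;
	}
	for (int i = 0; i < eh.e_phnum; i++) {
		Elf64_Phdr ph;
		off_t off = eh.e_phoff + (off_t)i * eh.e_phentsize;

		if (pread(fd, &ph, sizeof(ph), off) != (ssize_t)sizeof(ph)) {
			perror("phdr");
			return 1;
		}
		/* Like the kernel helper, only power-of-two alignments count. */
		if (ph.p_type == PT_LOAD && ph.p_align &&
		    (ph.p_align & (ph.p_align - 1)) == 0 && ph.p_align > max_align)
			max_align = ph.p_align;
	}
	printf("max PT_LOAD p_align: 0x%llx\n", (unsigned long long)max_align);
	close(fd);
	return 0;
}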
include/linux/kfence.h
@@ -204,6 +204,22 @@ static __always_inline __must_check bool kfence_free(void *addr)
  */
 bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
 #else /* CONFIG_KFENCE */
 static inline bool is_kfence_address(const void *addr) { return false; }
@@ -221,6 +237,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
 	return false;
 }
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+	return false;
+}
+#endif
 #endif
 #endif /* _LINUX_KFENCE_H */
kernel/irq_work.c
@@ -137,7 +137,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (!irq_work_claim(work))
 		return false;
-	kasan_record_aux_stack(work);
+	kasan_record_aux_stack_noalloc(work);
 	preempt_disable();
 	if (cpu != smp_processor_id()) {
...
mm/compaction.c
@@ -26,6 +26,11 @@
 #include "internal.h"
 #ifdef CONFIG_COMPACTION
+/*
+ * Fragmentation score check interval for proactive compaction purposes.
+ */
+#define HPAGE_FRAG_CHECK_INTERVAL_MSEC	(500)
 static inline void count_compact_event(enum vm_event_item item)
 {
 	count_vm_event(item);
@@ -50,11 +55,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 #define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
 #define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
-/*
- * Fragmentation score check interval for proactive compaction purposes.
- */
-static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
 /*
  * Page order with-respect-to which proactive compaction
  * calculates external fragmentation, which is used as
...
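The mm/compaction.c hunk only moves a constant: a file-scope static const that ends up unreferenced when CONFIG_COMPACTION=n triggers a "defined but not used" warning, while an unused macro never does. A tiny standalone illustration of that compiler behaviour (not kernel code; the identifier names are made up):

/* warn-demo.c - unused "static const" objects can warn; unused macros cannot.
 * Try: gcc -c -Wunused-const-variable warn-demo.c
 */
static const unsigned int CHECK_INTERVAL_MSEC = 500;	/* warns: defined but not used */

#define CHECK_INTERVAL_MSEC_MACRO 500			/* never warns when unused */

void touch_nothing(void)
{
}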
mm/filemap.c
@@ -1063,12 +1063,6 @@ void __init pagecache_init(void)
 		init_waitqueue_head(&folio_wait_table[i]);
 	page_writeback_init();
-	/*
-	 * tmpfs uses the ZERO_PAGE for reading holes: it is up-to-date,
-	 * and splice's page_cache_pipe_buf_confirm() needs to see that.
-	 */
-	SetPageUptodate(ZERO_PAGE(0));
 }
 /*
...
mm/hugetlb.c
@@ -3475,7 +3475,6 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
 	int nr_nodes, node;
 	struct page *page;
-	int rc = 0;
 	lockdep_assert_held(&hugetlb_lock);
@@ -3486,15 +3485,19 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 	}
 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
-		if (!list_empty(&h->hugepage_freelists[node])) {
-			page = list_entry(h->hugepage_freelists[node].next,
-					struct page, lru);
-			rc = demote_free_huge_page(h, page);
-			break;
+		list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
+			if (PageHWPoison(page))
+				continue;
+			return demote_free_huge_page(h, page);
 		}
 	}
-	return rc;
+	/*
+	 * Only way to get here is if all pages on free lists are poisoned.
+	 * Return -EBUSY so that caller will not retry.
+	 */
+	return -EBUSY;
 }
 #define HSTATE_ATTR_RO(_name)	\
...
mm/kasan/hw_tags.c
@@ -336,8 +336,6 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
 #endif
-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 void kasan_enable_tagging(void)
 {
 	if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
@@ -347,6 +345,9 @@ void kasan_enable_tagging(void)
 	else
 		hw_enable_tagging_sync();
 }
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 EXPORT_SYMBOL_GPL(kasan_enable_tagging);
 void kasan_force_async_fault(void)
...
mm/kasan/kasan.h
@@ -355,25 +355,27 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 #define hw_set_mem_tag_range(addr, size, tag, init) \
 			arch_set_mem_tag_range((addr), (size), (tag), (init))
+void kasan_enable_tagging(void);
 #else /* CONFIG_KASAN_HW_TAGS */
 #define hw_enable_tagging_sync()
 #define hw_enable_tagging_async()
 #define hw_enable_tagging_asymm()
+static inline void kasan_enable_tagging(void) { }
 #endif /* CONFIG_KASAN_HW_TAGS */
 #if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
-void kasan_enable_tagging(void);
 void kasan_force_async_fault(void);
-#else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
-static inline void kasan_enable_tagging(void) { }
 static inline void kasan_force_async_fault(void) { }
-#endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
 #ifdef CONFIG_KASAN_SW_TAGS
 u8 kasan_random_tag(void);
...
mm/kfence/core.c
@@ -231,27 +231,6 @@ static bool kfence_unprotect(unsigned long addr)
 	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
 }
-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
-{
-	long index;
-	/* The checks do not affect performance; only called from slow-paths. */
-	if (!is_kfence_address((void *)addr))
-		return NULL;
-	/*
-	 * May be an invalid index if called with an address at the edge of
-	 * __kfence_pool, in which case we would report an "invalid access"
-	 * error.
-	 */
-	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
-	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
-		return NULL;
-	return &kfence_metadata[index];
-}
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
 	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
...
mm/kfence/kfence.h
@@ -96,6 +96,27 @@ struct kfence_metadata {
 extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
+static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+{
+	long index;
+	/* The checks do not affect performance; only called from slow-paths. */
+	if (!is_kfence_address((void *)addr))
+		return NULL;
+	/*
+	 * May be an invalid index if called with an address at the edge of
+	 * __kfence_pool, in which case we would report an "invalid access"
+	 * error.
+	 */
+	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+		return NULL;
+	return &kfence_metadata[index];
+}
 /* KFENCE error types for report generation. */
 enum kfence_error_type {
 	KFENCE_ERROR_OOB,	/* Detected a out-of-bounds access. */
...
mm/kfence/report.c
@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
 	/* We encountered a memory safety error, taint the kernel! */
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
 }
+#ifdef CONFIG_PRINTK
+static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
+{
+	int i, j;
+	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
+	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
+		kp_stack[j] = (void *)track->stack_entries[i];
+	if (j < KS_ADDRS_COUNT)
+		kp_stack[j] = NULL;
+}
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
+	unsigned long flags;
+	if (!meta)
+		return false;
+	/*
+	 * If state is UNUSED at least show the pointer requested; the rest
+	 * would be garbage data.
+	 */
+	kpp->kp_ptr = object;
+	/* Requesting info an a never-used object is almost certainly a bug. */
+	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
+		return true;
+	raw_spin_lock_irqsave(&meta->lock, flags);
+	kpp->kp_slab = slab;
+	kpp->kp_slab_cache = meta->cache;
+	kpp->kp_objp = (void *)meta->addr;
+	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
+	if (meta->state == KFENCE_OBJECT_FREED)
+		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
+	/* get_stack_skipnr() ensures the first entry is outside allocator. */
+	kpp->kp_ret = kpp->kp_stack[0];
+	raw_spin_unlock_irqrestore(&meta->lock, flags);
+	return true;
+}
+#endif
mm/kmemleak.c
@@ -1132,7 +1132,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
 			       gfp_t gfp)
 {
-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
 		kmemleak_alloc(__va(phys), size, min_count, gfp);
 }
 EXPORT_SYMBOL(kmemleak_alloc_phys);
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
  */
 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
 {
-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
 		kmemleak_free_part(__va(phys), size);
 }
 EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1158,7 +1158,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
  */
 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
 {
-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
 		kmemleak_not_leak(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_not_leak_phys);
@@ -1170,7 +1170,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
  */
 void __ref kmemleak_ignore_phys(phys_addr_t phys)
 {
-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
 		kmemleak_ignore(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_ignore_phys);
...
mm/page_alloc.c
@@ -6131,7 +6131,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
 	do {
 		zone_type--;
 		zone = pgdat->node_zones + zone_type;
-		if (managed_zone(zone)) {
+		if (populated_zone(zone)) {
 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
 			check_highest_zone(zone_type);
 		}
...
mm/page_io.c
@@ -51,54 +51,6 @@ void end_swap_bio_write(struct bio *bio)
 	bio_put(bio);
 }
-static void swap_slot_free_notify(struct page *page)
-{
-	struct swap_info_struct *sis;
-	struct gendisk *disk;
-	swp_entry_t entry;
-	/*
-	 * There is no guarantee that the page is in swap cache - the software
-	 * suspend code (at least) uses end_swap_bio_read() against a non-
-	 * swapcache page. So we must check PG_swapcache before proceeding with
-	 * this optimization.
-	 */
-	if (unlikely(!PageSwapCache(page)))
-		return;
-	sis = page_swap_info(page);
-	if (data_race(!(sis->flags & SWP_BLKDEV)))
-		return;
-	/*
-	 * The swap subsystem performs lazy swap slot freeing,
-	 * expecting that the page will be swapped out again.
-	 * So we can avoid an unnecessary write if the page
-	 * isn't redirtied.
-	 * This is good for real swap storage because we can
-	 * reduce unnecessary I/O and enhance wear-leveling
-	 * if an SSD is used as the as swap device.
-	 * But if in-memory swap device (eg zram) is used,
-	 * this causes a duplicated copy between uncompressed
-	 * data in VM-owned memory and compressed data in
-	 * zram-owned memory. So let's free zram-owned memory
-	 * and make the VM-owned decompressed page *dirty*,
-	 * so the page should be swapped out somewhere again if
-	 * we again wish to reclaim it.
-	 */
-	disk = sis->bdev->bd_disk;
-	entry.val = page_private(page);
-	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
-		unsigned long offset;
-		offset = swp_offset(entry);
-		SetPageDirty(page);
-		disk->fops->swap_slot_free_notify(sis->bdev,
-				offset);
-	}
-}
 static void end_swap_bio_read(struct bio *bio)
 {
 	struct page *page = bio_first_page_all(bio);
@@ -114,7 +66,6 @@ static void end_swap_bio_read(struct bio *bio)
 	}
 	SetPageUptodate(page);
-	swap_slot_free_notify(page);
 out:
 	unlock_page(page);
 	WRITE_ONCE(bio->bi_private, NULL);
@@ -394,11 +345,6 @@ int swap_readpage(struct page *page, bool synchronous)
 	if (sis->flags & SWP_SYNCHRONOUS_IO) {
 		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
 		if (!ret) {
-			if (trylock_page(page)) {
-				swap_slot_free_notify(page);
-				unlock_page(page);
-			}
 			count_vm_event(PSWPIN);
 			goto out;
 		}
...
mm/secretmem.c
@@ -158,6 +158,22 @@ const struct address_space_operations secretmem_aops = {
 	.isolate_page	= secretmem_isolate_page,
 };
+static int secretmem_setattr(struct user_namespace *mnt_userns,
+			     struct dentry *dentry, struct iattr *iattr)
+{
+	struct inode *inode = d_inode(dentry);
+	unsigned int ia_valid = iattr->ia_valid;
+	if ((ia_valid & ATTR_SIZE) && inode->i_size)
+		return -EINVAL;
+	return simple_setattr(mnt_userns, dentry, iattr);
+}
+static const struct inode_operations secretmem_iops = {
+	.setattr = secretmem_setattr,
+};
 static struct vfsmount *secretmem_mnt;
 static struct file *secretmem_file_create(unsigned long flags)
@@ -177,6 +193,7 @@ static struct file *secretmem_file_create(unsigned long flags)
 	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
 	mapping_set_unevictable(inode->i_mapping);
+	inode->i_op = &secretmem_iops;
 	inode->i_mapping->a_ops = &secretmem_aops;
 	/* pretend we are a normal file with zero size */
...
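With secretmem_setattr() in place, resizing a memfd_secret that already has a size is rejected with EINVAL instead of growing the file into a state that could panic the kernel. A rough userspace sketch of the now-visible behaviour (not from the patch; the syscall number 447 is the x86_64 value and an assumption here, and memfd_secret may also need to be enabled via the kernel command line):

/* secretmem-grow.c - first ftruncate() sets the size, a second resize fails. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_memfd_secret
#define __NR_memfd_secret 447	/* x86_64; check your architecture */
#endif

int main(void)
{
	int fd = syscall(__NR_memfd_secret, 0);

	if (fd < 0) {
		perror("memfd_secret (kernel support/enablement required)");
		return 1;
	}
	if (ftruncate(fd, 4096))	/* sizing an empty file is still allowed */
		perror("first ftruncate");
	if (ftruncate(fd, 8192))	/* resizing a non-empty file now fails */
		printf("resize rejected: %s\n", strerror(errno));
	close(fd);
	return 0;
}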
mm/shmem.c
@@ -2513,7 +2513,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		pgoff_t end_index;
 		unsigned long nr, ret;
 		loff_t i_size = i_size_read(inode);
-		bool got_page;
 		end_index = i_size >> PAGE_SHIFT;
 		if (index > end_index)
@@ -2570,24 +2569,34 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			 */
 			if (!offset)
 				mark_page_accessed(page);
-			got_page = true;
-		} else {
-			page = ZERO_PAGE(0);
-			got_page = false;
-		}
-		/*
-		 * Ok, we have the page, and it's up-to-date, so
-		 * now we can copy it to user space...
-		 */
-		ret = copy_page_to_iter(page, offset, nr, to);
+			/*
+			 * Ok, we have the page, and it's up-to-date, so
+			 * now we can copy it to user space...
+			 */
+			ret = copy_page_to_iter(page, offset, nr, to);
+			put_page(page);
+		} else if (iter_is_iovec(to)) {
+			/*
+			 * Copy to user tends to be so well optimized, but
+			 * clear_user() not so much, that it is noticeably
+			 * faster to copy the zero page instead of clearing.
+			 */
+			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
+		} else {
+			/*
+			 * But submitting the same page twice in a row to
+			 * splice() - or others? - can result in confusion:
+			 * so don't attempt that optimization on pipes etc.
+			 */
+			ret = iov_iter_zero(nr, to);
+		}
 		retval += ret;
 		offset += ret;
 		index += offset >> PAGE_SHIFT;
 		offset &= ~PAGE_MASK;
-		if (got_page)
-			put_page(page);
 		if (!iov_iter_count(to))
 			break;
 		if (ret < nr) {
...
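The shmem_file_read_iter() rework keeps the ZERO_PAGE copy only for plain read() (iovec) destinations and falls back to iov_iter_zero() for pipes and other iterators, which avoids the splice() confusion that the removed SetPageUptodate(ZERO_PAGE(0)) call in mm/filemap.c papered over. Either way, holes in a tmpfs file must still read back as zeroes; a throwaway userspace check of that invariant (not from the patch; it assumes /dev/shm is a tmpfs mount):

/* tmpfs-hole.c - create an all-hole tmpfs file and verify it reads as zeroes. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char path[] = "/dev/shm/tmpfs-hole-XXXXXX";
	char buf[8192];
	int fd = mkstemp(path);

	if (fd < 0) {
		perror("mkstemp");
		return 1;
	}
	unlink(path);					/* anonymous once closed */
	if (ftruncate(fd, sizeof(buf))) {		/* two pages, entirely a hole */
		perror("ftruncate");
		return 1;
	}
	memset(buf, 0xff, sizeof(buf));			/* poison the buffer first */
	if (pread(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf)) {
		perror("pread");
		return 1;
	}
	for (size_t i = 0; i < sizeof(buf); i++) {
		if (buf[i]) {
			fprintf(stderr, "non-zero byte at offset %zu\n", i);
			return 1;
		}
	}
	printf("hole read back as %zu zero bytes, as expected\n", sizeof(buf));
	close(fd);
	return 0;
}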
mm/slab.c
@@ -3665,7 +3665,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif /* CONFIG_NUMA */
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	struct kmem_cache *cachep;
 	unsigned int objnr;
...
mm/slab.h
@@ -868,7 +868,7 @@ struct kmem_obj_info {
 	void *kp_stack[KS_ADDRS_COUNT];
 	void *kp_free_stack[KS_ADDRS_COUNT];
 };
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
 #endif
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
...
mm/slab_common.c
@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
 }
 EXPORT_SYMBOL_GPL(kmem_valid_obj);
+static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+	if (__kfence_obj_info(kpp, object, slab))
+		return;
+	__kmem_obj_info(kpp, object, slab);
+}
 /**
  * kmem_dump_obj - Print available slab provenance information
  * @object: slab object for which to find provenance information.
@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
 		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
 	else
 		pr_cont(" slab%s", cp);
+	if (is_kfence_address(object))
+		pr_cont(" (kfence)");
 	if (kp.kp_objp)
 		pr_cont(" start %px", kp.kp_objp);
 	if (kp.kp_data_offset)
...
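With the kmem_obj_info() wrapper above, kmem_dump_obj() now reports provenance for KFENCE-backed allocations instead of interpreting KFENCE pool pages as regular slab objects, and tags them with "(kfence)". A rough kernel-module sketch of how a debugging aid might exercise this (not from this series; it assumes mem_dump_obj() is available to GPL modules, that CONFIG_PRINTK and CONFIG_KFENCE are enabled, and whether any particular kmalloc() is actually serviced by KFENCE depends on the sampling interval):

// SPDX-License-Identifier: GPL-2.0
/* dump_demo.c - hypothetical module: print provenance of a fresh allocation. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kfence.h>
#include <linux/mm.h>

static int __init dump_demo_init(void)
{
	void *obj = kmalloc(32, GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	pr_info("dump_demo: %px is_kfence_address=%d\n",
		obj, is_kfence_address(obj));
	/* Routes slab/KFENCE objects to kmem_dump_obj(), which now tags KFENCE. */
	mem_dump_obj(obj);
	kfree(obj);
	return 0;
}

static void __exit dump_demo_exit(void)
{
}

module_init(dump_demo_init);
module_exit(dump_demo_exit);
MODULE_LICENSE("GPL");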
mm/slob.c
@@ -463,7 +463,7 @@ static void slob_free(void *block, int size)
 }
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	kpp->kp_ptr = object;
 	kpp->kp_slab = slab;
...
mm/slub.c
@@ -4312,7 +4312,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 }
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	void *base;
 	int __maybe_unused i;
...
mm/vmalloc.c
@@ -1671,17 +1671,6 @@ static DEFINE_MUTEX(vmap_purge_lock);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
-#ifdef CONFIG_X86_64
-/*
- * called before a call to iounmap() if the caller wants vm_area_struct's
- * immediately freed.
- */
-void set_iounmap_nonlazy(void)
-{
-	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
-}
-#endif /* CONFIG_X86_64 */
 /*
  * Purges all lazily-freed vmap areas.
  */
...