Commit fdb5d6ca authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "12 patches.

  Subsystems affected by this patch series: mm (documentation, kasan,
  and pagemap), csky, ia64, gcov, and lib"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  lib: remove "expecting prototype" kernel-doc warnings
  gcov: clang: fix clang-11+ build
  mm: ptdump: fix build failure
  mm/mapping_dirty_helpers: guard hugepage pud's usage
  ia64: tools: remove duplicate definition of ia64_mf() on ia64
  ia64: tools: remove inclusion of ia64-specific version of errno.h header
  ia64: fix discontig.c section mismatches
  ia64: remove duplicate entries in generic_defconfig
  csky: change a Kconfig symbol name to fix e1000 build error
  kasan: remove redundant config option
  kasan: fix hwasan build for gcc
  mm: eliminate "expecting prototype" kernel-doc warnings
parents 9cdbf646 c95c2d32
@@ -134,7 +134,7 @@ SYM_FUNC_START(_cpu_resume)
 	 */
 	bl	cpu_do_resume
 
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
 	mov	x0, sp
 	bl	kasan_unpoison_task_stack_below
 #endif
...
@@ -314,7 +314,7 @@ config FORCE_MAX_ZONEORDER
 	int "Maximum zone order"
 	default "11"
 
-config RAM_BASE
+config DRAM_BASE
 	hex "DRAM start addr (the same with memory-section in dts)"
 	default 0x0
...
@@ -28,7 +28,7 @@
 #define SSEG_SIZE	0x20000000
 #define LOWMEM_LIMIT	(SSEG_SIZE * 2)
 
-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
+#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
 
 #ifndef __ASSEMBLY__
...
@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
-CONFIG_ATA=y
-CONFIG_ATA_PIIX=y
 CONFIG_SATA_VITESSE=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
...
@@ -95,7 +95,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
  * called yet.  Note that node 0 will also count all non-existent cpus.
  */
-static int __meminit early_nr_cpus_node(int node)
+static int early_nr_cpus_node(int node)
 {
 	int cpu, n = 0;
 
@@ -110,7 +110,7 @@ static int __meminit early_nr_cpus_node(int node)
  * compute_pernodesize - compute size of pernode data
  * @node: the node id.
  */
-static unsigned long __meminit compute_pernodesize(int node)
+static unsigned long compute_pernodesize(int node)
 {
 	unsigned long pernodesize = 0, cpus;
 
@@ -367,7 +367,7 @@ static void __init reserve_pernode_space(void)
 	}
 }
 
-static void __meminit scatter_node_data(void)
+static void scatter_node_data(void)
 {
 	pg_data_t **dst;
 	int node;
...
@@ -115,7 +115,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
 	movq	pt_regs_r14(%rax), %r14
 	movq	pt_regs_r15(%rax), %r15
 
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
 	/*
 	 * The suspend path may have poisoned some areas deeper in the stack,
 	 * which we now need to unpoison.
...
@@ -330,7 +330,7 @@ static inline bool kasan_check_byte(const void *address)
 
 #endif /* CONFIG_KASAN */
 
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
 void kasan_unpoison_task_stack(struct task_struct *task);
 #else
 static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
...
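For context on the defined() fixups above (and the #ifdef conversions further down): with the Kconfig change in this series, CONFIG_KASAN_STACK becomes a bool symbol. A minimal C sketch of the preprocessor semantics, with illustrative macro values rather than text from a real autoconf.h:

struct task_struct;

/*
 * A Kconfig int symbol is always emitted into autoconf.h, with the off
 * state encoded in the value:
 *	#define CONFIG_KASAN_STACK 0
 * so "#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK" could evaluate it
 * directly. A Kconfig bool is emitted as 1 only when enabled and is
 * absent otherwise, so guards must test for definedness instead:
 */
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif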
@@ -369,7 +369,7 @@ static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
 	INIT_LIST_HEAD(&fn_dup->head);
 
 	cv_size = fn->num_counters * sizeof(fn->counters[0]);
-	fn_dup->counters = vmalloc(cv_size);
+	fn_dup->counters = kvmalloc(cv_size, GFP_KERNEL);
 	if (!fn_dup->counters) {
 		kfree(fn_dup);
 		return NULL;
...
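A note on the one-line gcov change: kvmalloc() attempts a kmalloc() first and falls back to vmalloc() only when that is not possible, and kvmalloc'd memory must be released with kvfree(). A minimal sketch of the pairing, with hypothetical helper names rather than anything from clang.c:

#include <linux/mm.h>
#include <linux/types.h>

static u64 *counters_alloc(size_t n)
{
	/* kvmalloc() picks kmalloc() or vmalloc() based on the size. */
	return kvmalloc(n * sizeof(u64), GFP_KERNEL);
}

static void counters_free(u64 *cv)
{
	/* kvfree() handles both the kmalloc- and vmalloc-backed cases. */
	kvfree(cv);
}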
@@ -138,9 +138,10 @@ config KASAN_INLINE
 
 endchoice
 
-config KASAN_STACK_ENABLE
+config KASAN_STACK
 	bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
 	depends on KASAN_GENERIC || KASAN_SW_TAGS
+	default y if CC_IS_GCC
 	help
 	  The LLVM stack address sanitizer has a know problem that
 	  causes excessive stack usage in a lot of functions, see
@@ -154,12 +155,6 @@ config KASAN_STACK_ENABLE
 	  CONFIG_COMPILE_TEST.  On gcc it is assumed to always be safe
 	  to use and enabled by default.
 
-config KASAN_STACK
-	int
-	depends on KASAN_GENERIC || KASAN_SW_TAGS
-	default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
-	default 0
-
 config KASAN_SW_TAGS_IDENTIFY
 	bool "Enable memory corruption identification"
 	depends on KASAN_SW_TAGS
...
@@ -40,7 +40,7 @@ enum cpio_fields {
 };
 
 /**
- * cpio_data find_cpio_data - Search for files in an uncompressed cpio
+ * find_cpio_data - Search for files in an uncompressed cpio
  * @path:   The directory to search for, including a slash at the end
  * @data:   Pointer to the cpio archive or a header inside
  * @len:    Remaining length of the cpio based on data pointer
@@ -49,7 +49,7 @@ enum cpio_fields {
  * matching file itself.  It can be used to iterate through the cpio
 * to find all files inside of a directory path.
 *
- * @return: struct cpio_data containing the address, length and
+ * Return: &struct cpio_data containing the address, length and
 *          filename (with the directory path cut off) of the found file.
 *          If you search for a filename and not for files in a directory,
 *          pass the absolute path of the filename in the cpio and make sure
...
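The kernel-doc fixes in this series (here and in lru_cache, parman, and radix-tree below) all converge on the comment shape that scripts/kernel-doc parses cleanly: the bare function name on the opening line, one @name: line per parameter, and the return value under a Return: heading. A generic template with placeholder identifiers:

/**
 * my_func - one-line summary of what the function does
 * @first: meaning of the first parameter
 * @second: meaning of the second parameter
 *
 * Optional longer description. References such as &struct cpio_data
 * cross-link to that structure's documentation.
 *
 * Return: what the function hands back to the caller.
 */
int my_func(int first, int second);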
@@ -76,6 +76,7 @@ int lc_try_lock(struct lru_cache *lc)
 /**
  * lc_create - prepares to track objects in an active set
  * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
+ * @cache: cache root pointer
  * @max_pending_changes: maximum changes to accumulate until a transaction is required
  * @e_count: number of elements allowed to be active simultaneously
  * @e_size: size of the tracked objects
@@ -627,7 +628,7 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index)
 }
 
 /**
- * lc_dump - Dump a complete LRU cache to seq in textual form.
+ * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
  * @lc: the lru cache to operate on
  * @seq: the &struct seq_file pointer to seq_printf into
  * @utext: user supplied additional "heading" or other info
...
@@ -297,7 +297,7 @@ EXPORT_SYMBOL(parman_destroy);
  * parman_prio_init - initializes a parman priority chunk
  * @parman:	parman instance
  * @prio:	parman prio structure to be initialized
- * @prority:	desired priority of the chunk
+ * @priority:	desired priority of the chunk
  *
  * Note: all locking must be provided by the caller.
 *
@@ -356,7 +356,7 @@ int parman_item_add(struct parman *parman, struct parman_prio *prio,
 EXPORT_SYMBOL(parman_item_add);
 
 /**
- * parman_item_del - deletes parman item
+ * parman_item_remove - deletes parman item
  * @parman:	parman instance
  * @prio:	parman prio instance to delete the item from
  * @item:	parman item instance
...
@@ -166,9 +166,9 @@ static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
 /**
  * radix_tree_find_next_bit - find the next set bit in a memory region
  *
- * @addr: The address to base the search on
- * @size: The bitmap size in bits
- * @offset: The bitnumber to start searching at
+ * @node: where to begin the search
+ * @tag: the tag index
+ * @offset: the bitnumber to start searching at
  *
  * Unrollable variant of find_next_bit() for constant size arrays.
  * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
@@ -461,7 +461,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
 /**
  * radix_tree_shrink - shrink radix tree to minimum height
- * @root		radix tree root
+ * @root:		radix tree root
  */
 static inline bool radix_tree_shrink(struct radix_tree_root *root)
 {
@@ -691,7 +691,7 @@ static inline int insert_entries(struct radix_tree_node *node,
 }
 
 /**
- * __radix_tree_insert - insert into a radix tree
+ * radix_tree_insert - insert into a radix tree
  * @root:	radix tree root
  * @index:	index key
  * @item:	item to insert
@@ -919,6 +919,7 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
 /**
  * radix_tree_iter_replace - replace item in a slot
  * @root:	radix tree root
+ * @iter:	iterator state
  * @slot:	pointer to slot
  * @item:	new item to store in the slot.
 *
...
@@ -63,7 +63,7 @@ void __kasan_unpoison_range(const void *address, size_t size)
 	kasan_unpoison(address, size);
 }
 
-#if CONFIG_KASAN_STACK
+#ifdef CONFIG_KASAN_STACK
 /* Unpoison the entire stack for a task. */
 void kasan_unpoison_task_stack(struct task_struct *task)
 {
...
@@ -231,7 +231,7 @@ void *kasan_find_first_bad_addr(void *addr, size_t size);
 const char *kasan_get_bug_type(struct kasan_access_info *info);
 void kasan_metadata_fetch_row(char *buffer, void *row);
 
-#if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN_GENERIC) && defined(CONFIG_KASAN_STACK)
 void kasan_print_address_stack_frame(const void *addr);
 #else
 static inline void kasan_print_address_stack_frame(const void *addr) { }
...
@@ -128,7 +128,7 @@ void kasan_metadata_fetch_row(char *buffer, void *row)
 	memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
 }
 
-#if CONFIG_KASAN_STACK
+#ifdef CONFIG_KASAN_STACK
 static bool __must_check tokenize_frame_descr(const char **frame_descr,
 					      char *token, size_t max_tok_len,
 					      unsigned long *value)
...
@@ -165,10 +165,12 @@ static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
 		return 0;
 	}
 
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 	/* Huge pud */
 	walk->action = ACTION_CONTINUE;
 	if (pud_trans_huge(pudval) || pud_devmap(pudval))
 		WARN_ON(pud_write(pudval) || pud_dirty(pudval));
+#endif
 
 	return 0;
 }
...
@@ -249,16 +249,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-/**
- * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
- * @tlb: the mmu_gather structure to initialize
- * @mm: the mm_struct of the target address space
- * @fullmm: @mm is without users and we're going to destroy the full address
- *	    space (exit/execve)
- *
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm.
- */
 static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			     bool fullmm)
 {
@@ -283,11 +273,30 @@ static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	inc_tlb_flush_pending(tlb->mm);
 }
 
+/**
+ * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm.
+ */
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
 {
 	__tlb_gather_mmu(tlb, mm, false);
 }
 
+/**
+ * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ *
+ * In this case, @mm is without users and we're going to destroy the
+ * full address space (exit/execve).
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm.
+ */
 void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
 {
 	__tlb_gather_mmu(tlb, mm, true);
...
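The relocated kernel-doc above documents the split API: tlb_gather_mmu() for a live address space and tlb_gather_mmu_fullmm() for full teardown on exit/execve. A hypothetical caller sketch, assuming the single-argument tlb_finish_mmu() from the same refactor; the actual unmap step is elided:

#include <asm/tlb.h>

static void teardown_range(struct mm_struct *mm, unsigned long start,
			   unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);	/* on-stack gather, fullmm == false */
	/* ... unmap the [start, end) range, batching pages into @tlb ... */
	tlb_finish_mmu(&tlb);		/* flush TLBs, free the batched pages */
}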
@@ -170,7 +170,7 @@ static bool oom_unkillable_task(struct task_struct *p)
 	return false;
 }
 
-/**
+/*
  * Check whether unreclaimable slab amount is greater than
  * all user memory(LRU pages).
  * dump_unreclaimable_slab() could help in the case that
...
@@ -111,7 +111,7 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
 			    unsigned long next, struct mm_walk *walk)
 {
 	struct ptdump_state *st = walk->private;
-	pte_t val = READ_ONCE(*pte);
+	pte_t val = ptep_get(pte);
 
 	if (st->effective_prot)
 		st->effective_prot(st, 4, pte_val(val));
...
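On the ptdump build fix: READ_ONCE() asserts at compile time that its operand fits in a machine word, which fails on configurations where pte_t is wider than long (for example 32-bit builds with 64-bit PTEs), while ptep_get() works for any pte_t layout. A small sketch with a hypothetical wrapper name:

#include <linux/pgtable.h>

/*
 * ptep_get() is the generic accessor for reading a PTE; on most
 * architectures it degenerates to a plain dereference, and arches with
 * multi-word PTEs provide their own implementation.
 */
static inline pte_t read_pte_safely(pte_t *ptep)
{
	return ptep_get(ptep);
}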
@@ -147,8 +147,8 @@ void __meminit __shuffle_zone(struct zone *z)
 	spin_unlock_irqrestore(&z->lock, flags);
 }
 
-/**
- * shuffle_free_memory - reduce the predictability of the page allocator
+/*
+ * __shuffle_free_memory - reduce the predictability of the page allocator
  * @pgdat: node page data
  */
 void __meminit __shuffle_free_memory(pg_data_t *pgdat)
...
@@ -2,6 +2,14 @@
 CFLAGS_KASAN_NOSANITIZE := -fno-builtin
 KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
 
+cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+
+ifdef CONFIG_KASAN_STACK
+	stack_enable := 1
+else
+	stack_enable := 0
+endif
+
 ifdef CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_INLINE
@@ -12,8 +20,6 @@ endif
 
 CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
 
-cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
-
 # -fasan-shadow-offset fails without -fsanitize
 CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
 			-fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
@@ -27,7 +33,7 @@ else
 CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
 	$(call cc-param,asan-globals=1) \
 	$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-	$(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
+	$(call cc-param,asan-stack=$(stack_enable)) \
 	$(call cc-param,asan-instrument-allocas=1)
 endif
 
@@ -36,14 +42,14 @@ endif # CONFIG_KASAN_GENERIC
 ifdef CONFIG_KASAN_SW_TAGS
 
 ifdef CONFIG_KASAN_INLINE
-instrumentation_flags := -mllvm -hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
+instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
 else
-instrumentation_flags := -mllvm -hwasan-instrument-with-calls=1
+instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
 endif
 
 CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
-		-mllvm -hwasan-instrument-stack=$(CONFIG_KASAN_STACK) \
-		-mllvm -hwasan-use-short-granules=0 \
+		$(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
+		$(call cc-param,hwasan-use-short-granules=0) \
 		$(instrumentation_flags)
 
 endif # CONFIG_KASAN_SW_TAGS
...
@@ -64,7 +64,7 @@ choice
 	config GCC_PLUGIN_STRUCTLEAK_BYREF
 		bool "zero-init structs passed by reference (strong)"
 		depends on GCC_PLUGINS
-		depends on !(KASAN && KASAN_STACK=1)
+		depends on !(KASAN && KASAN_STACK)
 		select GCC_PLUGIN_STRUCTLEAK
 		help
 		  Zero-initialize any structures on the stack that may
@@ -82,7 +82,7 @@ choice
 	config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
 		bool "zero-init anything passed by reference (very strong)"
 		depends on GCC_PLUGINS
-		depends on !(KASAN && KASAN_STACK=1)
+		depends on !(KASAN && KASAN_STACK)
 		select GCC_PLUGIN_STRUCTLEAK
 		help
 		  Zero-initialize any stack variables that may be passed
...
@@ -39,9 +39,6 @@
  * sequential memory pages only.
  */
 
-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
-#define ia64_mf()	asm volatile ("mf" ::: "memory")
-
 #define mb()		ia64_mf()
 #define rmb()		mb()
 #define wmb()		mb()
...
@@ -9,8 +9,6 @@
 #include "../../../arch/alpha/include/uapi/asm/errno.h"
 #elif defined(__mips__)
 #include "../../../arch/mips/include/uapi/asm/errno.h"
-#elif defined(__ia64__)
-#include "../../../arch/ia64/include/uapi/asm/errno.h"
 #elif defined(__xtensa__)
 #include "../../../arch/xtensa/include/uapi/asm/errno.h"
 #else
...