Commit 33def849 authored by Joe Perches, committed by Linus Torvalds

treewide: Convert macro and uses of __section(foo) to __section("foo")

Use a more generic form for __section that requires quotes to avoid
complications with clang and gcc differences.

Remove the quote operator # from compiler_attributes.h __section macro.

Convert all unquoted __section(foo) uses to quoted __section("foo").
Also convert __attribute__((section("foo"))) uses to __section("foo")
even if the __attribute__ has multiple list entry forms.
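
For illustration, here is the compiler_attributes.h definition and one
typical call site before and after the conversion (excerpted from the
diff below):

    /* include/linux/compiler_attributes.h: old form stringized its argument */
    #define __section(S) __attribute__((__section__(#S)))
    /* new form: the caller passes the string literal directly */
    #define __section(section) __attribute__((__section__(section)))

    /* a typical call-site conversion */
    static bool __section(.data.once) __warned;     /* before */
    static bool __section(".data.once") __warned;   /* after  */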

Conversion done using the script at:

    https://lore.kernel.org/lkml/75393e5ddc272dc7403de74d645e6c6e0f4e70eb.camel@perches.com/2-convert_section.pl

Signed-off-by: Joe Perches <joe@perches.com>
Reviewed-by: Nick Desaulniers <ndesaulniers@gooogle.com>
Reviewed-by: Miguel Ojeda <ojeda@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 986b9eac
@@ -64,15 +64,15 @@
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_ARC_HAS_ICCM
#define __arcfp_code __section(.text.arcfp)
#define __arcfp_code __section(".text.arcfp")
#else
#define __arcfp_code __section(.text)
#define __arcfp_code __section(".text")
#endif
#ifdef CONFIG_ARC_HAS_DCCM
#define __arcfp_data __section(.data.arcfp)
#define __arcfp_data __section(".data.arcfp")
#else
#define __arcfp_data __section(.data)
#define __arcfp_data __section(".data")
#endif
#endif /* __ASSEMBLY__ */
......
@@ -53,7 +53,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
*/
#define MACHINE_START(_type, _name) \
static const struct machine_desc __mach_desc_##_type \
__used __section(.arch.info.init) = { \
__used __section(".arch.info.init") = { \
.name = _name,
#define MACHINE_END \
......
@@ -13,7 +13,7 @@
#include <asm/io.h>
#include <asm/mach_desc.h>
int arc_hsdk_axi_dmac_coherent __section(.data) = 0;
int arc_hsdk_axi_dmac_coherent __section(".data") = 0;
#define ARC_CCM_UNUSED_ADDR 0x60000000
......
@@ -24,6 +24,6 @@
#define ARCH_SLAB_MINALIGN 8
#endif
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define __read_mostly __section(".data..read_mostly")
#endif
@@ -42,7 +42,7 @@ struct of_cpuidle_method {
#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
__used __section(__cpuidle_method_of_table) \
__used __section("__cpuidle_method_of_table") \
= { .method = _method, .ops = _ops }
extern int arm_cpuidle_suspend(int index);
......
@@ -6,7 +6,7 @@
#include <linux/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
#define __idmap __section(.idmap.text) noinline notrace
#define __idmap __section(".idmap.text") noinline notrace
extern pgd_t *idmap_pgd;
......
@@ -81,7 +81,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
#define MACHINE_START(_type,_name) \
static const struct machine_desc __mach_desc_##_type \
__used \
__attribute__((__section__(".arch.info.init"))) = { \
__section(".arch.info.init") = { \
.nr = MACH_TYPE_##_type, \
.name = _name,
@@ -91,7 +91,7 @@ static const struct machine_desc __mach_desc_##_type \
#define DT_MACHINE_START(_name, _namestr) \
static const struct machine_desc __mach_desc_##_name \
__used \
__attribute__((__section__(".arch.info.init"))) = { \
__section(".arch.info.init") = { \
.nr = ~0, \
.name = _namestr,
......
@@ -14,7 +14,7 @@
#include <uapi/asm/setup.h>
#define __tag __used __attribute__((__section__(".taglist.init")))
#define __tag __used __section(".taglist.init")
#define __tagtable(tag, fn) \
static const struct tagtable __tagtable_##fn __tag = { tag, fn }
......
@@ -112,7 +112,7 @@ struct of_cpu_method {
#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpu_method __cpu_method_of_table_##name \
__used __section(__cpu_method_of_table) \
__used __section("__cpu_method_of_table") \
= { .method = _method, .ops = _ops }
/*
* set platform specific SMP operations
......
@@ -16,13 +16,13 @@
#include <linux/compiler.h>
/* Tag variables with this */
#define __tcmdata __section(.tcm.data)
#define __tcmdata __section(".tcm.data")
/* Tag constants with this */
#define __tcmconst __section(.tcm.rodata)
#define __tcmconst __section(".tcm.rodata")
/* Tag functions inside TCM called from outside TCM with this */
#define __tcmfunc __attribute__((long_call)) __section(.tcm.text) noinline
#define __tcmfunc __attribute__((long_call)) __section(".tcm.text") noinline
/* Tag function inside TCM called from inside TCM with this */
#define __tcmlocalfunc __section(.tcm.text)
#define __tcmlocalfunc __section(".tcm.text")
void *tcm_alloc(size_t len);
void tcm_free(void *addr, size_t len);
......
@@ -11,7 +11,7 @@
extern struct of_cpuidle_method __cpuidle_method_of_table[];
static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
__used __section(__cpuidle_method_of_table_end);
__used __section("__cpuidle_method_of_table_end");
static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
......
@@ -29,7 +29,7 @@
extern struct of_cpu_method __cpu_method_of_table[];
static const struct of_cpu_method __cpu_method_of_table_sentinel
__used __section(__cpu_method_of_table_end);
__used __section("__cpu_method_of_table_end");
static int __init set_smp_ops_by_method(struct device_node *node)
......
@@ -79,7 +79,7 @@ static inline u32 cache_type_cwg(void)
return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
}
#define __read_mostly __section(.data..read_mostly)
#define __read_mostly __section(".data..read_mostly")
static inline int cache_line_size_of_cpu(void)
{
......
@@ -54,7 +54,7 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
}
/* we will fill this structure from the stub, so don't put it in .bss */
struct screen_info screen_info __section(.data);
struct screen_info screen_info __section(".data");
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
......
@@ -19,7 +19,7 @@
#include <asm/smp_plat.h>
extern void secondary_holding_pen(void);
volatile unsigned long __section(.mmuoff.data.read)
volatile unsigned long __section(".mmuoff.data.read")
secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
......
@@ -43,7 +43,7 @@
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 __section(.mmuoff.data.write) vabits_actual;
u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);
u64 kimage_voffset __ro_after_init;
......
@@ -10,13 +10,13 @@
#include <linux/compiler.h>
/* Tag variables with this */
#define __tcmdata __section(.tcm.data)
#define __tcmdata __section(".tcm.data")
/* Tag constants with this */
#define __tcmconst __section(.tcm.rodata)
#define __tcmconst __section(".tcm.rodata")
/* Tag functions inside TCM called from outside TCM with this */
#define __tcmfunc __section(.tcm.text) noinline
#define __tcmfunc __section(".tcm.text") noinline
/* Tag function inside TCM called from inside TCM with this */
#define __tcmlocalfunc __section(.tcm.text)
#define __tcmlocalfunc __section(".tcm.text")
void *tcm_alloc(size_t len);
void tcm_free(void *addr, size_t len);
......
@@ -25,6 +25,6 @@
# define SMP_CACHE_BYTES (1 << 3)
#endif
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_IA64_CACHE_H */
@@ -46,7 +46,7 @@ DEFINE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
* ASM code. Default position is BSS section which is cleared
* in machine_early_init().
*/
char cmd_line[COMMAND_LINE_SIZE] __attribute__ ((section(".data")));
char cmd_line[COMMAND_LINE_SIZE] __section(".data");
void __init setup_arch(char **cmdline_p)
{
......
@@ -14,6 +14,6 @@
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */
@@ -23,7 +23,7 @@ extern long __mips_machines_end;
#define MIPS_MACHINE(name) \
static const struct mips_machine __mips_mach_##name \
__used __section(.mips.machines.init)
__used __section(".mips.machines.init")
#define for_each_mips_machine(mach) \
for ((mach) = (struct mips_machine *)&__mips_machines_start; \
......
@@ -44,7 +44,7 @@
#include <asm/prom.h>
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
const char __section(".appended_dtb") __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
......
@@ -569,7 +569,7 @@ unsigned long pgd_current[NR_CPUS];
* size, and waste space. So we place it in its own section and align
* it in the linker script.
*/
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
......
@@ -22,7 +22,7 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#define __read_mostly __section(.data..read_mostly)
#define __read_mostly __section(".data..read_mostly")
void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */
......
@@ -52,7 +52,7 @@
})
#ifdef CONFIG_SMP
# define __lock_aligned __section(.data..lock_aligned)
# define __lock_aligned __section(".data..lock_aligned")
#endif
#endif /* __PARISC_LDCW_H */
@@ -21,7 +21,7 @@
#include <asm/ftrace.h>
#include <asm/patch.h>
#define __hot __attribute__ ((__section__ (".text.hot")))
#define __hot __section(".text.hot")
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
......
@@ -42,11 +42,11 @@ extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
* guarantee that global objects will be laid out in memory in the same order
* as the order of declaration, so put these in different sections and use
* the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
#endif
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));
static struct resource data_resource = {
.name = "Kernel data",
......
@@ -97,7 +97,7 @@ static inline u32 l1_icache_bytes(void)
#endif
#define __read_mostly __section(.data..read_mostly)
#define __read_mostly __section(".data..read_mostly")
#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
......
@@ -232,7 +232,7 @@ extern void book3e_idle(void);
extern struct machdep_calls ppc_md;
extern struct machdep_calls *machine_id;
#define __machine_desc __attribute__ ((__section__ (".machine.desc")))
#define __machine_desc __section(".machine.desc")
#define define_machine(name) \
extern struct machdep_calls mach_##name; \
......
@@ -26,7 +26,7 @@
static void scrollscreen(void);
#endif
#define __force_data __section(.data)
#define __force_data __section(".data")
static int g_loc_X __force_data;
static int g_loc_Y __force_data;
......
@@ -45,7 +45,7 @@
#include <linux/linux_logo.h>
/* All of prom_init bss lives here */
#define __prombss __section(.bss.prominit)
#define __prombss __section(".bss.prominit")
/*
* Eventually bump that one up
......
@@ -32,7 +32,7 @@
#ifdef CONFIG_BUG
#define WARN_ON_ONCE_RM(condition) ({ \
static bool __section(.data.unlikely) __warned; \
static bool __section(".data.unlikely") __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
......
@@ -13,7 +13,7 @@
#define SOC_EARLY_INIT_DECLARE(name, compat, fn) \
static const struct of_device_id __soc_early_init__##name \
__used __section(__soc_early_init_table) \
__used __section("__soc_early_init_table") \
= { .compatible = compat, .data = fn }
void soc_early_init(void);
@@ -46,7 +46,7 @@ struct soc_builtin_dtb {
} \
\
static const struct soc_builtin_dtb __soc_builtin_dtb__##name \
__used __section(__soc_builtin_dtb_table) = \
__used __section("__soc_builtin_dtb_table") = \
{ \
.vendor_id = vendor, \
.arch_id = arch, \
......
@@ -15,8 +15,8 @@
const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
void *__cpu_up_stack_pointer[NR_CPUS] __section(.data);
void *__cpu_up_task_pointer[NR_CPUS] __section(.data);
void *__cpu_up_stack_pointer[NR_CPUS] __section(".data");
void *__cpu_up_task_pointer[NR_CPUS] __section(".data");
extern const struct cpu_operations cpu_ops_sbi;
extern const struct cpu_operations cpu_ops_spinwait;
......
@@ -32,7 +32,7 @@
#include "head.h"
#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
struct screen_info screen_info __section(.data) = {
struct screen_info screen_info __section(".data") = {
.orig_video_lines = 30,
.orig_video_cols = 80,
.orig_video_mode = 0,
@@ -47,7 +47,7 @@ struct screen_info screen_info __section(.data) = {
* This is used before the kernel initializes the BSS so it can't be in the
* BSS.
*/
atomic_t hart_lottery __section(.sdata);
atomic_t hart_lottery __section(".sdata");
unsigned long boot_cpu_hartid;
static DEFINE_PER_CPU(struct cpu, cpu_devices);
......
@@ -46,7 +46,7 @@ struct diag_ops __bootdata_preserved(diag_dma_ops) = {
.diag0c = _diag0c_dma,
.diag308_reset = _diag308_reset_dma
};
static struct diag210 _diag210_tmp_dma __section(.dma.data);
static struct diag210 _diag210_tmp_dma __section(".dma.data");
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
void error(char *x)
......
@@ -14,6 +14,6 @@
#define L1_CACHE_SHIFT 8
#define NET_SKB_PAD 32
#define __read_mostly __section(.data..read_mostly)
#define __read_mostly __section(".data..read_mostly")
#endif
@@ -26,14 +26,14 @@ static inline int arch_is_kernel_initmem_freed(unsigned long addr)
* final .boot.data section, which should be identical in the decompressor and
* the decompressed kernel (that is checked during the build).
*/
#define __bootdata(var) __section(.boot.data.var) var
#define __bootdata(var) __section(".boot.data.var") var
/*
* .boot.preserved.data is similar to .boot.data, but it is not part of the
* .init section and thus will be preserved for later use in the decompressed
* kernel.
*/
#define __bootdata_preserved(var) __section(.boot.preserved.data.var) var
#define __bootdata_preserved(var) __section(".boot.preserved.data.var") var
extern unsigned long __sdma, __edma;
extern unsigned long __stext_dma, __etext_dma;
......
@@ -48,7 +48,7 @@
#include <asm/uv.h>
#include <linux/virtio_config.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
......
@@ -49,7 +49,7 @@ static struct plat_smp_ops dummy_smp_ops = {
extern const struct of_cpu_method __cpu_method_of_table[];
const struct of_cpu_method __cpu_method_of_table_sentinel
__section(__cpu_method_of_table_end);
__section("__cpu_method_of_table_end");
static void sh_of_smp_probe(void)
{
......
@@ -14,7 +14,7 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define __read_mostly __section(".data..read_mostly")
#ifndef __ASSEMBLY__
struct cache_info {
......
@@ -36,6 +36,6 @@ extern struct sh_machine_vector sh_mv;
#define get_system_type() sh_mv.mv_name
#define __initmv \
__used __section(.machvec.init)
__used __section(".machvec.init")
#endif /* _ASM_SH_MACHVEC_H */
@@ -71,7 +71,7 @@ struct of_cpu_method {
#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpu_method __cpu_method_of_table_##name \
__used __section(__cpu_method_of_table) \
__used __section("__cpu_method_of_table") \
= { .method = _method, .ops = _ops }
#else
......
@@ -21,6 +21,6 @@
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define __read_mostly __section(".data..read_mostly")
#endif /* !(_SPARC_CACHE_H) */
@@ -24,7 +24,7 @@ static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
#define __force_data __attribute__((__section__(".data")))
#define __force_data __section(".data")
static int g_loc_X __force_data;
static int g_loc_Y __force_data;
......
@@ -45,15 +45,15 @@ typedef void (*exitcall_t)(void);
/* These are for everybody (although not all archs will actually
discard it in modules) */
#define __init __section(.init.text)
#define __initdata __section(.init.data)
#define __exitdata __section(.exit.data)
#define __exit_call __used __section(.exitcall.exit)
#define __init __section(".init.text")
#define __initdata __section(".init.data")
#define __exitdata __section(".exit.data")
#define __exit_call __used __section(".exitcall.exit")
#ifdef MODULE
#define __exit __section(.exit.text)
#define __exit __section(".exit.text")
#else
#define __exit __used __section(.exit.text)
#define __exit __used __section(".exit.text")
#endif
#endif
@@ -102,10 +102,10 @@ extern struct uml_param __uml_setup_start, __uml_setup_end;
* Mark functions and data as being only used at initialization
* or exit time.
*/
#define __uml_init_setup __used __section(.uml.setup.init)
#define __uml_setup_help __used __section(.uml.help.init)
#define __uml_postsetup_call __used __section(.uml.postsetup.init)
#define __uml_exit_call __used __section(.uml.exitcall.exit)
#define __uml_init_setup __used __section(".uml.setup.init")
#define __uml_setup_help __used __section(".uml.help.init")
#define __uml_postsetup_call __used __section(".uml.postsetup.init")
#define __uml_exit_call __used __section(".uml.exitcall.exit")
#ifdef __UM_HOST__
@@ -120,7 +120,7 @@ extern struct uml_param __uml_setup_start, __uml_setup_end;
#define __exitcall(fn) static exitcall_t __exitcall_##fn __exit_call = fn
#define __init_call __used __section(.initcall.init)
#define __init_call __used __section(".initcall.init")
#endif
......
@@ -21,7 +21,7 @@
* on some systems.
*/
void __attribute__ ((__section__ (".__syscall_stub")))
void __section(".__syscall_stub")
stub_clone_handler(void)
{
struct stub_data *data = (struct stub_data *) STUB_DATA;
......
@@ -52,7 +52,7 @@ struct cpuinfo_um boot_cpu_data = {
};
union thread_union cpu0_irqstack
__attribute__((__section__(".data..init_irqstack"))) =
__section(".data..init_irqstack") =
{ .thread_info = INIT_THREAD_INFO(init_task) };
/* Changed in setup_arch, which is called in early boot */
......
@@ -10,9 +10,9 @@
#ifdef CONFIG_X86_5LEVEL
/* __pgtable_l5_enabled needs to be in .data to avoid being cleared along with .bss */
unsigned int __section(.data) __pgtable_l5_enabled;
unsigned int __section(.data) pgdir_shift = 39;
unsigned int __section(.data) ptrs_per_p4d = 1;
unsigned int __section(".data") __pgtable_l5_enabled;
unsigned int __section(".data") pgdir_shift = 39;
unsigned int __section(".data") ptrs_per_p4d = 1;
#endif
struct paging_config {
@@ -30,7 +30,7 @@ static char trampoline_save[TRAMPOLINE_32BIT_SIZE];
* Avoid putting the pointer into .bss as it will be cleared between
* paging_prepare() and extract_kernel().
*/
unsigned long *trampoline_32bit __section(.data);
unsigned long *trampoline_32bit __section(".data");
extern struct boot_params *boot_params;
int cmdline_find_option_bool(const char *option);
......
@@ -25,7 +25,7 @@ int early_serial_base;
* error during initialization.
*/
static void __attribute__((section(".inittext"))) serial_putchar(int ch)
static void __section(".inittext") serial_putchar(int ch)
{
unsigned timeout = 0xffff;
@@ -35,7 +35,7 @@ static void __attribute__((section(".inittext"))) serial_putchar(int ch)
outb(ch, early_serial_base + TXR);
}
static void __attribute__((section(".inittext"))) bios_putchar(int ch)
static void __section(".inittext") bios_putchar(int ch)
{
struct biosregs ireg;
@@ -47,7 +47,7 @@ static void __attribute__((section(".inittext"))) bios_putchar(int ch)
intcall(0x10, &ireg, NULL);
}
void __attribute__((section(".inittext"))) putchar(int ch)
void __section(".inittext") putchar(int ch)
{
if (ch == '\n')
putchar('\r'); /* \n -> \r\n */
@@ -58,7 +58,7 @@ void __attribute__((section(".inittext"))) putchar(int ch)
serial_putchar(ch);
}
void __attribute__((section(".inittext"))) puts(const char *str)
void __section(".inittext") puts(const char *str)
{
while (*str)
putchar(*str++);
......
@@ -78,7 +78,7 @@ struct card_info {
u16 xmode_n; /* Size of unprobed mode range */
};
#define __videocard struct card_info __attribute__((used,section(".videocards")))
#define __videocard struct card_info __section(".videocards") __attribute__((used))
extern struct card_info video_cards[], video_cards_end[];
int mode_defined(u16 mode); /* video.c */
......
@@ -374,12 +374,12 @@ extern struct apic *apic;
#define apic_driver(sym) \
static const struct apic *__apicdrivers_##sym __used \
__aligned(sizeof(struct apic *)) \
__section(.apicdrivers) = { &sym }
__section(".apicdrivers") = { &sym }
#define apic_drivers(sym1, sym2) \
static struct apic *__apicdrivers_##sym1##sym2[2] __used \
__aligned(sizeof(struct apic *)) \
__section(.apicdrivers) = { &sym1, &sym2 }
__section(".apicdrivers") = { &sym1, &sym2 }
extern struct apic *__apicdrivers[], *__apicdrivers_end[];
......
@@ -8,7 +8,7 @@
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define __read_mostly __section(".data..read_mostly")
#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
......
@@ -43,7 +43,7 @@ struct devs_id {
#define sfi_device(i) \
static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
__attribute__((__section__(".x86_intel_mid_dev.init"))) = &i
__section(".x86_intel_mid_dev.init") = &i
/**
* struct mid_sd_board_info - template for SD device creation
......
@@ -9,7 +9,7 @@
#include <asm/nospec-branch.h>
/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
#define __cpuidle __section(".cpuidle.text")
/*
* Interrupt control:
......
@@ -54,7 +54,7 @@ bool sme_active(void);
bool sev_active(void);
bool sev_es_active(void);
#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))
#define __bss_decrypted __section(".bss..decrypted")
#else /* !CONFIG_AMD_MEM_ENCRYPT */
......
@@ -119,7 +119,7 @@ void *extend_brk(size_t size, size_t align);
* executable.)
*/
#define RESERVE_BRK(name,sz) \
static void __section(.discard.text) __used notrace \
static void __section(".discard.text") __used notrace \
__brk_reservation_fn_##name##__(void) { \
asm volatile ( \
".pushsection .brk_reservation,\"aw\",@nobits;" \
......
@@ -38,7 +38,7 @@ struct _tlb_table {
#define cpu_dev_register(cpu_devX) \
static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
__attribute__((__section__(".x86_cpu_dev.init"))) = \
__section(".x86_cpu_dev.init") = \
&cpu_devX;
extern const struct cpu_dev *const __x86_cpu_dev_start[],
......
@@ -84,7 +84,7 @@ static struct desc_ptr startup_gdt_descr = {
.address = 0,
};
#define __head __section(.head.text)
#define __head __section(".head.text")
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
......
@@ -37,13 +37,13 @@
* reside in the .data section so as not to be zeroed out when the .bss
* section is later cleared.
*/
u64 sme_me_mask __section(.data) = 0;
u64 sev_status __section(.data) = 0;
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);
bool sev_enabled __section(.data);
bool sev_enabled __section(".data");
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
......
@@ -81,7 +81,7 @@ struct sme_populate_pgd_data {
* section is 2MB aligned to allow for simple pagetable setup using only
* PMD entries (see vmlinux.lds.S).
*/
static char sme_workarea[2 * PMD_PAGE_SIZE] __section(.init.scratch);
static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch");
static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
......
@@ -19,8 +19,8 @@
* pvh_bootparams and pvh_start_info need to live in the data segment since
* they are used after startup_{32|64}, which clear .bss, are invoked.
*/
struct boot_params pvh_bootparams __attribute__((section(".data")));
struct hvm_start_info pvh_start_info __attribute__((section(".data")));
struct boot_params pvh_bootparams __section(".data");
struct hvm_start_info pvh_start_info __section(".data");
unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
......
@@ -14,9 +14,9 @@
#include "../boot/string.h"
u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory);
u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory");
struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory);
struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(".kexec-purgatory");
static int verify_sha256_digest(void)
{
......
@@ -8,7 +8,7 @@
#include <sysdep/mcontext.h>
#include <sys/ucontext.h>
void __attribute__ ((__section__ (".__syscall_stub")))
void __section(".__syscall_stub")
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
ucontext_t *uc = p;
......
@@ -71,7 +71,7 @@ EXPORT_SYMBOL_GPL(xen_have_vector_callback);
* NB: needs to live in .data because it's used by xen_prepare_pvh which runs
* before clearing the bss.
*/
uint32_t xen_start_flags __attribute__((section(".data"))) = 0;
uint32_t xen_start_flags __section(".data") = 0;
EXPORT_SYMBOL(xen_start_flags);
/*
......
@@ -21,7 +21,7 @@
* The variable xen_pvh needs to live in the data segment since it is used
* after startup_{32|64} is invoked, which will clear the .bss segment.
*/
bool xen_pvh __attribute__((section(".data"))) = 0;
bool xen_pvh __section(".data") = 0;
void __init xen_pvh_init(struct boot_params *boot_params)
{
......
@@ -93,7 +93,7 @@ typedef struct tagtable {
} tagtable_t;
#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
__attribute__((used, section(".taglist"))) = { tag, fn }
__section(".taglist") __attribute__((used)) = { tag, fn }
/* parse current tag */
......
@@ -4363,7 +4363,7 @@ struct of_clk_provider {
extern struct of_device_id __clk_of_table;
static const struct of_device_id __clk_of_table_sentinel
__used __section(__clk_of_table_end);
__used __section("__clk_of_table_end");
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
......
@@ -11,7 +11,7 @@
extern struct of_device_id __timer_of_table[];
static const struct of_device_id __timer_of_table_sentinel
__used __section(__timer_of_table_end);
__used __section("__timer_of_table_end");
void __init timer_probe(void)
{
......
@@ -22,7 +22,7 @@
* special section.
*/
static const struct of_device_id
irqchip_of_match_end __used __section(__irqchip_of_table_end);
irqchip_of_match_end __used __section("__irqchip_of_table_end");
extern struct of_device_id __irqchip_of_table[];
......
@@ -162,7 +162,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
}
static const struct of_device_id __rmem_of_table_sentinel
__used __section(__reservedmem_of_table_end);
__used __section("__reservedmem_of_table_end");
/**
* __reserved_mem_init_node() - call region specific reserved memory init code
......
@@ -34,7 +34,7 @@ extern struct thermal_governor *__governor_thermal_table_end[];
#define THERMAL_TABLE_ENTRY(table, name) \
static typeof(name) *__thermal_table_entry_##name \
__used __section(__##table##_thermal_table) = &name
__used __section("__" #table "_thermal_table") = &name
#define THERMAL_GOVERNOR_DECLARE(name) THERMAL_TABLE_ENTRY(governor, name)
......
@@ -42,7 +42,7 @@ do { \
#define xfs_printk_once(func, dev, fmt, ...) \
({ \
static bool __section(.data.once) __print_once; \
static bool __section(".data.once") __print_once; \
bool __ret_print_once = !__print_once; \
\
if (!__print_once) { \
......
@@ -141,7 +141,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#ifndef WARN_ON_ONCE
#define WARN_ON_ONCE(condition) ({ \
static bool __section(.data.once) __warned; \
static bool __section(".data.once") __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
@@ -153,7 +153,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#endif
#define WARN_ONCE(condition, format...) ({ \
static bool __section(.data.once) __warned; \
static bool __section(".data.once") __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
@@ -164,7 +164,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
})
#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
static bool __section(.data.once) __warned; \
static bool __section(".data.once") __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
......
@@ -25,7 +25,7 @@ struct pt_regs;
*/
#define ALLOW_ERROR_INJECTION(fname, _etype) \
static struct error_injection_entry __used \
__attribute__((__section__("_error_injection_whitelist"))) \
__section("_error_injection_whitelist") \
_eil_addr_##fname = { \
.addr = (unsigned long)fname, \
.etype = EI_ETYPE_##_etype, \
......
@@ -10,11 +10,11 @@
*/
# define __NOKPROBE_SYMBOL(fname) \
static unsigned long __used \
__attribute__((__section__("_kprobe_blacklist"))) \
__section("_kprobe_blacklist") \
_kbl_addr_##fname = (unsigned long)fname;
# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
/* Use this to forbid a kprobes attach on very low level functions */
# define __kprobes __attribute__((__section__(".kprobes.text")))
# define __kprobes __section(".kprobes.text")
# define nokprobe_inline __always_inline
#else
# define NOKPROBE_SYMBOL(fname)
......
@@ -288,7 +288,7 @@ static inline int kunit_run_all_tests(void)
static struct kunit_suite *unique_array[] = { __VA_ARGS__, NULL }; \
kunit_test_suites_for_module(unique_array); \
static struct kunit_suite **unique_suites \
__used __section(.kunit_test_suites) = unique_array
__used __section(".kunit_test_suites") = unique_array
/**
* kunit_test_suites() - used to register one or more &struct kunit_suite
......
@@ -1153,7 +1153,7 @@ struct acpi_probe_entry {
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
__used __section(__##table##_acpi_probe_table) = { \
__used __section("__" #table "_acpi_probe_table") = { \
.id = table_id, \
.type = subtable, \
.subtable_valid = valid, \
@@ -1164,7 +1164,7 @@ struct acpi_probe_entry {
#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \
subtable, valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
__used __section(__##table##_acpi_probe_table) = { \
__used __section("__" #table "_acpi_probe_table") = { \
.id = table_id, \
.type = subtable, \
.subtable_valid = valid, \
......
@@ -34,7 +34,7 @@
* but may get written to during init, so can't live in .rodata (via "const").
*/
#ifndef __ro_after_init
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#define __ro_after_init __section(".data..ro_after_init")
#endif
#ifndef ____cacheline_aligned
......
@@ -24,7 +24,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
long ______r; \
static struct ftrace_likely_data \
__aligned(4) \
__section(_ftrace_annotated_branch) \
__section("_ftrace_annotated_branch") \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
@@ -60,7 +60,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __trace_if_value(cond) ({ \
static struct ftrace_branch_data \
__aligned(4) \
__section(_ftrace_branch) \
__section("_ftrace_branch") \
__if_trace = { \
.func = __func__, \
.file = __FILE__, \
@@ -118,7 +118,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
".popsection\n\t"
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)
#define __annotate_jump_table __section(".rodata..c_jump_table")
#else
#define annotate_reachable()
@@ -206,7 +206,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* visible to the compiler.
*/
#define __ADDRESSABLE(sym) \
static void * __section(.discard.addressable) __used \
static void * __section(".discard.addressable") __used \
__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
/**
......
@@ -254,7 +254,7 @@
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate
*/
#define __section(S) __attribute__((__section__(#S)))
#define __section(section) __attribute__((__section__(section)))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
......
@@ -173,7 +173,7 @@ void cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
/* Attach to any functions which should be considered cpuidle. */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
#define __cpuidle __section(".cpuidle.text")
bool cpu_in_idle(unsigned long pc);
......
@@ -84,7 +84,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
static struct _ddebug __aligned(8) \
__section(__dyndbg) name = { \
__section("__dyndbg") name = { \
.modname = KBUILD_MODNAME, \
.function = __func__, \
.filename = __FILE__, \
......
@@ -130,7 +130,7 @@ struct kernel_symbol {
* discarded in the final link stage.
*/
#define __ksym_marker(sym) \
static int __ksym_marker_##sym[0] __section(.discard.ksym) __used
static int __ksym_marker_##sym[0] __section(".discard.ksym") __used
#define __EXPORT_SYMBOL(sym, sec, ns) \
__ksym_marker(sym); \
......
@@ -36,7 +36,7 @@ struct builtin_fw {
#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
__used __section(.builtin_fw) = { name, blob, size }
__used __section(".builtin_fw") = { name, blob, size }
#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
int request_firmware(const struct firmware **fw, const char *name,
......
@@ -47,11 +47,11 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
#define __init __section(.init.text) __cold __latent_entropy __noinitretpoline
#define __initdata __section(.init.data)
#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
#define __exit_call __used __section(.exitcall.exit)
#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline
#define __initdata __section(".init.data")
#define __initconst __section(".init.rodata")
#define __exitdata __section(".exit.data")
#define __exit_call __used __section(".exitcall.exit")
/*
* modpost check for section mismatches during the kernel build.
@@ -70,9 +70,9 @@
*
* The markers follow same syntax rules as __init / __initdata.
*/
#define __ref __section(.ref.text) noinline
#define __refdata __section(.ref.data)
#define __refconst __section(.ref.rodata)
#define __ref __section(".ref.text") noinline
#define __refdata __section(".ref.data")
#define __refconst __section(".ref.rodata")
#ifdef MODULE
#define __exitused
@@ -80,16 +80,16 @@
#define __exitused __used
#endif
#define __exit __section(.exit.text) __exitused __cold notrace
#define __exit __section(".exit.text") __exitused __cold notrace
/* Used for MEMORY_HOTPLUG */
#define __meminit __section(.meminit.text) __cold notrace \
#define __meminit __section(".meminit.text") __cold notrace \
__latent_entropy
#define __meminitdata __section(.meminit.data)
#define __meminitconst __section(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold notrace
#define __memexitdata __section(.memexit.data)
#define __memexitconst __section(.memexit.rodata)
#define __meminitdata __section(".meminit.data")
#define __meminitconst __section(".meminit.rodata")
#define __memexit __section(".memexit.text") __exitused __cold notrace
#define __memexitdata __section(".memexit.data")
#define __memexitconst __section(".memexit.rodata")
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
@@ -254,7 +254,7 @@ struct obs_kernel_param {
static const char __setup_str_##unique_id[] __initconst \
__aligned(1) = str; \
static struct obs_kernel_param __setup_##unique_id \
__used __section(.init.setup) \
__used __section(".init.setup") \
__attribute__((aligned((sizeof(long))))) \
= { __setup_str_##unique_id, fn, early }
@@ -298,7 +298,7 @@ void __init parse_early_options(char *cmdline);
#endif
/* Data marked not to be saved by software suspend */
#define __nosavedata __section(.data..nosave)
#define __nosavedata __section(".data..nosave")
#ifdef MODULE
#define __exit_p(x) x
......
@@ -40,12 +40,12 @@ extern struct cred init_cred;
/* Attach to the init_task data structure for proper alignment */
#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
#define __init_task_data __attribute__((__section__(".data..init_task")))
#define __init_task_data __section(".data..init_task")
#else
#define __init_task_data /**/
#endif
/* Attach to the thread_info data structure for proper alignment */
#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
#define __init_thread_info __section(".data..init_thread_info")
#endif
@@ -792,9 +792,9 @@ extern int arch_early_irq_init(void);
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
#ifndef __irq_entry
# define __irq_entry __attribute__((__section__(".irqentry.text")))
# define __irq_entry __section(".irqentry.text")
#endif
#define __softirq_entry __attribute__((__section__(".softirqentry.text")))
#define __softirq_entry __section(".softirqentry.text")
#endif
@@ -729,7 +729,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__trace_printk_check_format(fmt, ##args); \
@@ -773,7 +773,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
#define trace_puts(str) ({ \
static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__section("__trace_printk_fmt") = \
__builtin_constant_p(str) ? str : NULL; \
\
if (__builtin_constant_p(str)) \
@@ -795,7 +795,7 @@ extern void trace_dump_stack(int skip);
do { \
if (__builtin_constant_p(fmt)) { \
static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
......
@@ -36,8 +36,8 @@
__stringify(name))
#endif
#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
#define __page_aligned_data __section(".data..page_aligned") __aligned(PAGE_SIZE)
#define __page_aligned_bss __section(".bss..page_aligned") __aligned(PAGE_SIZE)
/*
* For assembly routines.
......
@@ -1611,12 +1611,12 @@ extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
#define DEFINE_LSM(lsm) \
static struct lsm_info __lsm_##lsm \
__used __section(.lsm_info.init) \
__used __section(".lsm_info.init") \
__aligned(sizeof(unsigned long))
#define DEFINE_EARLY_LSM(lsm) \
static struct lsm_info __early_lsm_##lsm \
__used __section(.early_lsm_info.init) \
__used __section(".early_lsm_info.init") \
__aligned(sizeof(unsigned long))
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
......
@@ -278,7 +278,7 @@ extern typeof(name) __mod_##type##__##name##_device_table \
.version = _version, \
}; \
static const struct module_version_attribute \
__used __attribute__ ((__section__ ("__modver"))) \
__used __section("__modver") \
* __moduleparam_const __modver_attr = &___modver_attr
#endif
......
@@ -22,7 +22,7 @@
#define __MODULE_INFO(tag, name, info) \
static const char __UNIQUE_ID(name)[] \
__used __attribute__((section(".modinfo"), unused, aligned(1))) \
__used __section(".modinfo") __attribute__((unused, aligned(1))) \
= __MODULE_INFO_PREFIX __stringify(tag) "=" info
#define __MODULE_PARM_TYPE(name, _type) \
@@ -289,7 +289,7 @@ struct kparam_array
static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
__used \
__attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
__section("__param") __attribute__ ((unused, aligned(sizeof(void *)))) \
= { __param_str_##name, THIS_MODULE, ops, \
VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
......
@@ -28,7 +28,7 @@
* those functions so they get relocated to ram.
*/
#ifdef CONFIG_XIP_KERNEL
#define __xipram noinline __attribute__ ((__section__ (".xiptext")))
#define __xipram noinline __section(".xiptext")
#endif
/*
......
@@ -60,7 +60,7 @@ struct unwind_hint {
* For more information, see tools/objtool/Documentation/stack-validation.txt.
*/
#define STACK_FRAME_NON_STANDARD(func) \
static void __used __section(.discard.func_stack_frame_non_standard) \
static void __used __section(".discard.func_stack_frame_non_standard") \
*__func_stack_frame_non_standard_##func = func
#else /* __ASSEMBLY__ */
......
@@ -1299,7 +1299,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__used __section(__##table##_of_table) \
__used __section("__" #table "_of_table") \
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
......
@@ -51,7 +51,7 @@
PER_CPU_ATTRIBUTES
#define __PCPU_DUMMY_ATTRS \
__attribute__((section(".discard"), unused))
__section(".discard") __attribute__((unused))
/*
* s390 and alpha modules require percpu variables to be defined as
......
@@ -437,7 +437,7 @@ extern int kptr_restrict;
#ifdef CONFIG_PRINTK
#define printk_once(fmt, ...) \
({ \
static bool __section(.data.once) __print_once; \
static bool __section(".data.once") __print_once; \
bool __ret_print_once = !__print_once; \
\
if (!__print_once) { \
@@ -448,7 +448,7 @@ extern int kptr_restrict;
})
#define printk_deferred_once(fmt, ...) \
({ \
static bool __section(.data.once) __print_once; \
static bool __section(".data.once") __print_once; \
bool __ret_print_once = !__print_once; \
\
if (!__print_once) { \
......
@@ -299,7 +299,7 @@ static inline int rcu_read_lock_any_held(void)
*/
#define RCU_LOCKDEP_WARN(c, s) \
do { \
static bool __section(.data.unlikely) __warned; \
static bool __section(".data.unlikely") __warned; \
if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
......
@@ -43,7 +43,7 @@ extern void proc_sched_set_task(struct task_struct *p);
#endif
/* Attach to any functions which should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))
#define __sched __section(".sched.text")
/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];
......
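
Where a section name is assembled from macro parameters (the
THERMAL_TABLE_ENTRY, ACPI_DECLARE_PROBE_ENTRY and _OF_DECLARE hunks
above), the quoted form relies on stringizing plus adjacent
string-literal concatenation. A minimal standalone sketch of that
pattern, using a hypothetical DEMO_TABLE_ENTRY macro and section name
(GCC/Clang on an ELF target assumed; not kernel code):

    #include <stdio.h>

    /* A quoted __section() like the kernel's: the argument is a string literal. */
    #define __section(s) __attribute__((__section__(s)))

    /*
     * "__" #table "_demo_table" stringizes the macro parameter and lets the
     * compiler concatenate the adjacent literals into one section name,
     * e.g. "__clk_demo_table" for DEMO_TABLE_ENTRY(clk, ...).
     */
    #define DEMO_TABLE_ENTRY(table, name)                  \
            static const char *__demo_entry_##name         \
            __attribute__((__used__))                      \
            __section("__" #table "_demo_table") = #name

    DEMO_TABLE_ENTRY(clk, example);

    int main(void)
    {
            printf("%s\n", __demo_entry_example);  /* prints "example" */
            return 0;
    }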