Commit f24f21c4 authored by Michael Ellerman

Merge branch 'topic/objtool' into next

Merge the powerpc objtool support, which we were keeping in a topic
branch in case of any merge conflicts.
parents 64fdcbcc a39818a3
@@ -933,7 +933,9 @@ ifdef CONFIG_FTRACE_MCOUNT_USE_CC
 endif
 endif
 ifdef CONFIG_FTRACE_MCOUNT_USE_OBJTOOL
+  ifdef CONFIG_HAVE_OBJTOOL_NOP_MCOUNT
   CC_FLAGS_USING += -DCC_USING_NOP_MCOUNT
+  endif
 endif
 ifdef CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT
 ifdef CONFIG_HAVE_C_RECORDMCOUNT
...
@@ -241,6 +241,8 @@ config PPC
 select HAVE_MOD_ARCH_SPECIFIC
 select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
 select HAVE_OPTPROBES
+select HAVE_OBJTOOL if PPC32 || MPROFILE_KERNEL
+select HAVE_OBJTOOL_MCOUNT if HAVE_OBJTOOL
 select HAVE_PERF_EVENTS
 select HAVE_PERF_EVENTS_NMI if PPC64
 select HAVE_PERF_REGS
...
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ASM_H
#define _ASM_POWERPC_ASM_H
#define _ASM_PTR " .long "
#endif /* _ASM_POWERPC_ASM_H */
@@ -99,7 +99,8 @@
 __label__ __label_warn_on; \
 \
 WARN_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags), __label_warn_on); \
-unreachable(); \
+barrier_before_unreachable(); \
+__builtin_unreachable(); \
 \
 __label_warn_on: \
 break; \
...
@@ -4,6 +4,9 @@
 #include <asm/types.h>
+#define __ALIGN .align 2
+#define __ALIGN_STR ".align 2"
 #ifdef CONFIG_PPC64_ELF_ABI_V1
 #define cond_syscall(x) \
 asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
...
@@ -4,6 +4,8 @@
 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 */
+#include <linux/linkage.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/cputable.h>
@@ -81,7 +83,7 @@ _GLOBAL(__setup_cpu_745x)
 blr
 /* Enable caches for 603's, 604, 750 & 7400 */
-setup_common_caches:
+SYM_FUNC_START_LOCAL(setup_common_caches)
 mfspr r11,SPRN_HID0
 andi. r0,r11,HID0_DCE
 ori r11,r11,HID0_ICE|HID0_DCE
@@ -95,11 +97,12 @@ setup_common_caches:
 sync
 isync
 blr
+SYM_FUNC_END(setup_common_caches)
 /* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table
 */
-setup_604_hid0:
+SYM_FUNC_START_LOCAL(setup_604_hid0)
 mfspr r11,SPRN_HID0
 ori r11,r11,HID0_SIED|HID0_BHTE
 ori r8,r11,HID0_BTCD
@@ -110,6 +113,7 @@ setup_604_hid0:
 sync
 isync
 blr
+SYM_FUNC_END(setup_604_hid0)
 /* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
 * erratas we work around here.
@@ -125,13 +129,14 @@ setup_604_hid0:
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware at least).
 */
-setup_7400_workarounds:
+SYM_FUNC_START_LOCAL(setup_7400_workarounds)
 mfpvr r3
 rlwinm r3,r3,0,20,31
 cmpwi 0,r3,0x0207
 ble 1f
 blr
-setup_7410_workarounds:
+SYM_FUNC_END(setup_7400_workarounds)
+SYM_FUNC_START_LOCAL(setup_7410_workarounds)
 mfpvr r3
 rlwinm r3,r3,0,20,31
 cmpwi 0,r3,0x0100
@@ -151,6 +156,7 @@ setup_7410_workarounds:
 sync
 isync
 blr
+SYM_FUNC_END(setup_7410_workarounds)
 /* 740/750/7400/7410
 * Enable Store Gathering (SGE), Address Broadcast (ABE),
@@ -158,7 +164,7 @@ setup_7410_workarounds:
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
 */
-setup_750_7400_hid0:
+SYM_FUNC_START_LOCAL(setup_750_7400_hid0)
 mfspr r11,SPRN_HID0
 ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
 oris r11,r11,HID0_DPM@h
@@ -177,12 +183,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
 sync
 isync
 blr
+SYM_FUNC_END(setup_750_7400_hid0)
 /* 750cx specific
 * Looks like we have to disable NAP feature for some PLL settings...
 * (waiting for confirmation)
 */
-setup_750cx:
+SYM_FUNC_START_LOCAL(setup_750cx)
 mfspr r10, SPRN_HID1
 rlwinm r10,r10,4,28,31
 cmpwi cr0,r10,7
@@ -196,11 +203,13 @@ setup_750cx:
 andc r6,r6,r7
 stw r6,CPU_SPEC_FEATURES(r4)
 blr
+SYM_FUNC_END(setup_750cx)
 /* 750fx specific
 */
-setup_750fx:
+SYM_FUNC_START_LOCAL(setup_750fx)
 blr
+SYM_FUNC_END(setup_750fx)
 /* MPC 745x
 * Enable Store Gathering (SGE), Branch Folding (FOLD)
@@ -212,7 +221,7 @@ setup_750fx:
 * Clear Instruction cache throttling (ICTC)
 * Enable L2 HW prefetch
 */
-setup_745x_specifics:
+SYM_FUNC_START_LOCAL(setup_745x_specifics)
 /* We check for the presence of an L3 cache setup by
 * the firmware. If any, we disable NAP capability as
 * it's known to be bogus on rev 2.1 and earlier
@@ -270,6 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
 sync
 isync
 blr
+SYM_FUNC_END(setup_745x_specifics)
 /*
 * Initialize the FPU registers. This is needed to work around an errata
...
@@ -8,6 +8,8 @@
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 */
+#include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/cputable.h>
@@ -274,7 +276,7 @@ _GLOBAL(flush_dcache_L1)
 blr
-has_L2_cache:
+SYM_FUNC_START_LOCAL(has_L2_cache)
 /* skip L2 cache on P2040/P2040E as they have no L2 cache */
 mfspr r3, SPRN_SVR
 /* shift right by 8 bits and clear E bit of SVR */
@@ -290,9 +292,10 @@ has_L2_cache:
 1:
 li r3, 0
 blr
+SYM_FUNC_END(has_L2_cache)
 /* flush backside L2 cache */
-flush_backside_L2_cache:
+SYM_FUNC_START_LOCAL(flush_backside_L2_cache)
 mflr r10
 bl has_L2_cache
 mtlr r10
@@ -313,6 +316,7 @@ flush_backside_L2_cache:
 bne 1b
 2:
 blr
+SYM_FUNC_END(flush_backside_L2_cache)
 _GLOBAL(cpu_down_flush_e500v2)
 mflr r0
...
@@ -18,6 +18,8 @@
 #include <linux/err.h>
 #include <linux/sys.h>
 #include <linux/threads.h>
+#include <linux/linkage.h>
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -74,17 +76,18 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
 #endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
 #if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
-.globl __kuep_lock
-__kuep_lock:
+SYM_FUNC_START(__kuep_lock)
 lwz r9, THREAD+THSR0(r2)
 update_user_segments_by_4 r9, r10, r11, r12
 blr
+SYM_FUNC_END(__kuep_lock)
-__kuep_unlock:
+SYM_FUNC_START_LOCAL(__kuep_unlock)
 lwz r9, THREAD+THSR0(r2)
 rlwinm r9,r9,0,~SR_NX
 update_user_segments_by_4 r9, r10, r11, r12
 blr
+SYM_FUNC_END(__kuep_unlock)
 .macro kuep_lock
 bl __kuep_lock
...
@@ -14,6 +14,7 @@
 * code, and exception/interrupt return code for PowerPC.
 */
+#include <linux/objtool.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <asm/cache.h>
@@ -73,6 +74,7 @@ flush_branch_caches:
 // Flush the link stack
 .rept 64
+ANNOTATE_INTRA_FUNCTION_CALL
 bl .+4
 .endr
 b 1f
...
@@ -13,6 +13,7 @@
 *
 */
+#include <linux/linkage.h>
 #include <asm/hw_irq.h>
 #include <asm/exception-64s.h>
 #include <asm/ptrace.h>
@@ -3140,7 +3141,7 @@ _GLOBAL(enable_machine_check)
 blr
 /* MSR[RI] should be clear because this uses SRR[01] */
-disable_machine_check:
+SYM_FUNC_START_LOCAL(disable_machine_check)
 mflr r0
 bcl 20,31,$+4
 0: mflr r3
@@ -3153,3 +3154,4 @@ disable_machine_check:
 RFI_TO_KERNEL
 1: mtlr r0
 blr
+SYM_FUNC_END(disable_machine_check)
@@ -28,6 +28,8 @@
 #include <linux/init.h>
 #include <linux/pgtable.h>
 #include <linux/sizes.h>
+#include <linux/linkage.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -662,7 +664,7 @@ start_here:
 * kernel initialization. This maps the first 32 MBytes of memory 1:1
 * virtual to physical and more importantly sets the cache mode.
 */
-initial_mmu:
+SYM_FUNC_START_LOCAL(initial_mmu)
 tlbia /* Invalidate all TLB entries */
 isync
@@ -711,6 +713,7 @@ initial_mmu:
 mtspr SPRN_EVPR,r0
 blr
+SYM_FUNC_END(initial_mmu)
 _GLOBAL(abort)
 mfspr r13,SPRN_DBCR0
...
@@ -18,6 +18,7 @@
 * variants.
 */
+#include <linux/linkage.h>
 #include <linux/threads.h>
 #include <linux/init.h>
 #include <asm/reg.h>
@@ -462,7 +463,7 @@ generic_secondary_common_init:
 * Assumes we're mapped EA == RA if the MMU is on.
 */
 #ifdef CONFIG_PPC_BOOK3S
-__mmu_off:
+SYM_FUNC_START_LOCAL(__mmu_off)
 mfmsr r3
 andi. r0,r3,MSR_IR|MSR_DR
 beqlr
@@ -473,6 +474,7 @@ __mmu_off:
 sync
 rfid
 b . /* prevent speculative execution */
+SYM_FUNC_END(__mmu_off)
 #endif
@@ -869,7 +871,7 @@ _GLOBAL(start_secondary_resume)
 /*
 * This subroutine clobbers r11 and r12
 */
-enable_64b_mode:
+SYM_FUNC_START_LOCAL(enable_64b_mode)
 mfmsr r11 /* grab the current MSR */
 #ifdef CONFIG_PPC_BOOK3E_64
 oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
@@ -881,6 +883,7 @@ enable_64b_mode:
 isync
 #endif
 blr
+SYM_FUNC_END(enable_64b_mode)
 /*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
...
@@ -29,6 +29,8 @@
 #include <linux/init.h>
 #include <linux/threads.h>
 #include <linux/pgtable.h>
+#include <linux/linkage.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -885,7 +887,7 @@ KernelSPE:
 * Translate the effec addr in r3 to phys addr. The phys addr will be put
 * into r3(higher 32bit) and r4(lower 32bit)
 */
-get_phys_addr:
+SYM_FUNC_START_LOCAL(get_phys_addr)
 mfmsr r8
 mfspr r9,SPRN_PID
 rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */
@@ -907,6 +909,7 @@ get_phys_addr:
 mfspr r3,SPRN_MAS7
 #endif
 blr
+SYM_FUNC_END(get_phys_addr)
 /*
 * Global functions
...
@@ -18,6 +18,8 @@
 #include <linux/magic.h>
 #include <linux/pgtable.h>
 #include <linux/sizes.h>
+#include <linux/linkage.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -625,7 +627,7 @@ start_here:
 * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by
 * these mappings is mapped by page tables.
 */
-initial_mmu:
+SYM_FUNC_START_LOCAL(initial_mmu)
 li r8, 0
 mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */
 lis r10, MD_TWAM@h
@@ -686,6 +688,7 @@ initial_mmu:
 #endif
 mtspr SPRN_DER, r8
 blr
+SYM_FUNC_END(initial_mmu)
 _GLOBAL(mmu_pin_tlb)
 lis r9, (1f - PAGE_OFFSET)@h
...
@@ -18,6 +18,8 @@
 #include <linux/init.h>
 #include <linux/pgtable.h>
+#include <linux/linkage.h>
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -877,7 +879,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
 * Load stuff into the MMU. Intended to be called with
 * IR=0 and DR=0.
 */
-early_hash_table:
+SYM_FUNC_START_LOCAL(early_hash_table)
 sync /* Force all PTE updates to finish */
 isync
 tlbia /* Clear all TLB entries */
@@ -888,8 +890,9 @@ early_hash_table:
 ori r6, r6, 3 /* 256kB table */
 mtspr SPRN_SDR1, r6
 blr
+SYM_FUNC_END(early_hash_table)
-load_up_mmu:
+SYM_FUNC_START_LOCAL(load_up_mmu)
 sync /* Force all PTE updates to finish */
 isync
 tlbia /* Clear all TLB entries */
@@ -918,6 +921,7 @@ BEGIN_MMU_FTR_SECTION
 LOAD_BAT(7,r3,r4,r5)
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 blr
+SYM_FUNC_END(load_up_mmu)
 _GLOBAL(load_segment_registers)
 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
@@ -1028,7 +1032,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
 * this makes sure it's done.
 * -- Cort
 */
-clear_bats:
+SYM_FUNC_START_LOCAL(clear_bats)
 li r10,0
 mtspr SPRN_DBAT0U,r10
@@ -1072,6 +1076,7 @@ BEGIN_MMU_FTR_SECTION
 mtspr SPRN_IBAT7L,r10
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 blr
+SYM_FUNC_END(clear_bats)
 _GLOBAL(update_bats)
 lis r4, 1f@h
@@ -1108,15 +1113,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 mtspr SPRN_SRR1, r6
 rfi
-flush_tlbs:
+SYM_FUNC_START_LOCAL(flush_tlbs)
 lis r10, 0x40
 1: addic. r10, r10, -0x1000
 tlbie r10
 bgt 1b
 sync
 blr
+SYM_FUNC_END(flush_tlbs)
-mmu_off:
+SYM_FUNC_START_LOCAL(mmu_off)
 addi r4, r3, __after_mmu_off - _start
 mfmsr r3
 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
@@ -1128,9 +1134,10 @@ mmu_off:
 mtspr SPRN_SRR1,r3
 sync
 rfi
+SYM_FUNC_END(mmu_off)
 /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
-initial_bats:
+SYM_FUNC_START_LOCAL(initial_bats)
 lis r11,PAGE_OFFSET@h
 tophys(r8,r11)
 #ifdef CONFIG_SMP
@@ -1146,9 +1153,10 @@ initial_bats:
 mtspr SPRN_IBAT0U,r11
 isync
 blr
+SYM_FUNC_END(initial_bats)
 #ifdef CONFIG_BOOTX_TEXT
-setup_disp_bat:
+SYM_FUNC_START_LOCAL(setup_disp_bat)
 /*
 * setup the display bat prepared for us in prom.c
 */
@@ -1164,10 +1172,11 @@ setup_disp_bat:
 mtspr SPRN_DBAT3L,r8
 mtspr SPRN_DBAT3U,r11
 blr
+SYM_FUNC_END(setup_disp_bat)
 #endif /* CONFIG_BOOTX_TEXT */
 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-setup_cpm_bat:
+SYM_FUNC_START_LOCAL(setup_cpm_bat)
 lis r8, 0xf000
 ori r8, r8, 0x002a
 mtspr SPRN_DBAT1L, r8
@@ -1177,10 +1186,11 @@ setup_cpm_bat:
 mtspr SPRN_DBAT1U, r11
 blr
+SYM_FUNC_END(setup_cpm_bat)
 #endif
 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
-setup_usbgecko_bat:
+SYM_FUNC_START_LOCAL(setup_usbgecko_bat)
 /* prepare a BAT for early io */
 #if defined(CONFIG_GAMECUBE)
 lis r8, 0x0c00
@@ -1199,6 +1209,7 @@ setup_usbgecko_bat:
 mtspr SPRN_DBAT1L, r8
 mtspr SPRN_DBAT1U, r11
 blr
+SYM_FUNC_END(setup_usbgecko_bat)
 #endif
 .data
@@ -9,6 +9,7 @@
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 */
+#include <linux/linkage.h>
 #include <linux/sys.h>
 #include <asm/unistd.h>
 #include <asm/errno.h>
@@ -353,7 +354,7 @@ _GLOBAL(kexec_smp_wait)
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
-real_mode: /* assume normal blr return */
+SYM_FUNC_START_LOCAL(real_mode) /* assume normal blr return */
 #ifdef CONFIG_PPC_BOOK3E_64
 /* Create an identity mapping. */
 b kexec_create_tlb
@@ -370,6 +371,7 @@ real_mode: /* assume normal blr return */
 mtspr SPRN_SRR0,r11
 rfid
 #endif
+SYM_FUNC_END(real_mode)
 /*
 * kexec_sequence(newstack, start, image, control, clear_all(),
...
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/threads.h>
+#include <linux/linkage.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/cputable.h>
@@ -400,7 +402,7 @@ _ASM_NOKPROBE_SYMBOL(swsusp_arch_resume)
 /* FIXME:This construct is actually not useful since we don't shut
 * down the instruction MMU, we could just flip back MSR-DR on.
 */
-turn_on_mmu:
+SYM_FUNC_START_LOCAL(turn_on_mmu)
 mflr r4
 mtsrr0 r4
 mtsrr1 r3
@@ -408,4 +410,5 @@ turn_on_mmu:
 isync
 rfi
 _ASM_NOKPROBE_SYMBOL(turn_on_mmu)
+SYM_FUNC_END(turn_on_mmu)
@@ -102,3 +102,5 @@ quiet_cmd_vdso64ld_and_check = VDSO64L $@
 cmd_vdso64ld_and_check = $(VDSOCC) $(c_flags) $(CC64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check)
 quiet_cmd_vdso64as = VDSO64A $@
 cmd_vdso64as = $(VDSOCC) $(a_flags) $(CC64FLAGS) $(AS64FLAGS) -c -o $@ $<
+
+OBJECT_FILES_NON_STANDARD := y
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/reg.h>
@@ -185,7 +186,7 @@ fphalf:
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
-fpenable:
+SYM_FUNC_START_LOCAL(fpenable)
 #ifdef CONFIG_PPC32
 stwu r1,-64(r1)
 #else
@@ -202,6 +203,7 @@ fpenable:
 mffs fr31
 MTFSF_L(fr1)
 blr
+SYM_FUNC_END(fpenable)
 fpdisable:
 mtlr r12
...
@@ -9,6 +9,7 @@
 * Authors: Alexander Graf <agraf@suse.de>
 */
+#include <linux/linkage.h>
 #include <asm/ppc_asm.h>
 #include <asm/kvm_asm.h>
 #include <asm/reg.h>
@@ -107,7 +108,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 /*
 * void kvmhv_save_host_pmu(void)
 */
-kvmhv_save_host_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_save_host_pmu)
 BEGIN_FTR_SECTION
 /* Work around P8 PMAE bug */
 li r3, -1
@@ -154,3 +155,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 stw r8, HSTATE_PMC5(r13)
 stw r9, HSTATE_PMC6(r13)
 31: blr
+SYM_FUNC_END(kvmhv_save_host_pmu)
@@ -10,6 +10,8 @@
 * Authors: Alexander Graf <agraf@suse.de>
 */
+#include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/ppc_asm.h>
 #include <asm/code-patching-asm.h>
 #include <asm/kvm_asm.h>
@@ -1522,12 +1524,14 @@ kvm_flush_link_stack:
 /* Flush the link stack. On Power8 it's up to 32 entries in size. */
 .rept 32
+ANNOTATE_INTRA_FUNCTION_CALL
 bl .+4
 .endr
 /* And on Power9 it's up to 64. */
 BEGIN_FTR_SECTION
 .rept 32
+ANNOTATE_INTRA_FUNCTION_CALL
 bl .+4
 .endr
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -2358,7 +2362,7 @@ hmi_realmode:
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
-kvmppc_check_wake_reason:
+SYM_FUNC_START_LOCAL(kvmppc_check_wake_reason)
 mfspr r6, SPRN_SRR1
 BEGIN_FTR_SECTION
 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
@@ -2427,6 +2431,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 addi r1, r1, PPC_MIN_STKFRM
 mtlr r0
 blr
+SYM_FUNC_END(kvmppc_check_wake_reason)
 /*
 * Save away FP, VMX and VSX registers.
@@ -2434,7 +2439,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
-kvmppc_save_fp:
+SYM_FUNC_START_LOCAL(kvmppc_save_fp)
 mflr r30
 mr r31,r3
 mfmsr r5
@@ -2462,6 +2467,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 stw r6,VCPU_VRSAVE(r31)
 mtlr r30
 blr
+SYM_FUNC_END(kvmppc_save_fp)
 /*
 * Load up FP, VMX and VSX registers
@@ -2469,7 +2475,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
-kvmppc_load_fp:
+SYM_FUNC_START_LOCAL(kvmppc_load_fp)
 mflr r30
 mr r31,r4
 mfmsr r9
@@ -2498,6 +2504,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 mtlr r30
 mr r4,r31
 blr
+SYM_FUNC_END(kvmppc_load_fp)
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 /*
@@ -2746,7 +2753,7 @@ kvmppc_bad_host_intr:
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
-kvmppc_msr_interrupt:
+SYM_FUNC_START_LOCAL(kvmppc_msr_interrupt)
 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
 cmpwi r0, 2 /* Check if we are in transactional state.. */
 ld r11, VCPU_INTR_MSR(r9)
@@ -2755,13 +2762,14 @@ kvmppc_msr_interrupt:
 li r0, 1
 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
 blr
+SYM_FUNC_END(kvmppc_msr_interrupt)
 /*
 * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu)
 *
 * Load up guest PMU state. R3 points to the vcpu struct.
 */
-kvmhv_load_guest_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_load_guest_pmu)
 mr r4, r3
 mflr r0
 li r3, 1
@@ -2811,13 +2819,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 isync
 mtlr r0
 blr
+SYM_FUNC_END(kvmhv_load_guest_pmu)
 /*
 * void kvmhv_load_host_pmu(void)
 *
 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
 */
-kvmhv_load_host_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_load_host_pmu)
 mflr r0
 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
 cmpwi r4, 0
@@ -2859,6 +2868,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 isync
 mtlr r0
 23: blr
+SYM_FUNC_END(kvmhv_load_host_pmu)
 /*
 * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use)
@@ -2866,7 +2876,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
-kvmhv_save_guest_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_save_guest_pmu)
 mr r9, r3
 mr r8, r4
 BEGIN_FTR_SECTION
@@ -2942,6 +2952,7 @@ BEGIN_FTR_SECTION
 mtspr SPRN_MMCRS, r4
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 22: blr
+SYM_FUNC_END(kvmhv_save_guest_pmu)
 /*
 * This works around a hardware bug on POWER8E processors, where
...
@@ -6,6 +6,8 @@
 */
 #include <linux/pgtable.h>
+#include <linux/linkage.h>
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -110,18 +112,22 @@ FPS_THREE_IN(fsel)
 * R8 = (double*)&param3 [load_three]
 * LR = instruction call function
 */
-fpd_load_three:
+SYM_FUNC_START_LOCAL(fpd_load_three)
 lfd 2,0(r8) /* load param3 */
-fpd_load_two:
+SYM_FUNC_START_LOCAL(fpd_load_two)
 lfd 1,0(r7) /* load param2 */
-fpd_load_one:
+SYM_FUNC_START_LOCAL(fpd_load_one)
 lfd 0,0(r6) /* load param1 */
-fpd_load_none:
+SYM_FUNC_START_LOCAL(fpd_load_none)
 lfd 3,0(r3) /* load up fpscr value */
 MTFSF_L(3)
 lwz r6, 0(r4) /* load cr */
 mtcr r6
 blr
+SYM_FUNC_END(fpd_load_none)
+SYM_FUNC_END(fpd_load_one)
+SYM_FUNC_END(fpd_load_two)
+SYM_FUNC_END(fpd_load_three)
 /*
 * End of double instruction processing
@@ -131,13 +137,14 @@ fpd_load_none:
 * R5 = (double*)&result
 * LR = caller of instruction call function
 */
-fpd_return:
+SYM_FUNC_START_LOCAL(fpd_return)
 mfcr r6
 stfd 0,0(r5) /* save result */
 mffs 0
 stfd 0,0(r3) /* save new fpscr value */
 stw r6,0(r4) /* save new cr value */
 blr
+SYM_FUNC_END(fpd_return)
 /*
 * Double operation with no input operand
...
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
 #include <asm/reg.h>
 #include <asm/ppc_asm.h>
 #include <asm/processor.h>
@@ -178,7 +180,8 @@ sram_code:
 /* local udelay in sram is needed */
-udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
+SYM_FUNC_START_LOCAL(udelay)
+/* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
 mullw r12, r12, r11
 mftb r13 /* start */
 add r12, r13, r12 /* end */
@@ -187,6 +190,7 @@ sram_code:
 cmp cr0, r13, r12
 blt 1b
 blr
+SYM_FUNC_END(udelay)
 sram_code_end:
@@ -271,7 +275,7 @@ _ASM_NOKPROBE_SYMBOL(lite5200_wakeup)
 SAVE_SR(n+2, addr+2); \
 SAVE_SR(n+3, addr+3);
-save_regs:
+SYM_FUNC_START_LOCAL(save_regs)
 stw r0, 0(r4)
 stw r1, 0x4(r4)
 stw r2, 0x8(r4)
@@ -317,6 +321,7 @@ save_regs:
 SAVE_SPRN(TBRU, 0x5b)
 blr
+SYM_FUNC_END(save_regs)
 /* restore registers */
@@ -336,7 +341,7 @@ save_regs:
 LOAD_SR(n+2, addr+2); \
 LOAD_SR(n+3, addr+3);
-restore_regs:
+SYM_FUNC_START_LOCAL(restore_regs)
 lis r4, registers@h
 ori r4, r4, registers@l
@@ -393,6 +398,7 @@ restore_regs:
 blr
 _ASM_NOKPROBE_SYMBOL(restore_regs)
+SYM_FUNC_END(restore_regs)
@@ -403,7 +409,7 @@ _ASM_NOKPROBE_SYMBOL(restore_regs)
 * Flush data cache
 * Do this by just reading lots of stuff into the cache.
 */
-flush_data_cache:
+SYM_FUNC_START_LOCAL(flush_data_cache)
 lis r3,CONFIG_KERNEL_START@h
 ori r3,r3,CONFIG_KERNEL_START@l
 li r4,NUM_CACHE_LINES
@@ -413,3 +419,4 @@ flush_data_cache:
 addi r3,r3,L1_CACHE_BYTES /* Next line, please */
 bdnz 1b
 blr
+SYM_FUNC_END(flush_data_cache)
@@ -195,6 +195,7 @@ config X86
 select HAVE_CONTEXT_TRACKING_USER_OFFSTACK if HAVE_CONTEXT_TRACKING_USER
 select HAVE_C_RECORDMCOUNT
 select HAVE_OBJTOOL_MCOUNT if HAVE_OBJTOOL
+select HAVE_OBJTOOL_NOP_MCOUNT if HAVE_OBJTOOL_MCOUNT
 select HAVE_BUILDTIME_MCOUNT_SORT
 select HAVE_DEBUG_KMEMLEAK
 select HAVE_DMA_CONTIGUOUS
...
@@ -19,3 +19,5 @@ targets += aesp8-ppc.S ghashp8-ppc.S
 $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
 $(call if_changed,perl)
+
+OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
@@ -82,6 +82,13 @@ config HAVE_OBJTOOL_MCOUNT
 	help
 	  Arch supports objtool --mcount
+config HAVE_OBJTOOL_NOP_MCOUNT
+	bool
+	help
+	  Arch supports the objtool options --mcount with --mnop.
+	  An architecture can select this if it wants to enable nop'ing
+	  of ftrace locations.
+
 config HAVE_C_RECORDMCOUNT
 	bool
 	help
...
@@ -256,6 +256,9 @@ objtool-args-$(CONFIG_HAVE_JUMP_LABEL_HACK) += --hacks=jump_label
 objtool-args-$(CONFIG_HAVE_NOINSTR_HACK) += --hacks=noinstr
 objtool-args-$(CONFIG_X86_KERNEL_IBT) += --ibt
 objtool-args-$(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL) += --mcount
+ifdef CONFIG_FTRACE_MCOUNT_USE_OBJTOOL
+objtool-args-$(CONFIG_HAVE_OBJTOOL_NOP_MCOUNT) += --mnop
+endif
 objtool-args-$(CONFIG_UNWINDER_ORC) += --orc
 objtool-args-$(CONFIG_RETPOLINE) += --retpoline
 objtool-args-$(CONFIG_RETHUNK) += --rethunk
...
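Taken together with the HAVE_OBJTOOL_NOP_MCOUNT entries above, this means every CONFIG_FTRACE_MCOUNT_USE_OBJTOOL build passes --mcount to objtool, and --mnop is added only where the architecture opts in. A rough sketch of the resulting invocations (the object name is illustrative, not taken from this commit):

    # x86-64, which selects HAVE_OBJTOOL_NOP_MCOUNT: mcount call sites are nop'ed in place
    objtool --mcount --mnop vmlinux.o
    # powerpc, which does not: call sites are only collected into __mcount_loc
    objtool --mcount vmlinux.o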
objtool-y += decode.o
objtool-y += special.o
// SPDX-License-Identifier: GPL-2.0-or-later
#include <stdio.h>
#include <stdlib.h>

#include <objtool/check.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
#include <objtool/builtin.h>
#include <objtool/endianness.h>

int arch_ftrace_match(char *name)
{
	return !strcmp(name, "_mcount");
}

unsigned long arch_dest_reloc_offset(int addend)
{
	return addend;
}

bool arch_callee_saved_reg(unsigned char reg)
{
	return false;
}

int arch_decode_hint_reg(u8 sp_reg, int *base)
{
	exit(-1);
}

const char *arch_nop_insn(int len)
{
	exit(-1);
}

const char *arch_ret_insn(int len)
{
	exit(-1);
}

int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
			    unsigned long offset, unsigned int maxlen,
			    unsigned int *len, enum insn_type *type,
			    unsigned long *immediate,
			    struct list_head *ops_list)
{
	unsigned int opcode;
	enum insn_type typ;
	unsigned long imm;
	u32 insn;

	insn = bswap_if_needed(file->elf, *(u32 *)(sec->data->d_buf + offset));
	opcode = insn >> 26;
	typ = INSN_OTHER;
	imm = 0;

	switch (opcode) {
	case 18: /* b[l][a] */
		if ((insn & 3) == 1) /* bl */
			typ = INSN_CALL;

		imm = insn & 0x3fffffc;
		if (imm & 0x2000000)
			imm -= 0x4000000;
		break;
	}

	if (opcode == 1)
		*len = 8;
	else
		*len = 4;

	*type = typ;
	*immediate = imm;

	return 0;
}
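For illustration, here is a standalone sketch (not part of the commit; the helper name is made up) of how the opcode-18 handling above behaves on concrete powerpc instruction words, including the sign extension of the 26-bit LI field:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the branch-displacement decode above. */
static long b_displacement(uint32_t insn)
{
	long imm = insn & 0x3fffffc;	/* LI field; low two bits are AA/LK */

	if (imm & 0x2000000)		/* top bit set: backwards branch */
		imm -= 0x4000000;	/* sign-extend the 26-bit field */
	return imm;
}

int main(void)
{
	/* 0x48000005 encodes "bl .+4": opcode 18, LK=1, so INSN_CALL */
	printf("%+ld\n", b_displacement(0x48000005));	/* prints +4 */
	/* 0x4bfffff9 encodes "bl .-8": a negative displacement */
	printf("%+ld\n", b_displacement(0x4bfffff9));	/* prints -8 */
	return 0;
}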
unsigned long arch_jump_destination(struct instruction *insn)
{
	return insn->offset + insn->immediate;
}

bool arch_pc_relative_reloc(struct reloc *reloc)
{
	/*
	 * The powerpc build only allows certain relocation types, see
	 * relocs_check.sh, and none of those accepted are PC relative.
	 */
	return false;
}

void arch_initial_func_cfi_state(struct cfi_init_state *state)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		state->regs[i].base = CFI_UNDEFINED;
		state->regs[i].offset = 0;
	}

	/* initial CFA (call frame address) */
	state->cfa.base = CFI_SP;
	state->cfa.offset = 0;

	/* initial LR (return address) */
	state->regs[CFI_RA].base = CFI_CFA;
	state->regs[CFI_RA].offset = 0;
}
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _OBJTOOL_CFI_REGS_H
#define _OBJTOOL_CFI_REGS_H
#define CFI_BP 1
#define CFI_SP CFI_BP
#define CFI_RA 32
#define CFI_NUM_REGS 33
#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _OBJTOOL_ARCH_ELF
#define _OBJTOOL_ARCH_ELF
#define R_NONE R_PPC_NONE
#define R_ABS64 R_PPC64_ADDR64
#define R_ABS32 R_PPC_ADDR32
#endif /* _OBJTOOL_ARCH_ELF */
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _PPC_ARCH_SPECIAL_H
#define _PPC_ARCH_SPECIAL_H
#define EX_ENTRY_SIZE 8
#define EX_ORIG_OFFSET 0
#define EX_NEW_OFFSET 4
#define JUMP_ENTRY_SIZE 16
#define JUMP_ORIG_OFFSET 0
#define JUMP_NEW_OFFSET 4
#define JUMP_KEY_OFFSET 8
#define ALT_ENTRY_SIZE 12
#define ALT_ORIG_OFFSET 0
#define ALT_NEW_OFFSET 4
#define ALT_FEATURE_OFFSET 8
#define ALT_ORIG_LEN_OFFSET 10
#define ALT_NEW_LEN_OFFSET 11
#endif /* _PPC_ARCH_SPECIAL_H */
// SPDX-License-Identifier: GPL-2.0-or-later
#include <string.h>
#include <stdlib.h>

#include <objtool/special.h>
#include <objtool/builtin.h>

bool arch_support_alt_relocation(struct special_alt *special_alt,
				 struct instruction *insn,
				 struct reloc *reloc)
{
	exit(-1);
}

struct reloc *arch_find_switch_table(struct objtool_file *file,
				     struct instruction *insn)
{
	exit(-1);
}
@@ -23,6 +23,11 @@
 #include <objtool/builtin.h>
 #include <arch/elf.h>
+int arch_ftrace_match(char *name)
+{
+	return !strcmp(name, "__fentry__");
+}
+
 static int is_x86_64(const struct elf *elf)
 {
 	switch (elf->ehdr.e_machine) {
...
@@ -2,5 +2,7 @@
 #define _OBJTOOL_ARCH_ELF
 #define R_NONE R_X86_64_NONE
+#define R_ABS64 R_X86_64_64
+#define R_ABS32 R_X86_64_32
 #endif /* _OBJTOOL_ARCH_ELF */
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ARCH_ENDIANNESS_H
#define _ARCH_ENDIANNESS_H
#include <endian.h>
#define __TARGET_BYTE_ORDER __LITTLE_ENDIAN
#endif /* _ARCH_ENDIANNESS_H */
@@ -82,6 +82,7 @@ const struct option check_options[] = {
 	OPT_BOOLEAN(0, "dry-run", &opts.dryrun, "don't write modifications"),
 	OPT_BOOLEAN(0, "link", &opts.link, "object is a linked object"),
 	OPT_BOOLEAN(0, "module", &opts.module, "object is part of a kernel module"),
+	OPT_BOOLEAN(0, "mnop", &opts.mnop, "nop out mcount call sites"),
 	OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"),
 	OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"),
 	OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"),
@@ -150,6 +151,16 @@ static bool opts_valid(void)
 	return false;
 }
+static bool mnop_opts_valid(void)
+{
+	if (opts.mnop && !opts.mcount) {
+		ERROR("--mnop requires --mcount");
+		return false;
+	}
+
+	return true;
+}
+
 static bool link_opts_valid(struct objtool_file *file)
 {
 	if (opts.link)
@@ -198,6 +209,9 @@ int objtool_run(int argc, const char **argv)
 	if (!file)
 		return 1;
+	if (!mnop_opts_valid())
+		return 1;
+
 	if (!link_opts_valid(file))
 		return 1;
...
@@ -207,7 +207,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
 		return false;
 	insn = find_insn(file, func->sec, func->offset);
-	if (!insn->func)
+	if (!insn || !insn->func)
 		return false;
 	func_for_each_insn(file, func, insn) {
@@ -852,9 +852,9 @@ static int create_ibt_endbr_seal_sections(struct objtool_file *file)
 static int create_mcount_loc_sections(struct objtool_file *file)
 {
-	struct section *sec;
-	unsigned long *loc;
+	int addrsize = elf_class_addrsize(file->elf);
 	struct instruction *insn;
+	struct section *sec;
 	int idx;
 	sec = find_section_by_name(file->elf, "__mcount_loc");
@@ -871,23 +871,25 @@ static int create_mcount_loc_sections(struct objtool_file *file)
 	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
 		idx++;
-	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
+	sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx);
 	if (!sec)
 		return -1;
+	sec->sh.sh_addralign = addrsize;
 	idx = 0;
 	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
+		void *loc;
-		loc = (unsigned long *)sec->data->d_buf + idx;
-		memset(loc, 0, sizeof(unsigned long));
+		loc = sec->data->d_buf + idx;
+		memset(loc, 0, addrsize);
-		if (elf_add_reloc_to_insn(file->elf, sec,
-					  idx * sizeof(unsigned long),
-					  R_X86_64_64,
+		if (elf_add_reloc_to_insn(file->elf, sec, idx,
+					  addrsize == sizeof(u64) ? R_ABS64 : R_ABS32,
 					  insn->sec, insn->offset))
 			return -1;
-		idx++;
+		idx += addrsize;
 	}
 	return 0;
@@ -1254,17 +1256,18 @@ static void annotate_call_site(struct objtool_file *file,
 	if (opts.mcount && sym->fentry) {
 		if (sibling)
 			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);
-		if (reloc) {
-			reloc->type = R_NONE;
-			elf_write_reloc(file->elf, reloc);
-		}
-		elf_write_insn(file->elf, insn->sec,
-			       insn->offset, insn->len,
-			       arch_nop_insn(insn->len));
+		if (opts.mnop) {
+			if (reloc) {
+				reloc->type = R_NONE;
+				elf_write_reloc(file->elf, reloc);
+			}
+			elf_write_insn(file->elf, insn->sec,
+				       insn->offset, insn->len,
+				       arch_nop_insn(insn->len));
 			insn->type = INSN_NOP;
+		}
 		list_add_tail(&insn->call_node, &file->mcount_loc_list);
 		return;
@@ -2100,7 +2103,7 @@ static int read_unwind_hints(struct objtool_file *file)
 			return -1;
 		}
-		cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
+		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
 		cfi.type = hint->type;
 		cfi.end = hint->end;
@@ -2313,7 +2316,7 @@ static int classify_symbols(struct objtool_file *file)
 		if (arch_is_rethunk(func))
 			func->return_thunk = true;
-		if (!strcmp(func->name, "__fentry__"))
+		if (arch_ftrace_match(func->name))
 			func->fentry = true;
 		if (is_profiling_func(func->name))
@@ -2389,9 +2392,11 @@ static int decode_sections(struct objtool_file *file)
 	 * Must be before add_jump_destinations(), which depends on 'func'
 	 * being set for alternatives, to enable proper sibling call detection.
 	 */
-	ret = add_special_section_alts(file);
-	if (ret)
-		return ret;
+	if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
+		ret = add_special_section_alts(file);
+		if (ret)
+			return ret;
+	}
 	ret = add_jump_destinations(file);
 	if (ret)
...
@@ -1129,6 +1129,7 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec
 {
 	char *relocname;
 	struct section *sec;
+	int addrsize = elf_class_addrsize(elf);
 	relocname = malloc(strlen(base->name) + strlen(".rela") + 1);
 	if (!relocname) {
@@ -1138,7 +1139,10 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec
 	strcpy(relocname, ".rela");
 	strcat(relocname, base->name);
-	sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rela), 0);
+	if (addrsize == sizeof(u32))
+		sec = elf_create_section(elf, relocname, 0, sizeof(Elf32_Rela), 0);
+	else
+		sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rela), 0);
 	free(relocname);
 	if (!sec)
 		return NULL;
@@ -1147,7 +1151,7 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec
 	sec->base = base;
 	sec->sh.sh_type = SHT_RELA;
-	sec->sh.sh_addralign = 8;
+	sec->sh.sh_addralign = addrsize;
 	sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
 	sec->sh.sh_info = base->idx;
 	sec->sh.sh_flags = SHF_INFO_LINK;
...
@@ -69,6 +69,8 @@ struct stack_op {
 struct instruction;
+int arch_ftrace_match(char *name);
 void arch_initial_func_cfi_state(struct cfi_init_state *state);
 int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
@@ -93,4 +95,6 @@ bool arch_is_rethunk(struct symbol *sym);
 int arch_rewrite_retpolines(struct objtool_file *file);
+bool arch_pc_relative_reloc(struct reloc *reloc);
 #endif /* _ARCH_H */
@@ -31,6 +31,7 @@ struct opts {
 	bool backup;
 	bool dryrun;
 	bool link;
+	bool mnop;
 	bool module;
 	bool no_unreachable;
 	bool sec_address;
...
@@ -142,6 +142,14 @@ static inline bool has_multiple_files(struct elf *elf)
 	return elf->num_files > 1;
 }
+static inline int elf_class_addrsize(struct elf *elf)
+{
+	if (elf->ehdr.e_ident[EI_CLASS] == ELFCLASS32)
+		return sizeof(u32);
+	else
+		return sizeof(u64);
+}
+
 struct elf *elf_open_read(const char *name, int flags);
 struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
...
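As a worked illustration (a standalone sketch, not commit code): elf_class_addrsize() is what makes the __mcount_loc entries written by create_mcount_loc_sections() come out as 4-byte slots with R_ABS32 relocs for ELFCLASS32 objects (ppc32) and 8-byte slots with R_ABS64 relocs for ELFCLASS64 objects (ppc64, x86-64):

#include <elf.h>
#include <stdio.h>

/* Simplified stand-in keyed off e_ident[EI_CLASS], as in the helper above. */
static int addrsize(unsigned char ei_class)
{
	return ei_class == ELFCLASS32 ? 4 : 8;
}

int main(void)
{
	printf("ELFCLASS32 __mcount_loc entry: %d bytes\n", addrsize(ELFCLASS32));
	printf("ELFCLASS64 __mcount_loc entry: %d bytes\n", addrsize(ELFCLASS64));
	return 0;
}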
@@ -2,33 +2,33 @@
 #ifndef _OBJTOOL_ENDIANNESS_H
 #define _OBJTOOL_ENDIANNESS_H
-#include <arch/endianness.h>
 #include <linux/kernel.h>
 #include <endian.h>
+#include <objtool/elf.h>
-#ifndef __TARGET_BYTE_ORDER
-#error undefined arch __TARGET_BYTE_ORDER
-#endif
-#if __BYTE_ORDER != __TARGET_BYTE_ORDER
-#define __NEED_BSWAP 1
-#else
-#define __NEED_BSWAP 0
-#endif
 /*
- * Does a byte swap if target endianness doesn't match the host, i.e. cross
+ * Does a byte swap if target file endianness doesn't match the host, i.e. cross
 * compilation for little endian on big endian and vice versa.
 * To be used for multi-byte values conversion, which are read from / about
 * to be written to a target native endianness ELF file.
 */
-#define bswap_if_needed(val) \
+static inline bool need_bswap(struct elf *elf)
+{
+	return (__BYTE_ORDER == __LITTLE_ENDIAN) ^
+	       (elf->ehdr.e_ident[EI_DATA] == ELFDATA2LSB);
+}
+
+#define bswap_if_needed(elf, val) \
 ({ \
 	__typeof__(val) __ret; \
+	bool __need_bswap = need_bswap(elf); \
 	switch (sizeof(val)) { \
-	case 8: __ret = __NEED_BSWAP ? bswap_64(val) : (val); break; \
-	case 4: __ret = __NEED_BSWAP ? bswap_32(val) : (val); break; \
-	case 2: __ret = __NEED_BSWAP ? bswap_16(val) : (val); break; \
+	case 8: \
+		__ret = __need_bswap ? bswap_64(val) : (val); break; \
+	case 4: \
+		__ret = __need_bswap ? bswap_32(val) : (val); break; \
+	case 2: \
+		__ret = __need_bswap ? bswap_16(val) : (val); break; \
 	default: \
 		BUILD_BUG(); break; \
 	} \
...
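In effect the byte-order decision moves from compile time (the per-arch __TARGET_BYTE_ORDER machinery removed above) to run time, keyed off the object file's own ELF header, so a single objtool binary can process both big- and little-endian targets. A minimal standalone sketch of that check, with the e_ident plumbing simplified to a plain parameter:

#include <elf.h>
#include <endian.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for need_bswap(): compare host byte order
 * against the EI_DATA byte of the target file's ELF header. */
static bool need_bswap(unsigned char ei_data)
{
	return (__BYTE_ORDER == __LITTLE_ENDIAN) ^ (ei_data == ELFDATA2LSB);
}

int main(void)
{
	/* On a little-endian host this prints 0 then 1, and vice versa. */
	printf("%d\n", need_bswap(ELFDATA2LSB));
	printf("%d\n", need_bswap(ELFDATA2MSB));
	return 0;
}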
@@ -76,6 +76,7 @@ int orc_dump(const char *_objname)
 	GElf_Rela rela;
 	GElf_Sym sym;
 	Elf_Data *data, *symtab = NULL, *rela_orc_ip = NULL;
+	struct elf dummy_elf = {};
 	objname = _objname;
@@ -94,6 +95,12 @@ int orc_dump(const char *_objname)
 		return -1;
 	}
+	if (!elf64_getehdr(elf)) {
+		WARN_ELF("elf64_getehdr");
+		return -1;
+	}
+	memcpy(&dummy_elf.ehdr, elf64_getehdr(elf), sizeof(dummy_elf.ehdr));
+
 	if (elf_getshdrnum(elf, &nr_sections)) {
 		WARN_ELF("elf_getshdrnum");
 		return -1;
@@ -198,11 +205,11 @@ int orc_dump(const char *_objname)
 		printf(" sp:");
-		print_reg(orc[i].sp_reg, bswap_if_needed(orc[i].sp_offset));
+		print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset));
 		printf(" bp:");
-		print_reg(orc[i].bp_reg, bswap_if_needed(orc[i].bp_offset));
+		print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset));
 		printf(" type:%s end:%d\n",
 		       orc_type_name(orc[i].type), orc[i].end);
...
@@ -97,8 +97,8 @@ static int write_orc_entry(struct elf *elf, struct section *orc_sec,
 	/* populate ORC data */
 	orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
 	memcpy(orc, o, sizeof(*orc));
-	orc->sp_offset = bswap_if_needed(orc->sp_offset);
-	orc->bp_offset = bswap_if_needed(orc->bp_offset);
+	orc->sp_offset = bswap_if_needed(elf, orc->sp_offset);
+	orc->bp_offset = bswap_if_needed(elf, orc->bp_offset);
 	/* populate reloc for ip */
 	if (elf_add_reloc_to_insn(elf, ip_sec, idx * sizeof(int), R_X86_64_PC32,
...
@@ -87,7 +87,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
 	if (entry->feature) {
 		unsigned short feature;
-		feature = bswap_if_needed(*(unsigned short *)(sec->data->d_buf +
+		feature = bswap_if_needed(elf,
+					  *(unsigned short *)(sec->data->d_buf +
 							      offset +
 							      entry->feature));
 		arch_handle_alternative(feature, alt);
...