Commit 0ed28866 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "There's more here than we usually have at this stage, but that's
  mainly down to the stacktrace changes which came in slightly too late
  for the merge window.

  Summary:

   - Big bad batch of MAINTAINERS updates

   - Fix handling of SP alignment fault exceptions

   - Fix PSTATE.SSBS handling on heterogeneous systems

   - Fix fallout from moving to the generic vDSO implementation

   - Fix stack unwinding in the face of frame corruption

   - Fix off-by-one in IORT code

   - Minor SVE cleanups"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  ACPI/IORT: Fix off-by-one check in iort_dev_find_its_id()
  arm64: entry: SP Alignment Fault doesn't write to FAR_EL1
  arm64: Force SSBS on context switch
  MAINTAINERS: Update my email address
  MAINTAINERS: Update my email address
  MAINTAINERS: Fix spelling mistake in my name
  MAINTAINERS: Update my email address to @kernel.org
  arm64: mm: Drop pte_huge()
  arm64/sve: Fix a couple of magic numbers for the Z-reg count
  arm64/sve: Factor out FPSIMD to SVE state conversion
  arm64: stacktrace: Better handle corrupted stacks
  arm64: stacktrace: Factor out backtrace initialisation
  arm64: stacktrace: Constify stacktrace.h functions
  arm64: vdso: Cleanup Makefiles
  arm64: vdso: fix flip/flop vdso build bug
  arm64: vdso: Fix population of AT_SYSINFO_EHDR for compat vdso
parents 4792ba1f 5a46d3f7
@@ -98,6 +98,7 @@ Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
Jean Tourrilhes <jt@hpl.hp.com>
<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
@@ -116,6 +117,7 @@ John Stultz <johnstul@us.ibm.com>
Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -132,6 +134,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Mark Brown <broonie@sirena.org.uk>
Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
@@ -1194,7 +1194,7 @@ F: include/uapi/linux/if_arcnet.h
ARM ARCHITECTED TIMER DRIVER
M: Mark Rutland <mark.rutland@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
M: Marc Zyngier <maz@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/include/asm/arch_timer.h
@@ -8490,7 +8490,7 @@ S: Obsolete
F: include/uapi/linux/ipx.h
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
M: Marc Zyngier <marc.zyngier@arm.com>
M: Marc Zyngier <maz@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: Documentation/IRQ-domain.txt
@@ -8508,7 +8508,7 @@ F: kernel/irq/
IRQCHIP DRIVERS
M: Thomas Gleixner <tglx@linutronix.de>
M: Jason Cooper <jason@lakedaemon.net>
M: Marc Zyngier <marc.zyngier@arm.com>
M: Marc Zyngier <maz@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -8828,10 +8828,10 @@ F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
M: Marc Zyngier <marc.zyngier@arm.com>
M: Marc Zyngier <maz@kernel.org>
R: James Morse <james.morse@arm.com>
R: Julien Thierry <julien.thierry@arm.com>
R: Suzuki K Pouloze <suzuki.poulose@arm.com>
R: Julien Thierry <julien.thierry.kdev@gmail.com>
R: Suzuki K Poulose <suzuki.poulose@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
@@ -17124,7 +17124,7 @@ F: drivers/virtio/virtio_input.c
F: include/uapi/linux/virtio_input.h
VIRTIO IOMMU DRIVER
M: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
M: Jean-Philippe Brucker <jean-philippe@linaro.org>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/iommu/virtio-iommu.c
@@ -202,7 +202,7 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
({ \
set_thread_flag(TIF_32BIT); \
})
#ifdef CONFIG_GENERIC_COMPAT_VDSO
#ifdef CONFIG_COMPAT_VDSO
#define COMPAT_ARCH_DLINFO \
do { \
/* \
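The hunk above fixes the guard so COMPAT_ARCH_DLINFO is actually compiled in: the code tested CONFIG_GENERIC_COMPAT_VDSO, but the Kconfig symbol is CONFIG_COMPAT_VDSO, so compat tasks never received AT_SYSINFO_EHDR in their auxiliary vector. A minimal userspace check of the result might look like this (my illustration, not part of the patch):

/*
 * With the guard fixed, a 32-bit binary on an arm64 kernel built with
 * CONFIG_COMPAT_VDSO should see a non-zero vDSO base in its auxv.
 */
#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO ELF header mapped at 0x%lx\n", vdso);
	else
		printf("no AT_SYSINFO_EHDR: vDSO not advertised\n");
	return 0;
}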
@@ -301,7 +301,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
/*
* Huge pte definitions.
*/
#define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
/*
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
regs->pmr_save = GIC_PRIO_IRQON;
}
static inline void set_ssbs_bit(struct pt_regs *regs)
{
regs->pstate |= PSR_SSBS_BIT;
}
static inline void set_compat_ssbs_bit(struct pt_regs *regs)
{
regs->pstate |= PSR_AA32_SSBS_BIT;
}
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = PSR_MODE_EL0t;
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
regs->pstate |= PSR_SSBS_BIT;
set_ssbs_bit(regs);
regs->sp = sp;
}
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
#endif
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
regs->pstate |= PSR_AA32_SSBS_BIT;
set_compat_ssbs_bit(regs);
regs->compat_sp = sp;
}
@@ -8,19 +8,12 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>
struct stackframe {
unsigned long fp;
unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph;
#endif
};
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
@@ -28,6 +21,7 @@ enum stack_type {
STACK_TYPE_OVERFLOW,
STACK_TYPE_SDEI_NORMAL,
STACK_TYPE_SDEI_CRITICAL,
__NR_STACK_TYPES
};
struct stack_info {
@@ -36,6 +30,37 @@ struct stack_info {
enum stack_type type;
};
/*
* A snapshot of a frame record or fp/lr register values, along with some
* accounting information necessary for robust unwinding.
*
* @fp: The fp value in the frame record (or the real fp)
* @pc: The lr value in the frame record (or the real lr)
*
* @stacks_done: Stacks which have been entirely unwound, to which it is no
* longer valid to unwind.
*
* @prev_fp: The fp that pointed to this frame record, or a synthetic value
* of 0. This is used to ensure that within a stack, each
* subsequent frame record is at an increasing address.
* @prev_type: The type of stack this frame record was on, or a synthetic
* value of STACK_TYPE_UNKNOWN. This is used to detect a
* transition from one stack to another.
*
* @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
* replacement lr value in the ftrace graph stack.
*/
struct stackframe {
unsigned long fp;
unsigned long pc;
DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
unsigned long prev_fp;
enum stack_type prev_type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph;
#endif
};
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
@@ -64,8 +89,9 @@ static inline bool on_irq_stack(unsigned long sp,
return true;
}
static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
struct stack_info *info)
static inline bool on_task_stack(const struct task_struct *tsk,
unsigned long sp,
struct stack_info *info)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
@@ -112,10 +138,13 @@ static inline bool on_overflow_stack(unsigned long sp,
* We can only safely access per-cpu stacks from current in a non-preemptible
* context.
*/
static inline bool on_accessible_stack(struct task_struct *tsk,
unsigned long sp,
struct stack_info *info)
static inline bool on_accessible_stack(const struct task_struct *tsk,
unsigned long sp,
struct stack_info *info)
{
if (info)
info->type = STACK_TYPE_UNKNOWN;
if (on_task_stack(tsk, sp, info))
return true;
if (tsk != current || preemptible())
@@ -130,4 +159,27 @@ static inline bool on_accessible_stack(struct task_struct *tsk,
return false;
}
static inline void start_backtrace(struct stackframe *frame,
unsigned long fp, unsigned long pc)
{
frame->fp = fp;
frame->pc = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame->graph = 0;
#endif
/*
* Prime the first unwind.
*
* In unwind_frame() we'll check that the FP points to a valid stack,
* which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
* treated as a transition to whichever stack that happens to be. The
* prev_fp value won't be used, but we set it to 0 such that it is
* definitely not an accessible stack address.
*/
bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
frame->prev_fp = 0;
frame->prev_type = STACK_TYPE_UNKNOWN;
}
#endif /* __ASM_STACKTRACE_H */
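For context, the unwind API above is consumed in a common pattern throughout this series: prime a stackframe with start_backtrace(), then iterate with walk_stackframe() or unwind_frame(). A sketch of a caller (kernel-context code with invented names print_entry/print_current_stack; the API usage mirrors the conversions later in this merge):

#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/stacktrace.h>

static int print_entry(struct stackframe *frame, void *data)
{
	int *remaining = data;

	pr_info("  pc: %pS\n", (void *)frame->pc);
	return --(*remaining) == 0;	/* non-zero return stops the walk */
}

static void print_current_stack(void)
{
	struct stackframe frame;
	int remaining = 16;	/* cap the walk at 16 frames */

	start_backtrace(&frame,
			(unsigned long)__builtin_frame_address(0),
			(unsigned long)print_current_stack);
	walk_stackframe(current, &frame, print_entry, &remaining);
}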
@@ -586,10 +586,8 @@ el1_sync:
b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el1_sp_pc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el1_sp_pc
b.eq el1_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
@@ -611,9 +609,11 @@ el1_da:
bl do_mem_abort
kernel_exit 1
el1_sp_pc:
el1_pc:
/*
* Stack or PC alignment exception handling
* PC alignment exception handling. We don't handle SP alignment faults,
* since we will have hit a recursive exception when trying to push the
* initial pt_regs.
*/
mrs x0, far_el1
inherit_daif pstate=x23, tmp=x2
@@ -732,9 +732,9 @@ el0_sync:
ccmp x24, #ESR_ELx_EC_WFx, #4, ne
b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
b.eq el0_sp
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_sp_pc
b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
@@ -758,7 +758,7 @@ el0_sync_compat:
cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_sp_pc
b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
@@ -858,11 +858,15 @@ el0_fpsimd_exc:
mov x1, sp
bl do_fpsimd_exc
b ret_to_user
el0_sp:
ldr x26, [sp, #S_SP]
b el0_sp_pc
el0_pc:
mrs x26, far_el1
el0_sp_pc:
/*
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
gic_prio_kentry_setup tmp=x0
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
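The el0_sp/el0_pc split above exists because an SP alignment fault leaves FAR_EL1 UNKNOWN, so the faulting address has to be taken from the sp saved in pt_regs (the S_SP slot) instead. A userspace trigger for that path looks roughly like this (my sketch, not from the patch; the process is expected to die with SIGBUS/BUS_ADRALN):

int main(void)
{
	asm volatile(
	"	mov	x9, sp\n"
	"	orr	x9, x9, #8\n"		/* break 16-byte SP alignment */
	"	mov	sp, x9\n"
	"	ldr	x9, [sp]\n"		/* SP alignment fault -> SIGBUS */
	::: "x9");
	return 0;			/* never reached */
}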
@@ -406,6 +406,18 @@ static __uint128_t arm64_cpu_to_le128(__uint128_t x)
#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
unsigned int vq)
{
unsigned int i;
__uint128_t *p;
for (i = 0; i < SVE_NUM_ZREGS; ++i) {
p = (__uint128_t *)ZREG(sst, vq, i);
*p = arm64_cpu_to_le128(fst->vregs[i]);
}
}
/*
* Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
* task->thread.sve_state.
@@ -423,17 +435,12 @@ static void fpsimd_to_sve(struct task_struct *task)
unsigned int vq;
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
__uint128_t *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
for (i = 0; i < 32; ++i) {
p = (__uint128_t *)ZREG(sst, vq, i);
*p = arm64_cpu_to_le128(fst->vregs[i]);
}
__fpsimd_to_sve(sst, fst, vq);
}
/*
@@ -459,7 +466,7 @@ static void sve_to_fpsimd(struct task_struct *task)
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
for (i = 0; i < 32; ++i) {
for (i = 0; i < SVE_NUM_ZREGS; ++i) {
p = (__uint128_t const *)ZREG(sst, vq, i);
fst->vregs[i] = arm64_le128_to_cpu(*p);
}
@@ -550,8 +557,6 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
unsigned int vq;
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
__uint128_t *p;
if (!test_tsk_thread_flag(task, TIF_SVE))
return;
@@ -559,11 +564,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
vq = sve_vq_from_vl(task->thread.sve_vl);
memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
for (i = 0; i < 32; ++i) {
p = (__uint128_t *)ZREG(sst, vq, i);
*p = arm64_cpu_to_le128(fst->vregs[i]);
}
__fpsimd_to_sve(sst, fst, vq);
}
int sve_set_vector_length(struct task_struct *task,
@@ -154,12 +154,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
return;
}
frame.fp = regs->regs[29];
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = 0;
#endif
start_backtrace(&frame, regs->regs[29], regs->pc);
walk_stackframe(current, &frame, callchain_trace, entry);
}
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
childregs->pstate |= PSR_UAO_BIT;
if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
childregs->pstate |= PSR_SSBS_BIT;
set_ssbs_bit(childregs);
if (system_uses_irq_prio_masking())
childregs->pmr_save = GIC_PRIO_IRQON;
@@ -442,6 +442,32 @@ void uao_thread_switch(struct task_struct *next)
}
}
/*
* Force SSBS state on context-switch, since it may be lost after migrating
* from a CPU which treats the bit as RES0 in a heterogeneous system.
*/
static void ssbs_thread_switch(struct task_struct *next)
{
struct pt_regs *regs = task_pt_regs(next);
/*
* Nothing to do for kernel threads, but 'regs' may be junk
* (e.g. idle task) so check the flags and bail early.
*/
if (unlikely(next->flags & PF_KTHREAD))
return;
/* If the mitigation is enabled, then we leave SSBS clear. */
if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
test_tsk_thread_flag(next, TIF_SSBD))
return;
if (compat_user_mode(regs))
set_compat_ssbs_bit(regs);
else if (user_mode(regs))
set_ssbs_bit(regs);
}
/*
* We store our current task in sp_el0, which is clobbered by userspace. Keep a
* shadow copy so that we can restore this upon entry from userspace.
@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ptrauth_thread_switch(next);
ssbs_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -498,11 +525,8 @@ unsigned long get_wchan(struct task_struct *p)
if (!stack_page)
return 0;
frame.fp = thread_saved_fp(p);
frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = 0;
#endif
start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
do {
if (unwind_frame(p, &frame))
goto out;
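The ssbs_thread_switch() logic above keys off TIF_SSBD, which userspace controls through the speculation prctl interface. A task can request the store-bypass mitigation like this (my sketch, not part of the patch), after which the kernel keeps PSTATE.SSBS clear for it on every switch:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Disable speculative store bypass for this task (mitigation on). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	printf("store-bypass state: %ld\n",
	       (long)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			   0, 0, 0));
	return 0;
}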
@@ -38,12 +38,9 @@ void *return_address(unsigned int level)
data.level = level + 2;
data.addr = NULL;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.pc = (unsigned long)return_address; /* dummy */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = 0;
#endif
start_backtrace(&frame,
(unsigned long)__builtin_frame_address(0),
(unsigned long)return_address);
walk_stackframe(current, &frame, save_return_addr, &data);
if (!data.level)
@@ -29,9 +29,18 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
/*
* Unwind from one frame record (A) to the next frame record (B).
*
* We terminate early if the location of B indicates a malformed chain of frame
* records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B.
*/
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
unsigned long fp = frame->fp;
struct stack_info info;
if (fp & 0xf)
return -EINVAL;
@@ -39,11 +48,40 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
if (!tsk)
tsk = current;
if (!on_accessible_stack(tsk, fp, NULL))
if (!on_accessible_stack(tsk, fp, &info))
return -EINVAL;
if (test_bit(info.type, frame->stacks_done))
return -EINVAL;
/*
* As stacks grow downward, any valid record on the same stack must be
* at a strictly higher address than the prior record.
*
* Stacks can nest in several valid orders, e.g.
*
* TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
* TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
*
* ... but the nesting itself is strict. Once we transition from one
* stack to another, it's never valid to unwind back to that first
* stack.
*/
if (info.type == frame->prev_type) {
if (fp <= frame->prev_fp)
return -EINVAL;
} else {
set_bit(frame->prev_type, frame->stacks_done);
}
/*
* Record this frame record's values and location. The prev_fp and
* prev_type are only meaningful to the next unwind_frame() invocation.
*/
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
frame->prev_fp = fp;
frame->prev_type = info.type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
@@ -122,12 +160,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
data.skip = trace->skip;
data.no_sched_functions = 0;
frame.fp = regs->regs[29];
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = 0;
#endif
start_backtrace(&frame, regs->regs[29], regs->pc);
walk_stackframe(current, &frame, save_trace, &data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
@@ -146,17 +179,15 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
data.no_sched_functions = nosched;
if (tsk != current) {
frame.fp = thread_saved_fp(tsk);
frame.pc = thread_saved_pc(tsk);
start_backtrace(&frame, thread_saved_fp(tsk),
thread_saved_pc(tsk));
} else {
/* We don't want this function nor the caller */
data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.pc = (unsigned long)__save_stack_trace;
start_backtrace(&frame,
(unsigned long)__builtin_frame_address(0),
(unsigned long)__save_stack_trace);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = 0;
#endif
walk_stackframe(tsk, &frame, save_trace, &data);
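The same-stack rule in unwind_frame() above (each record must sit at a strictly higher address than its predecessor) is what turns a corrupted, cyclic chain of frame records into a clean early termination. A standalone simulation of just that rule (plain userspace C with fabricated records; not kernel code):

#include <stdio.h>

int main(void)
{
	/* Fake frame records: records[i] is record i's saved fp.
	 * The chain 0 -> 1 -> 2 -> 0 is a cycle. */
	unsigned long records[3] = { 1, 2, 0 };
	unsigned long fp = 0;
	unsigned long prev_fp = (unsigned long)-1;	/* mirrors prev_type UNKNOWN */

	for (;;) {
		if (prev_fp != (unsigned long)-1 && fp <= prev_fp) {
			printf("corrupt chain: fp %lu <= prev_fp %lu, stopping\n",
			       fp, prev_fp);
			break;
		}
		prev_fp = fp;
		fp = records[fp];	/* "unwind" to the next record */
	}
	return 0;
}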
@@ -38,11 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
if (!in_lock_functions(regs->pc))
return regs->pc;
frame.fp = regs->regs[29];
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = 0;
#endif
start_backtrace(&frame, regs->regs[29], regs->pc);
do {
int ret = unwind_frame(NULL, &frame);
if (ret < 0)
@@ -100,18 +100,17 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
return;
if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.pc = (unsigned long)dump_backtrace;
start_backtrace(&frame,
(unsigned long)__builtin_frame_address(0),
(unsigned long)dump_backtrace);
} else {
/*
* task blocked in __switch_to
*/
frame.fp = thread_saved_fp(tsk);
frame.pc = thread_saved_pc(tsk);
start_backtrace(&frame,
thread_saved_fp(tsk),
thread_saved_pc(tsk));
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = 0;
#endif
printk("Call trace:\n");
do {
@@ -32,10 +32,10 @@ UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
ifeq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
else
CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -include $(c-gettimeofday-y)
ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
endif
# Clang versions less than 8 do not support -mcmodel=tiny
@@ -57,8 +57,7 @@ $(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,ld)
$(call if_changed,vdso_check)
$(call if_changed,vdsold_and_vdso_check)
# Strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -74,8 +73,8 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
# Actual build commands
quiet_cmd_vdsocc = VDSOCC $@
cmd_vdsocc = $(CC) $(a_flags) $(c_flags) -c -o $@ $<
quiet_cmd_vdsold_and_vdso_check = LD $@
cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
# Install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
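The vdsold_and_vdso_check changes in both vDSO Makefiles address the flip/flop build bug: Kbuild's if_changed records the command it executed in the target's .cmd file, so two consecutive if_changed calls on the same target overwrite each other's record and the rule re-runs on every build. Reduced to a sketch (the demo target and ld_and_check name are invented):

# Broken pattern: each call records its own command, clobbering the
# other's, so the next build always sees a "changed" command:
#	$(call if_changed,ld)
#	$(call if_changed,vdso_check)

# Fixed pattern: one composite command, recorded and compared as a unit.
quiet_cmd_ld_and_check = LDCHK   $@
      cmd_ld_and_check = $(cmd_ld); $(cmd_vdso_check)

$(obj)/demo.so: $(obj)/demo.lds FORCE
	$(call if_changed,ld_and_check)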
@@ -144,8 +144,7 @@ $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
# Link rule for the .so file, .lds has to be first
$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
$(call if_changed,vdso_check)
$(call if_changed,vdsold_and_vdso_check)
# Compilation rules for the vDSO sources
$(c-obj-vdso): %.o: %.c FORCE
@@ -156,14 +155,17 @@ $(asm-obj-vdso): %.o: %.S FORCE
$(call if_changed_dep,vdsoas)
# Actual build commands
quiet_cmd_vdsold = VDSOL $@
quiet_cmd_vdsold_and_vdso_check = LD32 $@
cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
quiet_cmd_vdsold = LD32 $@
cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
-Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
quiet_cmd_vdsocc = VDSOC $@
quiet_cmd_vdsocc = CC32 $@
cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
quiet_cmd_vdsocc_gettimeofday = VDSOC_GTD $@
quiet_cmd_vdsocc_gettimeofday = CC32 $@
cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
quiet_cmd_vdsoas = VDSOA $@
quiet_cmd_vdsoas = AS32 $@
cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
quiet_cmd_vdsomunge = MUNGE $@
@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)node->node_data;
if (idx > its->its_count) {
dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
if (idx >= its->its_count) {
dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
idx, its->its_count);
return -ENXIO;
}
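The IORT change is a textbook off-by-one: its->its_count is the number of entries, so the last valid index is its_count - 1. Reduced to a toy bounds check (my illustration, not the driver code):

#include <stdbool.h>

/* Valid indices into an array of its_count entries are 0 .. its_count - 1;
 * the old "idx > its_count" test wrongly accepted idx == its_count,
 * one element past the end of the ITS identifier array. */
static bool its_index_valid(unsigned int idx, unsigned int its_count)
{
	return idx < its_count;
}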