Commit 65738c6b authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:
 "arm64 and perf fixes:

   - build error when accessing MPIDR_HWID_BITMASK from .S

   - fix CTR_EL0 field definitions

   - remove/disable some kernel messages on user faults (unhandled
     signals, unimplemented syscalls)

   - fix kernel page fault in unwind_frame() with function graph tracing

   - fix perf "sleeping while atomic" errors when booting with ACPI"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: fix unwind_frame() for filtered out fn for function graph tracing
  arm64: Enforce BBM for huge IO/VMAP mappings
  arm64: perf: correct PMUVer probing
  arm_pmu: acpi: request IRQs up-front
  arm_pmu: note IRQs and PMUs per-cpu
  arm_pmu: explicitly enable/disable SPIs at hotplug
  arm_pmu: acpi: check for mismatched PPIs
  arm_pmu: add armpmu_alloc_atomic()
  arm_pmu: fold platform helpers into platform code
  arm_pmu: kill arm_pmu_platdata
  ARM: ux500: remove PMU IRQ bouncer
  arm64: __show_regs: Only resolve kernel symbols when running at EL1
  arm64: Remove unimplemented syscall log message
  arm64: Disable unhandled signal log messages by default
  arm64: cpufeature: Fix CTR_EL0 field definitions
  arm64: uaccess: Formalise types for access_ok()
  arm64: Fix compilation error while accessing MPIDR_HWID_BITMASK from .S files
parents 2bd06ce7 9f416319
@@ -23,7 +23,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/perf/arm_pmu.h>
#include <linux/regulator/machine.h>
#include <asm/outercache.h>
@@ -112,37 +111,6 @@ static void ux500_restart(enum reboot_mode mode, const char *cmd)
prcmu_system_reset(0);
}
/*
* The PMU IRQ lines of two cores are wired together into a single interrupt.
* Bounce the interrupt to the other core if it's not ours.
*/
static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
{
irqreturn_t ret = handler(irq, dev);
int other = !smp_processor_id();
if (ret == IRQ_NONE && cpu_online(other))
irq_set_affinity(irq, cpumask_of(other));
/*
* We should be able to get away with the amount of IRQ_NONEs we give,
* while still having the spurious IRQ detection code kick in if the
* interrupt really starts hitting spuriously.
*/
return ret;
}
static struct arm_pmu_platdata db8500_pmu_platdata = {
.handle_irq = db8500_pmu_handler,
.irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
};
static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
/* Requires call-back bindings. */
OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
{},
};
static struct of_dev_auxdata u8540_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL),
{},
@@ -165,9 +133,6 @@ static void __init u8500_init_machine(void)
if (of_machine_is_compatible("st-ericsson,u8540"))
of_platform_populate(NULL, u8500_local_bus_nodes,
u8540_auxdata_lookup, NULL);
else
of_platform_populate(NULL, u8500_local_bus_nodes,
u8500_auxdata_lookup, NULL);
}
static const char * stericsson_dt_platform_compat[] = {
......
@@ -20,7 +20,7 @@
#define MPIDR_UP_BITMASK (0x1 << 30)
#define MPIDR_MT_BITMASK (0x1 << 24)
#define MPIDR_HWID_BITMASK 0xff00ffffffUL
#define MPIDR_HWID_BITMASK UL(0xff00ffffff)
#define MPIDR_LEVEL_BITS_SHIFT 3
#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
......
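Note on the MPIDR_HWID_BITMASK change above: the old 0xff00ffffffUL literal carries a C type suffix that the assembler cannot parse, so any .S file including this header failed to build. A minimal sketch of the UL() idiom, modelled on the kernel's _AC() helper from uapi/linux/const.h, shows how the suffix is dropped when the header is pulled into assembly:

/*
 * Sketch of the UL() idiom: the UL suffix is only appended when
 * compiling C, so the same constant works from both .c and .S files.
 */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif
#define UL(x)		_AC(x, UL)

#define MPIDR_HWID_BITMASK	UL(0xff00ffffff)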
@@ -28,7 +28,7 @@ struct stackframe {
unsigned long fp;
unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned int graph;
int graph;
#endif
};
......
@@ -72,15 +72,15 @@ static inline void set_fs(mm_segment_t fs)
* This is equivalent to the following test:
* (u65)addr + (u65)size <= (u65)current->addr_limit + 1
*/
static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
unsigned long limit = current_thread_info()->addr_limit;
unsigned long ret, limit = current_thread_info()->addr_limit;
__chk_user_ptr(addr);
asm volatile(
// A + B <= C + 1 for all A,B,C, in four easy steps:
// 1: X = A + B; X' = X % 2^64
" adds %0, %0, %2\n"
" adds %0, %3, %2\n"
// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
" csel %1, xzr, %1, hi\n"
// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
@@ -92,9 +92,9 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
// testing X' - C == 0, subject to the previous adjustments.
" sbcs xzr, %0, %1\n"
" cset %0, ls\n"
: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");
return addr;
return ret;
}
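As a plain-C illustration of the test the inline asm above performs — a sketch, not kernel code — promoting to a 128-bit type makes the 65-bit comparison explicit; the asm achieves the same result in four instructions without needing a wider type:

#include <stdint.h>

/* Sketch: (u65)addr + (u65)size <= (u65)limit + 1, with no wraparound. */
static inline int range_ok_sketch(uint64_t addr, uint64_t size, uint64_t limit)
{
	return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
}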
/*
@@ -104,7 +104,7 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \
......
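The untagged_addr() definition above also deserves a note: user pointers may carry a tag in bits 63:56, and sign-extending from bit 55 strips the tag while keeping both TTBR0 addresses (bit 55 clear) and TTBR1 addresses (bit 55 set) canonical. A stand-alone sketch of sign_extend64(addr, 55):

#include <stdint.h>

/* Sketch: shift the tag bits out, then shift back arithmetically so
 * bit 55 fills bits 63:56. */
static inline uint64_t untag_sketch(uint64_t addr)
{
	return (uint64_t)(((int64_t)addr << 8) >> 8);
}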
@@ -370,6 +370,7 @@ static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
static int swp_handler(struct pt_regs *regs, u32 instr)
{
u32 destreg, data, type, address = 0;
const void __user *user_ptr;
int rn, rt2, res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
@@ -401,7 +402,8 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
/* Check access in reasonable access range for both SWP and SWPB */
if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
user_ptr = (const void __user *)(unsigned long)(address & ~3);
if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
address);
goto fault;
......
@@ -199,9 +199,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
};
static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
/*
* Linux can handle differing I-cache policies. Userspace JITs will
......
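The CWG/ERG corrections above hinge on which direction is "safe" when CPUs disagree on a CTR_EL0 field: the cache writeback granule (CWG) and exclusives reservation granule (ERG) must use the largest value seen across CPUs, while line sizes like DminLine use the smallest. A simplified sketch of the selection — the helper name is illustrative, not the kernel's:

/* Illustrative only: pick the conservative system-wide field value. */
static int safe_field_sketch(int type, int cur, int new)
{
	switch (type) {
	case FTR_LOWER_SAFE:	/* e.g. DminLine: smaller is safe */
		return cur < new ? cur : new;
	case FTR_HIGHER_SAFE:	/* e.g. CWG, ERG: larger is safe */
		return cur > new ? cur : new;
	default:		/* FTR_EXACT: values must match */
		return cur == new ? cur : -1;
	}
}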
@@ -908,9 +908,9 @@ static void __armv8pmu_probe_pmu(void *info)
int pmuver;
dfr0 = read_sysreg(id_aa64dfr0_el1);
pmuver = cpuid_feature_extract_signed_field(dfr0,
pmuver = cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_PMUVER_SHIFT);
if (pmuver < 1)
if (pmuver == 0xf || pmuver == 0)
return;
probe->present = true;
......
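The probing fix above matters because ID_AA64DFR0_EL1.PMUVer does not follow the usual signed ID-register scheme: 0x0 means no PMU and 0xf means IMPLEMENTATION DEFINED, but a signed read would also reject any architected value with bit 3 set (0x8-0xe). A sketch of the two extraction flavours, under that reading:

#include <stdint.h>

/* Sign-extend the 4-bit field: 0x8..0xf come back negative. */
static inline int64_t extract_signed_sketch(uint64_t reg, unsigned int shift)
{
	return ((int64_t)(reg << (60 - shift))) >> 60;
}

/* Unsigned read: 0x0 and 0xf must then be rejected explicitly. */
static inline uint64_t extract_unsigned_sketch(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;
}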
@@ -220,8 +220,15 @@ void __show_regs(struct pt_regs *regs)
show_regs_print_info(KERN_DEFAULT);
print_pstate(regs);
printk("pc : %pS\n", (void *)regs->pc);
printk("lr : %pS\n", (void *)lr);
if (!user_mode(regs)) {
printk("pc : %pS\n", (void *)regs->pc);
printk("lr : %pS\n", (void *)lr);
} else {
printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr);
}
printk("sp : %016llx\n", sp);
i = top_reg;
......
@@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
(frame->pc == (unsigned long)return_to_handler)) {
if (WARN_ON_ONCE(frame->graph == -1))
return -EINVAL;
if (frame->graph < -1)
frame->graph += FTRACE_NOTRACE_DEPTH;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
......
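The unwind_frame() fix above, together with the profile_pc() change below, handles the function graph tracer's curr_ret_stack encoding: a non-negative value indexes ret_stack[], -1 means no entries, and values below -1 mean a notrace-filtered function is executing, offset by FTRACE_NOTRACE_DEPTH. A hedged sketch of normalising that index (the kernel inlines this logic rather than using a helper like this):

#ifndef FTRACE_NOTRACE_DEPTH
#define FTRACE_NOTRACE_DEPTH	65536	/* tracer's offset for filtered fns */
#endif

static int graph_index_sketch(int graph)
{
	if (graph == -1)
		return -1;			/* nothing recorded: bail out */
	if (graph < -1)
		return graph + FTRACE_NOTRACE_DEPTH; /* undo notrace offset */
	return graph;
}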
@@ -57,7 +57,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
if (end < start || flags)
return -EINVAL;
if (!access_ok(VERIFY_READ, start, end - start))
if (!access_ok(VERIFY_READ, (const void __user *)start, end - start))
return -EFAULT;
return __do_compat_cache_op(start, end);
......
@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs)
frame.fp = regs->regs[29];
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = -1; /* no task info */
frame.graph = current->curr_ret_stack;
#endif
do {
int ret = unwind_frame(NULL, &frame);
......
@@ -57,7 +57,7 @@ static const char *handler[]= {
"Error"
};
int show_unhandled_signals = 1;
int show_unhandled_signals = 0;
static void dump_backtrace_entry(unsigned long where)
{
@@ -526,14 +526,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
}
#endif
if (show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: syscall %d\n", current->comm,
task_pid_nr(current), regs->syscallno);
dump_instr("", regs);
if (user_mode(regs))
__show_regs(regs);
}
return sys_ni_syscall();
}
......
@@ -933,6 +933,11 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
/* ioremap_page_range doesn't honour BBM */
if (pud_present(READ_ONCE(*pudp)))
return 0;
BUG_ON(phys & ~PUD_MASK);
set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
return 1;
@@ -942,6 +947,11 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
/* ioremap_page_range doesn't honour BBM */
if (pmd_present(READ_ONCE(*pmdp)))
return 0;
BUG_ON(phys & ~PMD_MASK);
set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
return 1;
......
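The two guards above exist because the architecture requires break-before-make (BBM) when changing one valid translation into another: the old entry must be invalidated and the TLB flushed before the new entry is written. ioremap_page_range() performs no such sequence, so a present entry now makes pud/pmd_set_huge() refuse, and the mapping falls back to smaller granules. A schematic sketch of the sequence BBM would require (illustrative, not kernel code):

/* Illustrative BBM sequence for swapping a live entry. */
static void bbm_replace_sketch(pmd_t *pmdp, pmd_t new)
{
	pmd_clear(pmdp);	/* 1. break: write an invalid entry */
	flush_tlb_all();	/* 2. invalidate stale translations */
	set_pmd(pmdp, new);	/* 3. make: install the new entry   */
}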
@@ -17,7 +17,6 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
@@ -26,6 +25,9 @@
#include <asm/irq_regs.h>
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ -320,17 +322,9 @@ validate_group(struct perf_event *event)
return 0;
}
static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
struct platform_device *pdev = armpmu->plat_device;
return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
@@ -341,14 +335,11 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
* dereference.
*/
armpmu = *(void **)dev;
plat = armpmu_get_platdata(armpmu);
if (WARN_ON_ONCE(!armpmu))
return IRQ_NONE;
start_clock = sched_clock();
if (plat && plat->handle_irq)
ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
else
ret = armpmu->handle_irq(irq, armpmu);
ret = armpmu->handle_irq(irq, armpmu);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
@@ -531,54 +522,41 @@ int perf_num_counters(void)
}
EXPORT_SYMBOL_GPL(perf_num_counters);
void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
static int armpmu_count_irq_users(const int irq)
{
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
int irq = per_cpu(hw_events->irq, cpu);
int cpu, count = 0;
if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
return;
if (irq_is_percpu_devid(irq)) {
free_percpu_irq(irq, &hw_events->percpu_pmu);
cpumask_clear(&armpmu->active_irqs);
return;
for_each_possible_cpu(cpu) {
if (per_cpu(cpu_irq, cpu) == irq)
count++;
}
free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
return count;
}
void armpmu_free_irqs(struct arm_pmu *armpmu)
void armpmu_free_irq(int irq, int cpu)
{
int cpu;
if (per_cpu(cpu_irq, cpu) == 0)
return;
if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
return;
if (!irq_is_percpu_devid(irq))
free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
else if (armpmu_count_irq_users(irq) == 1)
free_percpu_irq(irq, &cpu_armpmu);
for_each_cpu(cpu, &armpmu->supported_cpus)
armpmu_free_irq(armpmu, cpu);
per_cpu(cpu_irq, cpu) = 0;
}
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
int armpmu_request_irq(int irq, int cpu)
{
int err = 0;
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
const irq_handler_t handler = armpmu_dispatch_irq;
int irq = per_cpu(hw_events->irq, cpu);
if (!irq)
return 0;
if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
} else if (irq_is_percpu_devid(irq)) {
int other_cpu = cpumask_first(&armpmu->active_irqs);
int other_irq = per_cpu(hw_events->irq, other_cpu);
if (irq != other_irq) {
pr_warn("mismatched PPIs detected.\n");
err = -EINVAL;
goto err_out;
}
} else {
struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
if (!irq_is_percpu_devid(irq)) {
unsigned long irq_flags;
err = irq_force_affinity(irq, cpumask_of(cpu));
@@ -589,22 +567,22 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
goto err_out;
}
if (platdata && platdata->irq_flags) {
irq_flags = platdata->irq_flags;
} else {
irq_flags = IRQF_PERCPU |
IRQF_NOBALANCING |
IRQF_NO_THREAD;
}
irq_flags = IRQF_PERCPU |
IRQF_NOBALANCING |
IRQF_NO_THREAD;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
err = request_irq(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&hw_events->percpu_pmu, cpu));
per_cpu_ptr(&cpu_armpmu, cpu));
} else if (armpmu_count_irq_users(irq) == 0) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&cpu_armpmu);
}
if (err)
goto err_out;
cpumask_set_cpu(cpu, &armpmu->active_irqs);
per_cpu(cpu_irq, cpu) = irq;
return 0;
err_out:
@@ -612,19 +590,6 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
return err;
}
int armpmu_request_irqs(struct arm_pmu *armpmu)
{
int cpu, err;
for_each_cpu(cpu, &armpmu->supported_cpus) {
err = armpmu_request_irq(armpmu, cpu);
if (err)
break;
}
return err;
}
static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
@@ -647,12 +612,14 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (pmu->reset)
pmu->reset(pmu);
per_cpu(cpu_armpmu, cpu) = pmu;
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq) {
if (irq_is_percpu_devid(irq)) {
if (irq_is_percpu_devid(irq))
enable_percpu_irq(irq, IRQ_TYPE_NONE);
return 0;
}
else
enable_irq(irq);
}
return 0;
@@ -667,8 +634,14 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq && irq_is_percpu_devid(irq))
disable_percpu_irq(irq);
if (irq) {
if (irq_is_percpu_devid(irq))
disable_percpu_irq(irq);
else
disable_irq(irq);
}
per_cpu(cpu_armpmu, cpu) = NULL;
return 0;
}
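Taken together, the IRQ_NOAUTOEN request in armpmu_request_irq() and the enable_irq()/disable_irq() calls in the hotplug callbacks above implement one pattern: SPIs are requested once, up-front, but are only live while their CPU is online. A condensed sketch of that pattern, with an illustrative handler argument:

/* Condensed sketch: request disabled at probe, toggle at CPU hotplug. */
static int spi_request_sketch(int irq, irq_handler_t handler, void *dev)
{
	irq_set_status_flags(irq, IRQ_NOAUTOEN);	/* don't enable yet */
	return request_irq(irq, handler,
			   IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
			   "arm-pmu", dev);
}
/* CPU "starting" callback: enable_irq(irq); teardown: disable_irq(irq). */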
@@ -800,18 +773,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
&cpu_pmu->node);
}
struct arm_pmu *armpmu_alloc(void)
static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
struct arm_pmu *pmu;
int cpu;
pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
pmu = kzalloc(sizeof(*pmu), flags);
if (!pmu) {
pr_info("failed to allocate PMU device!\n");
goto out;
}
pmu->hw_events = alloc_percpu(struct pmu_hw_events);
pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
if (!pmu->hw_events) {
pr_info("failed to allocate per-cpu PMU data.\n");
goto out_free_pmu;
@@ -857,6 +830,17 @@ struct arm_pmu *armpmu_alloc(void)
return NULL;
}
struct arm_pmu *armpmu_alloc(void)
{
return __armpmu_alloc(GFP_KERNEL);
}
struct arm_pmu *armpmu_alloc_atomic(void)
{
return __armpmu_alloc(GFP_ATOMIC);
}
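The GFP split above is the "sleeping while atomic" fix from the merge summary: arm_pmu_acpi_find_alloc_pmu() runs from a CPU-hotplug starting callback with interrupts disabled, where a GFP_KERNEL allocation may sleep. An illustrative wrapper (not part of the series) makes the context rule explicit:

/* Illustrative: pick the allocator legal for the calling context. */
static struct arm_pmu *alloc_pmu_sketch(bool may_sleep)
{
	return may_sleep ? armpmu_alloc() : armpmu_alloc_atomic();
}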
void armpmu_free(struct arm_pmu *pmu)
{
free_percpu(pmu->hw_events);
......
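The per-cpu cpu_irq table introduced above replaces the per-PMU active_irqs cpumask: no reference count is stored, and the number of users of a percpu_devid IRQ is recomputed by scanning, so the IRQ is requested on the 0-to-1 transition and freed on the 1-to-0 transition. A self-contained sketch of that counting pattern (array size and names are illustrative):

#define NR_CPUS_SKETCH	8
static int cpu_irq_sketch[NR_CPUS_SKETCH];	/* 0 means "no IRQ noted" */

static int irq_users_sketch(int irq)
{
	int cpu, count = 0;

	for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
		if (cpu_irq_sketch[cpu] == irq)
			count++;
	return count;
}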
@@ -11,6 +11,8 @@
#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>
@@ -87,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void)
pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
}
/*
* Log and request the IRQ so the core arm_pmu code can manage
* it. We'll have to sanity-check IRQs later when we associate
* them with their PMUs.
*/
per_cpu(pmu_irqs, cpu) = irq;
armpmu_request_irq(irq, cpu);
}
return 0;
@@ -127,7 +135,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
return pmu;
}
pmu = armpmu_alloc();
pmu = armpmu_alloc_atomic();
if (!pmu) {
pr_warn("Unable to allocate PMU for CPU%d\n",
smp_processor_id());
@@ -139,6 +147,35 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
return pmu;
}
/*
* Check whether the new IRQ is compatible with those already associated with
* the PMU (e.g. we don't have mismatched PPIs).
*/
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
int cpu;
if (!irq)
return true;
for_each_cpu(cpu, &pmu->supported_cpus) {
int other_irq = per_cpu(hw_events->irq, cpu);
if (!other_irq)
continue;
if (irq == other_irq)
continue;
if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
continue;
pr_warn("mismatched PPIs detected\n");
return false;
}
return true;
}
/*
* This must run before the common arm_pmu hotplug logic, so that we can
* associate a CPU and its interrupt before the common code tries to manage the
@@ -164,19 +201,14 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
if (!pmu)
return -ENOMEM;
cpumask_set_cpu(cpu, &pmu->supported_cpus);
per_cpu(probed_pmus, cpu) = pmu;
/*
* Log and request the IRQ so the core arm_pmu code can manage it. In
* some situations (e.g. mismatched PPIs), we may fail to request the
* IRQ. However, it may be too late for us to do anything about it.
* The common ARM PMU code will log a warning in this case.
*/
hw_events = pmu->hw_events;
per_cpu(hw_events->irq, cpu) = irq;
armpmu_request_irq(pmu, cpu);
if (pmu_irq_matches(pmu, irq)) {
hw_events = pmu->hw_events;
per_cpu(hw_events->irq, cpu) = irq;
}
cpumask_set_cpu(cpu, &pmu->supported_cpus);
/*
* Ideally, we'd probe the PMU here when we find the first matching
@@ -247,11 +279,6 @@ static int arm_pmu_acpi_init(void)
if (acpi_disabled)
return 0;
/*
* We can't request IRQs yet, since we don't know the cookie value
* until we know which CPUs share the same logical PMU. We'll handle
* that in arm_pmu_acpi_cpu_starting().
*/
ret = arm_pmu_acpi_parse_irqs();
if (ret)
return ret;
......
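pmu_irq_matches() above encodes a simple compatibility rule: within one logical PMU, every CPU may have its own SPI, but if percpu (PPI) interrupts are involved, all CPUs must share the same IRQ number. A distilled sketch of the per-pair rule (function name is illustrative):

/* Returns nonzero if two per-CPU IRQs can coexist in one PMU. */
static int irq_pair_ok_sketch(int irq, int other_irq,
			      int irq_is_ppi, int other_is_ppi)
{
	if (!irq || !other_irq || irq == other_irq)
		return 1;
	return !irq_is_ppi && !other_is_ppi;	/* distinct SPIs are fine */
}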
@@ -127,13 +127,6 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
pdev->dev.of_node);
}
/*
* Some platforms have all PMU IRQs OR'd into a single IRQ, with a
* special platdata function that attempts to demux them.
*/
if (dev_get_platdata(&pdev->dev))
cpumask_setall(&pmu->supported_cpus);
for (i = 0; i < num_irqs; i++) {
int cpu, irq;
@@ -164,6 +157,36 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
return 0;
}
static int armpmu_request_irqs(struct arm_pmu *armpmu)
{
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
int cpu, err;
for_each_cpu(cpu, &armpmu->supported_cpus) {
int irq = per_cpu(hw_events->irq, cpu);
if (!irq)
continue;
err = armpmu_request_irq(irq, cpu);
if (err)
break;
}
return err;
}
static void armpmu_free_irqs(struct arm_pmu *armpmu)
{
int cpu;
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
for_each_cpu(cpu, &armpmu->supported_cpus) {
int irq = per_cpu(hw_events->irq, cpu);
armpmu_free_irq(irq, cpu);
}
}
int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table)
......
@@ -14,26 +14,10 @@
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>
/*
* struct arm_pmu_platdata - ARM PMU platform data
*
* @handle_irq: an optional handler which will be called from the
* interrupt and passed the address of the low level handler,
* and can be used to implement any platform specific handling
* before or after calling it.
*
* @irq_flags: if non-zero, these flags will be passed to request_irq
* when requesting interrupts for this PMU device.
*/
struct arm_pmu_platdata {
irqreturn_t (*handle_irq)(int irq, void *dev,
irq_handler_t pmu_handler);
unsigned long irq_flags;
};
#ifdef CONFIG_ARM_PMU
/*
@@ -92,7 +76,6 @@ enum armpmu_attr_groups {
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
cpumask_t supported_cpus;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
@@ -174,12 +157,11 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irqs(struct arm_pmu *armpmu);
void armpmu_free_irqs(struct arm_pmu *armpmu);
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
int armpmu_request_irq(int irq, int cpu);
void armpmu_free_irq(int irq, int cpu);
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
......