Commit 57ef300e authored by Linus Torvalds

Merge tag 'arc-5.0-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC architecture updates from Vineet Gupta:

 - Perf support for raw events

 - boot log printing: return stack, action points

 - fix memset to avoid prefetchw bleeding past end of buffer

 - do_page_fault fix for mmap_sem held while returning to userspace

 - other misc fixes

* tag 'arc-5.0-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARCv2: lib: memeset: fix doing prefetchw outside of buffer
  ARC: mm: do_page_fault fixes #1: relinquish mmap_sem if signal arrives while handle_mm_fault
  ARC: show_regs: lockdep: re-enable preemption
  ARC: show_regs: lockdep: avoid page allocator...
  ARC: perf: avoid kernel killing where it is possible
  ARC: perf: move HW events mapping to separate function
  ARC: perf: introduce Kernel PMU events support
  ARC: perf: trivial code cleanup
  ARC: perf: map generic branches to correct hardware condition
  ARC: adjust memblock_reserve of kernel memory
  arc: remove redundant kernel-space generic-y
  ARC: fix __ffs return value to avoid build warnings
  ARC: boot log: print Action point details
  ARCv2: boot log: BPU return stack depth
parents 49a57857 e6a72b7d
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -3,23 +3,19 @@ generic-y += bugs.h
 generic-y += compat.h
 generic-y += device.h
 generic-y += div64.h
-generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += extable.h
-generic-y += fb.h
 generic-y += ftrace.h
 generic-y += hardirq.h
 generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
-generic-y += kmap_types.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += parport.h
-generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += topology.h
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -216,6 +216,14 @@ struct bcr_fp_arcv2 {
 #endif
 };
 
+struct bcr_actionpoint {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:21, min:1, num:2, ver:8;
+#else
+	unsigned int ver:8, num:2, min:1, pad:21;
+#endif
+};
+
 #include <soc/arc/timers.h>
 
 struct bcr_bpu_arcompact {
@@ -283,7 +291,7 @@ struct cpuinfo_arc_cache {
 };
 
 struct cpuinfo_arc_bpu {
-	unsigned int ver, full, num_cache, num_pred;
+	unsigned int ver, full, num_cache, num_pred, ret_stk;
 };
 
 struct cpuinfo_arc_ccm {
@@ -302,7 +310,7 @@ struct cpuinfo_arc {
 	struct {
 		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
 			     fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
-			     debug:1, ap:1, smart:1, rtt:1, pad3:4,
+			     ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
 			     timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
 	} extn;
 	struct bcr_mpy extn_mpy;
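A note on the new bcr_actionpoint layout: C bitfield order is implementation-defined, which is why the struct spells out both endian variants. On a little-endian build the fields occupy the raw BCR word as ver[7:0], num[9:8], min[10]. A small host-side sketch of the same decode; the raw value 0x105 is made up for illustration:

  #include <stdint.h>
  #include <stdio.h>

  /* Decode an AP build register word per the little-endian layout:
   * ver[7:0], num[9:8], min[10]. */
  int main(void)
  {
          uint32_t bcr = 0x105;                 /* hypothetical raw value */
          unsigned int ver = bcr & 0xff;        /* 0x05: AP block version */
          unsigned int num = (bcr >> 8) & 0x3;  /* 1 -> 2 << 1 = 4 APs */
          unsigned int min = (bcr >> 10) & 0x1; /* 0 -> full actionpoints */

          printf("ver=%u, %u actionpoints, %s\n",
                 ver, 2U << num, min ? "min" : "full");
          return 0;
  }

This mirrors the decode done later in setup.c: ap_num = 2 << ap.num and ap_full = !!ap.min.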
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
 {
 	if (!word)
 		return word;
@@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
 {
-	int n;
+	unsigned long n;
 
 	asm volatile(
 	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
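The build warnings this return-type change fixes come from generic callers that combine __ffs() with unsigned long values: the kernel's min() macro type-checks its operands, so an int-returning __ffs() in an expression like min(order, __ffs(start)) trips "comparison of distinct pointer types lacks a cast". A compilable userspace sketch of the mechanism, with __builtin_ctzl standing in for __ffs (not the exact kernel warning site):

  #include <stdio.h>

  /* Stand-ins for the old and fixed prototypes */
  static int old_ffs(unsigned long w)           { return __builtin_ctzl(w); }
  static unsigned long new_ffs(unsigned long w) { return __builtin_ctzl(w); }

  /* Simplified version of the kernel min() type check */
  #define min_check(x, y) ({                              \
          typeof(x) _x = (x);                             \
          typeof(y) _y = (y);                             \
          (void)(&_x == &_y); /* warns if types differ */ \
          _x < _y ? _x : _y; })

  int main(void)
  {
          /* min_check(31UL, old_ffs(0x100UL)) would warn here:
           * 'int *' vs 'unsigned long *' comparison */
          unsigned long order = min_check(31UL, new_ffs(0x100UL)); /* 8 */

          printf("%lu\n", order);
          return 0;
  }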
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
 
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
 	[PERF_COUNT_ARC_BPOK] = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -1,15 +1,10 @@
-/*
- * Linux performance counter support for ARC700 series
- *
- * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
- *
- * This code is inspired by the perf support of various other architectures.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Linux performance counter support for ARC CPUs.
+// This code is inspired by the perf support of various other architectures.
+//
+// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
+
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -19,12 +14,31 @@
 #include <asm/arcregs.h>
 #include <asm/stacktrace.h>
 
+/* HW holds 8 symbols + one for null terminator */
+#define ARCPMU_EVENT_NAME_LEN	9
+
+enum arc_pmu_attr_groups {
+	ARCPMU_ATTR_GR_EVENTS,
+	ARCPMU_ATTR_GR_FORMATS,
+	ARCPMU_NR_ATTR_GR
+};
+
+struct arc_pmu_raw_event_entry {
+	char name[ARCPMU_EVENT_NAME_LEN];
+};
+
 struct arc_pmu {
 	struct pmu pmu;
 	unsigned int irq;
 	int n_counters;
+	int n_events;
 	u64 max_period;
 	int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
+
+	struct arc_pmu_raw_event_entry *raw_entry;
+	struct attribute **attrs;
+	struct perf_pmu_events_attr *attr;
+	const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
 };
 
 struct arc_pmu_cpu {
@@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data)
 {
 	struct arc_callchain_trace *ctrl = data;
 	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
+
 	perf_callchain_store(entry, addr);
 
 	if (ctrl->depth++ < 3)
@@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data)
 	return -1;
 }
 
-void
-perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+			   struct pt_regs *regs)
 {
 	struct arc_callchain_trace ctrl = {
 		.depth = 0,
@@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
 	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
 }
 
-void
-perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+			 struct pt_regs *regs)
 {
 	/*
 	 * User stack can't be unwound trivially with kernel dwarf unwinder
@@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu;
 static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
 
 /* read counter #idx; note that counter# != event# on ARC! */
-static uint64_t arc_pmu_read_counter(int idx)
+static u64 arc_pmu_read_counter(int idx)
 {
-	uint32_t tmp;
-	uint64_t result;
+	u32 tmp;
+	u64 result;
 
 	/*
 	 * ARC supports making 'snapshots' of the counters, so we don't
@@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx)
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
-	result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
+	result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
 	result |= read_aux_reg(ARC_REG_PCT_SNAPL);
 
 	return result;
@@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx)
 static void arc_perf_event_update(struct perf_event *event,
 				  struct hw_perf_event *hwc, int idx)
 {
-	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
-	uint64_t new_raw_count = arc_pmu_read_counter(idx);
-	int64_t delta = new_raw_count - prev_raw_count;
+	u64 prev_raw_count = local64_read(&hwc->prev_count);
+	u64 new_raw_count = arc_pmu_read_counter(idx);
+	s64 delta = new_raw_count - prev_raw_count;
 
 	/*
 	 * We aren't afraid of hwc->prev_count changing beneath our feet
@@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event)
 		pr_debug("init cache event with h/w %08x \'%s\'\n",
 			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
 		return 0;
+
+	case PERF_TYPE_RAW:
+		if (event->attr.config >= arc_pmu->n_events)
+			return -ENOENT;
+
+		hwc->config |= event->attr.config;
+		pr_debug("init raw event with idx %lld \'%s\'\n",
+			 event->attr.config,
+			 arc_pmu->raw_entry[event->attr.config].name);
+
+		return 0;
+
 	default:
 		return -ENOENT;
 	}
@@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event)
 /* starts all counters */
 static void arc_pmu_enable(struct pmu *pmu)
 {
-	uint32_t tmp;
+	u32 tmp;
 
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
 }
@@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu)
 /* stops all counters */
 static void arc_pmu_disable(struct pmu *pmu)
 {
-	uint32_t tmp;
+	u32 tmp;
 
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
 }
@@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event)
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);
 
 	/* Write value */
-	write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
-	write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));
+	write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
+	write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
 
 	perf_event_update_userpage(event);
@@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags)
 	/* Enable interrupt for this counter */
 	if (is_sampling_event(event))
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
 	/* enable ARC pmu here */
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
@@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
 		 * Reset interrupt flag by writing 1. This is required
 		 * to make sure a pending interrupt was not left.
 		 */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
 	}
 
 	if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags)
 
 	if (is_sampling_event(event)) {
 		/* Mimic full counter overflow as other arches do */
-		write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
+		write_aux_reg(ARC_REG_PCT_INT_CNTL,
+			      lower_32_bits(arc_pmu->max_period));
 		write_aux_reg(ARC_REG_PCT_INT_CNTH,
-			      (arc_pmu->max_period >> 32));
+			      upper_32_bits(arc_pmu->max_period));
 	}
 
 	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
@@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 		idx = __ffs(active_ints);
 
 		/* Reset interrupt flag by writing 1 */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
 
 		/*
 		 * On reset of "interrupt active" bit corresponding
@@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 		 * Now we need to re-enable interrupt for the counter.
 		 */
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
 		event = pmu_cpu->act_counter[idx];
 		hwc = &event->hw;
@@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 			arc_pmu_stop(event, 0);
 		}
 
-		active_ints &= ~(1U << idx);
+		active_ints &= ~BIT(idx);
 	} while (active_ints);
 
 done:
@@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data)
 	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
 }
 
+/* Event field occupies the bottom 15 bits of our config field */
+PMU_FORMAT_ATTR(event, "config:0-14");
+static struct attribute *arc_pmu_format_attrs[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static struct attribute_group arc_pmu_format_attr_gr = {
+	.name = "format",
+	.attrs = arc_pmu_format_attrs,
+};
+
+static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *page)
+{
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+/*
+ * We don't add attrs here as we don't have a pre-defined list of perf
+ * events. We will generate and add attrs dynamically in probe() after
+ * we read the HW configuration.
+ */
+static struct attribute_group arc_pmu_events_attr_gr = {
+	.name = "events",
+};
+
+static void arc_pmu_add_raw_event_attr(int j, char *str)
+{
+	memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
+	arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
+	arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+	arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
+	arc_pmu->attr[j].id = j;
+	arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
+}
+
+static int arc_pmu_raw_alloc(struct device *dev)
+{
+	arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+		sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->attr)
+		return -ENOMEM;
+
+	arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+		sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->attrs)
+		return -ENOMEM;
+
+	arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
+		sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->raw_entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline bool event_in_hw_event_map(int i, char *name)
+{
+	if (!arc_pmu_ev_hw_map[i])
+		return false;
+
+	if (!strlen(arc_pmu_ev_hw_map[i]))
+		return false;
+
+	if (strcmp(arc_pmu_ev_hw_map[i], name))
+		return false;
+
+	return true;
+}
+
+static void arc_pmu_map_hw_event(int j, char *str)
+{
+	int i;
+
+	/* See if HW condition has been mapped to a perf event_id */
+	for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
+		if (event_in_hw_event_map(i, str)) {
+			pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+				 i, str, j);
+			arc_pmu->ev_hw_idx[i] = j;
+		}
+	}
+}
+
 static int arc_pmu_device_probe(struct platform_device *pdev)
 {
 	struct arc_reg_pct_build pct_bcr;
 	struct arc_reg_cc_build cc_bcr;
-	int i, j, has_interrupts;
+	int i, has_interrupts;
 	int counter_size;	/* in bits */
 
 	union cc_name {
 		struct {
-			uint32_t word0, word1;
+			u32 word0, word1;
 			char sentinel;
 		} indiv;
-		char str[9];
+		char str[ARCPMU_EVENT_NAME_LEN];
 	} cc_name;
@@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
-	BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
+	if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
+		return -EINVAL;
 
 	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
-	BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
+	if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
+		return -EINVAL;
 
 	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
 	if (!arc_pmu)
 		return -ENOMEM;
 
+	arc_pmu->n_events = cc_bcr.c;
+
+	if (arc_pmu_raw_alloc(&pdev->dev))
+		return -ENOMEM;
+
 	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
 
 	arc_pmu->n_counters = pct_bcr.c;
@@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
 		arc_pmu->n_counters, counter_size, cc_bcr.c,
-		has_interrupts ? ", [overflow IRQ support]":"");
+		has_interrupts ? ", [overflow IRQ support]" : "");
 
-	cc_name.str[8] = 0;
+	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
 	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
 		arc_pmu->ev_hw_idx[i] = -1;
 
 	/* loop thru all available h/w condition indexes */
-	for (j = 0; j < cc_bcr.c; j++) {
-		write_aux_reg(ARC_REG_CC_INDEX, j);
+	for (i = 0; i < cc_bcr.c; i++) {
+		write_aux_reg(ARC_REG_CC_INDEX, i);
 		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
 		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
 
-		/* See if it has been mapped to a perf event_id */
-		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
-			if (arc_pmu_ev_hw_map[i] &&
-			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
-			    strlen(arc_pmu_ev_hw_map[i])) {
-				pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
-					 i, cc_name.str, j);
-				arc_pmu->ev_hw_idx[i] = j;
-			}
-		}
+		arc_pmu_map_hw_event(i, cc_name.str);
+		arc_pmu_add_raw_event_attr(i, cc_name.str);
 	}
 
+	arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
+	arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
+	arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
+
 	arc_pmu->pmu = (struct pmu) {
 		.pmu_enable	= arc_pmu_enable,
 		.pmu_disable	= arc_pmu_disable,
@@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		.start		= arc_pmu_start,
 		.stop		= arc_pmu_stop,
 		.read		= arc_pmu_read,
+		.attr_groups	= arc_pmu->attr_groups,
 	};
 
 	if (has_interrupts) {
@@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 	} else
 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
-	return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
+	/*
+	 * The perf parser doesn't really like the '-' symbol in event names,
+	 * so use '_' in the arc pct name as it becomes the kernel PMU event
+	 * prefix.
+	 */
+	return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id arc_pmu_match[] = {
 	{ .compatible = "snps,arc700-pct" },
 	{ .compatible = "snps,archs-pct" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, arc_pmu_match);
-#endif
 
 static struct platform_driver arc_pmu_driver = {
 	.driver = {
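With the PMU now registered as "arc_pct" and every countable condition exported through the events/ and format/ sysfs groups above, raw events can be programmed straight from userspace. A minimal self-contained perf_event_open() sketch; the condition index 3 is hypothetical, valid indexes are 0..n_events-1 and the names can be listed under /sys/bus/event_source/devices/arc_pct/events:

  #include <linux/perf_event.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
          struct perf_event_attr attr;
          long long count;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.type = PERF_TYPE_RAW;      /* routed to the arc_pct raw events */
          attr.size = sizeof(attr);
          attr.config = 3;                /* hypothetical condition index */
          attr.disabled = 1;

          /* count for the calling thread on any CPU */
          fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }

          ioctl(fd, PERF_EVENT_IOC_RESET, 0);
          ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
          /* ... workload under measurement ... */
          ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

          read(fd, &count, sizeof(count));
          printf("raw event count: %lld\n", count);
          close(fd);
          return 0;
  }

The perf tool should resolve the same conditions by name, e.g. perf stat -e arc_pct/<condition>/ -- <cmd>.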
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void)
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	const struct id_to_str *tbl;
 	struct bcr_isa_arcv2 isa;
+	struct bcr_actionpoint ap;
 
 	FIX_PTR(cpu);
@@ -195,6 +196,7 @@ static void read_arc_build_cfg_regs(void)
 		cpu->bpu.full = bpu.ft;
 		cpu->bpu.num_cache = 256 << bpu.bce;
 		cpu->bpu.num_pred = 2048 << bpu.pte;
+		cpu->bpu.ret_stk = 4 << bpu.rse;
 
 		if (cpu->core.family >= 0x54) {
 			unsigned int exec_ctrl;
@@ -207,8 +209,11 @@ static void read_arc_build_cfg_regs(void)
 		}
 	}
 
-	READ_BCR(ARC_REG_AP_BCR, bcr);
-	cpu->extn.ap = bcr.ver ? 1 : 0;
+	READ_BCR(ARC_REG_AP_BCR, ap);
+	if (ap.ver) {
+		cpu->extn.ap_num = 2 << ap.num;
+		cpu->extn.ap_full = !!ap.min;
+	}
 
 	READ_BCR(ARC_REG_SMART_BCR, bcr);
 	cpu->extn.smart = bcr.ver ? 1 : 0;
@@ -216,8 +221,6 @@ static void read_arc_build_cfg_regs(void)
 	READ_BCR(ARC_REG_RTT_BCR, bcr);
 	cpu->extn.rtt = bcr.ver ? 1 : 0;
 
-	cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-
 	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
 
 	/* some hacks for lack of feature BCR info in old ARC700 cores */
@@ -299,10 +302,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 
 	if (cpu->bpu.ver)
 		n += scnprintf(buf + n, len - n,
-			      "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
+			      "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
 			      IS_AVAIL1(cpu->bpu.full, "full"),
 			      IS_AVAIL1(!cpu->bpu.full, "partial"),
-			      cpu->bpu.num_cache, cpu->bpu.num_pred);
+			      cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
 
 	if (is_isa_arcv2()) {
 		struct bcr_lpb lpb;
@@ -336,11 +339,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 			IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
 			IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
 
-	if (cpu->extn.debug)
-		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n",
-			       IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
-			       IS_AVAIL1(cpu->extn.smart, "smaRT "),
-			       IS_AVAIL1(cpu->extn.rtt, "RTT "));
+	if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
+		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
+			       IS_AVAIL1(cpu->extn.smart, "smaRT "),
+			       IS_AVAIL1(cpu->extn.rtt, "RTT "));
+		if (cpu->extn.ap_num) {
+			n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
+				       cpu->extn.ap_num,
+				       cpu->extn.ap_full ? "full":"min");
+		}
+		n += scnprintf(buf + n, len - n, "\n");
+	}
 
 	if (cpu->dccm.sz || cpu->iccm.sz)
 		n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
 #include <asm/arcregs.h>
 #include <asm/irqflags.h>
 
+#define ARC_PATH_MAX	256
+
 /*
  * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
  *   -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
 	print_reg_file(&(cregs->r13), 13);
 }
 
-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
 {
 	char *path_nm = NULL;
 	struct mm_struct *mm;
 	struct file *exe_file;
+	char buf[ARC_PATH_MAX];
 
 	mm = get_task_mm(tsk);
 	if (!mm)
@@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
 	mmput(mm);
 
 	if (exe_file) {
-		path_nm = file_path(exe_file, buf, 255);
+		path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
 		fput(exe_file);
 	}
 
@@ -80,10 +83,9 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
 	pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
 }
 
-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
 {
 	struct vm_area_struct *vma;
-	char *nm = buf;
 	struct mm_struct *active_mm = current->active_mm;
 
 	/* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
 	 * if the container VMA is not found
 	 */
 	if (vma && (vma->vm_start <= address)) {
+		char buf[ARC_PATH_MAX];
+		char *nm = "?";
+
 		if (vma->vm_file) {
-			nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+			nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
 			if (IS_ERR(nm))
 				nm = "?";
 		}
@@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
 	struct callee_regs *cregs;
-	char *buf;
 
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return;
+	/*
+	 * generic code calls us with preemption disabled, but some calls
+	 * here could sleep, so re-enable to avoid a lockdep splat
+	 */
+	preempt_enable();
 
-	print_task_path_n_nm(tsk, buf);
+	print_task_path_n_nm(tsk);
 	show_regs_print_info(KERN_INFO);
 
 	show_ecr_verbose(regs);
@@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs)
 		(void *)regs->blink, (void *)regs->ret);
 
 	if (user_mode(regs))
-		show_faulting_vma(regs->ret, buf);	/* faulting code, not data */
+		show_faulting_vma(regs->ret);	/* faulting code, not data */
 
 	pr_info("[STAT32]: 0x%08lx", regs->status32);
@@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs)
 	if (cregs)
 		show_callee_regs(cregs);
 
-	free_page((unsigned long)buf);
+	preempt_disable();
 }
 
 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -7,11 +7,39 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/cache.h>
 
-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use prefetchw and prealloc
+ * instructions on CPUs with a 64B L1 data cache line (L1_CACHE_SHIFT == 6).
+ * If you want to implement an optimized memset for other possible L1 data
+ * cache line lengths (32B and 128B), you should rewrite the code carefully,
+ * checking that no prefetchw/prealloc instruction is issued for L1 cache
+ * lines which don't belong to the memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR	reg, off
+	prealloc	[\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR	reg, off
+	prefetchw	[\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR
+.endm
+
+.macro PREFETCHW_INSTR
+.endm
+
+#endif
 
 ENTRY_CFI(memset)
-	prefetchw [r0]		; Prefetch the write location
+	PREFETCHW_INSTR	r0, 0	; Prefetch the first write location
 	mov.f	0, r2
 ;;; if size is zero
 	jz.d	[blink]
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
 
 	lpnz	@.Lset64bytes
 	;; LOOP START
-#ifdef PREALLOC_NOT_AVAIL
-	prefetchw [r3, 64]	;Prefetch the next write location
-#else
-	prealloc  [r3, 64]
-#endif
+	PREALLOC_INSTR	r3, 64	; alloc next line w/o fetching
+
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
 	lsr.f	lp_count, r2, 5 ;Last remaining max 124 bytes
 	lpnz	.Lset32bytes
 	;; LOOP START
-	prefetchw [r3, 32]	;Prefetch the next write location
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
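The invariant the new macros encode can be stated in C: prealloc is destructive (it allocates a dirty cache line without fetching its old contents), so it must only ever target a line lying entirely inside the destination buffer, and the tail gets no prefetch at all. A minimal model, assuming 64-byte lines; memset_model and its bound check are illustrative, not the kernel code:

  #include <stddef.h>
  #include <stdint.h>

  #define L1_LINE 64  /* model assumes L1_CACHE_SHIFT == 6 */

  /* Model of the fixed loop structure: the next line is prefetched with
   * write intent only while that whole line still lies inside [dst, dst+n). */
  static void memset_model(uint8_t *dst, uint8_t v, size_t n)
  {
          uint8_t *p = dst;
          uint8_t *end = dst + n;

          if (n == 0)
                  return;

          __builtin_prefetch(p, 1);        /* first line: a hint, not destructive */

          while (p + 2 * L1_LINE <= end) { /* is the next line fully inside? */
                  __builtin_prefetch(p + L1_LINE, 1);
                  for (int i = 0; i < L1_LINE; i++)
                          *p++ = v;
          }
          while (p < end)                  /* tail: no prefetch at all */
                  *p++ = v;
  }

  int main(void)
  {
          uint8_t buf[200];

          memset_model(buf, 0xaa, sizeof(buf));
          return buf[199] == 0xaa ? 0 : 1;
  }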
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -141,13 +141,18 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	 */
 	fault = handle_mm_fault(vma, address, flags);
 
+	/* If the page fault was interrupted by SIGKILL, exit page fault "early" */
 	if (fatal_signal_pending(current)) {
-		if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
-			up_read(&mm->mmap_sem);
-		if (user_mode(regs))
+
+		/*
+		 * if fault retry, mmap_sem was already relinquished by core mm,
+		 * so OK to return to user mode (with the signal handled first)
+		 */
+		if (fault & VM_FAULT_RETRY) {
+			if (!user_mode(regs))
+				goto no_context;
 			return;
+		}
 	}
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -119,7 +119,8 @@ void __init setup_arch_memory(void)
 	 */
 	memblock_add_node(low_mem_start, low_mem_sz, 0);
-	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+	memblock_reserve(CONFIG_LINUX_LINK_BASE,
+			 __pa(_end) - CONFIG_LINUX_LINK_BASE);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (phys_initrd_size) {
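The effect of anchoring the reservation at CONFIG_LINUX_LINK_BASE is easiest to see with numbers. The addresses below are purely hypothetical, for a configuration where the kernel is linked above the start of RAM:

  /* hypothetical layout */
  low_mem_start (RAM base) = 0x80000000
  CONFIG_LINUX_LINK_BASE   = 0x80008000
  __pa(_end)               = 0x80600000

  before: memblock_reserve(0x80000000, 0x00600000)  /* also reserves 32 KB below the kernel */
  after : memblock_reserve(0x80008000, 0x005f8000)  /* kernel image only */

The memory between the RAM base and the link base now stays available to the allocator.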