Commit 6aec3bfe authored by Greg Kroah-Hartman


Merge tag 'coresight-next-v5.18-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/coresight/linux into char-misc-next

Suzuki writes:

coresight: changes for v5.18

The coresight update for v5.18 includes
  - TRBE erratum workarounds for Arm Cortex-A510
  - Fixes for leaking root namespace PIDs into non-root namespace
    trace sessions
  - Miscellaneous fixes and cleanups

Tag updated to add the missing committer Signed-off-by (s-o-b) tags.
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>

* tag 'coresight-next-v5.18-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/coresight/linux:
  coresight: Drop unused 'none' enum value for each component
  coresight: etm3x: Don't trace PID for non-root PID namespace
  coresight: etm4x: Don't trace PID for non-root PID namespace
  coresight: etm4x: Don't use virtual contextID for non-root PID namespace
  coresight: etm4x: Add lock for reading virtual context ID comparator
  coresight: trbe: Move check for kernel page table isolation from EL0 to probe
  coresight: no-op refactor to make INSTP0 check more idiomatic
  hwtracing: coresight: Replace acpi_bus_get_device()
  coresight: syscfg: Fix memleak on registration failure in cscfg_create_device
  coresight: Fix TRCCONFIGR.QE sysfs interface
  coresight: trbe: Work around the trace data corruption
  coresight: trbe: Work around the invalid prohibited states
  coresight: trbe: Work around the ignored system register writes
parents cc6ce5ac 286f9505
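
The PID-namespace fixes in this pull all follow one gating pattern: trace configuration only programs (virtual) context ID tracing when the task driving the session lives in the root PID namespace, and the related sysfs handlers reject access otherwise, so root-namespace PIDs never leak into namespaced trace sessions. A condensed, kernel-style C sketch of that pattern, using the task_is_in_init_pid_ns() helper the etm3x/etm4x hunks below rely on (a sketch only, not a drop-in copy of the driver code):

#include <linux/pid_namespace.h>        /* task_is_in_init_pid_ns() */
#include <linux/sched.h>                /* current */

/*
 * Sketch only: gate context ID / virtual context ID tracing on the root
 * PID namespace, as the etm3x/etm4x changes below do.
 */
static bool etm_may_trace_pids(void)
{
        /* A task in a child PID namespace must not see root-namespace PIDs */
        return task_is_in_init_pid_ns(current);
}

The perf paths simply skip setting the context-ID bits when this returns false, while the vmid_* sysfs handlers return -EINVAL.
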
@@ -807,7 +807,7 @@ config ARM64_ERRATUM_2224489
 config ARM64_ERRATUM_2064142
         bool "Cortex-A510: 2064142: workaround TRBE register writes while disabled"
-        depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+        depends on CORESIGHT_TRBE
         default y
         help
           This option adds the workaround for ARM Cortex-A510 erratum 2064142.
@@ -825,7 +825,7 @@ config ARM64_ERRATUM_2064142
 config ARM64_ERRATUM_2038923
         bool "Cortex-A510: 2038923: workaround TRBE corruption with enable"
-        depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+        depends on CORESIGHT_TRBE
         default y
         help
           This option adds the workaround for ARM Cortex-A510 erratum 2038923.
@@ -848,7 +848,7 @@ config ARM64_ERRATUM_2038923
 config ARM64_ERRATUM_1902691
         bool "Cortex-A510: 1902691: workaround TRBE trace corruption"
-        depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+        depends on CORESIGHT_TRBE
         default y
         help
           This option adds the workaround for ARM Cortex-A510 erratum 1902691.
...
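
With the three erratum options now depending on CORESIGHT_TRBE instead of COMPILE_TEST, they become selectable on real configurations; the TRBE driver then keys each workaround off the matching arm64 cpucap (see the trbe_errata_cpucaps table further down). A minimal sketch of that style of runtime gate, assuming the usual this_cpu_has_cap() helper from asm/cpufeature.h; the driver's own detection code is not shown in this diff:

#include <asm/cpufeature.h>     /* this_cpu_has_cap() */

/*
 * Sketch only: must run on the CPU being queried (e.g. from a per-CPU
 * callback).  The cpucap is only ever detected when the corresponding
 * CONFIG_ARM64_ERRATUM_* option above is enabled.
 */
static bool cpu_needs_trbe_drain_workaround(void)
{
        return this_cpu_has_cap(ARM64_WORKAROUND_2064142);
}
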
@@ -1278,9 +1278,6 @@ static struct attribute *coresight_source_attrs[] = {
 ATTRIBUTE_GROUPS(coresight_source);
 static struct device_type coresight_dev_type[] = {
-        {
-                .name = "none",
-        },
         {
                 .name = "sink",
                 .groups = coresight_sink_groups,
...
@@ -340,6 +340,10 @@ static int etm_parse_event_config(struct etm_drvdata *drvdata,
         config->ctrl = attr->config;
+        /* Don't trace contextID when runs in non-root PID namespace */
+        if (!task_is_in_init_pid_ns(current))
+                config->ctrl &= ~ETMCR_CTXID_SIZE;
         /*
          * Possible to have cores with PTM (supports ret stack) and ETM
          * (never has ret stack) on the same SoC. So if we have a request
...
@@ -656,7 +656,9 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
                 config->cfg |= BIT(11);
         }
-        if (attr->config & BIT(ETM_OPT_CTXTID))
+        /* Only trace contextID when runs in root PID namespace */
+        if ((attr->config & BIT(ETM_OPT_CTXTID)) &&
+            task_is_in_init_pid_ns(current))
                 /* bit[6], Context ID tracing bit */
                 config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
@@ -670,7 +672,11 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
                         ret = -EINVAL;
                         goto out;
                 }
-                config->cfg |= BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT);
+                /* Only trace virtual contextID when runs in root PID namespace */
+                if (task_is_in_init_pid_ns(current))
+                        config->cfg |= BIT(ETM4_CFG_BIT_VMID) |
+                                       BIT(ETM4_CFG_BIT_VMID_OPT);
         }
         /* return stack - enable if selected and supported */
@@ -1091,7 +1097,7 @@ static void etm4_init_arch_data(void *info)
         etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
         /* INSTP0, bits[2:1] P0 tracing support field */
-        if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
+        if (BMVAL(etmidr0, 1, 2) == 0b11)
                 drvdata->instrp0 = true;
         else
                 drvdata->instrp0 = false;
...
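
The INSTP0 change above is the "no-op refactor" from the shortlog: requiring bits [2:1] of TRCIDR0 to read 0b11 is equivalent to testing the two bits individually. A small standalone check of that equivalence (userspace C; BMVAL here is assumed to match the coresight-priv.h helper, extracting bits [msb:lsb] of val):

#include <assert.h>
#include <stdint.h>

#define GENMASK_ULL(h, l)       (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
#define BMVAL(val, lsb, msb)    (((val) & GENMASK_ULL(msb, lsb)) >> (lsb))

int main(void)
{
        uint64_t idr0;

        for (idr0 = 0; idr0 < 8; idr0++) {
                int per_bit = BMVAL(idr0, 1, 1) && BMVAL(idr0, 2, 2);
                int field   = (BMVAL(idr0, 1, 2) == 0x3);

                assert(per_bit == field);       /* both demand bits [2:1] == 0b11 */
        }
        return 0;
}
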
@@ -367,8 +367,12 @@ static ssize_t mode_store(struct device *dev,
         mode = ETM_MODE_QELEM(config->mode);
         /* start by clearing QE bits */
         config->cfg &= ~(BIT(13) | BIT(14));
-        /* if supported, Q elements with instruction counts are enabled */
-        if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
+        /*
+         * if supported, Q elements with instruction counts are enabled.
+         * Always set the low bit for any requested mode. Valid combos are
+         * 0b00, 0b01 and 0b11.
+         */
+        if (mode && drvdata->q_support)
                 config->cfg |= BIT(13);
         /*
          * if supported, Q elements with and without instruction
@@ -2111,7 +2115,16 @@ static ssize_t vmid_val_show(struct device *dev,
         struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
         struct etmv4_config *config = &drvdata->config;
+        /*
+         * Don't use virtual contextID tracing if coming from a PID namespace.
+         * See comment in ctxid_pid_store().
+         */
+        if (!task_is_in_init_pid_ns(current))
+                return -EINVAL;
+        spin_lock(&drvdata->spinlock);
         val = (unsigned long)config->vmid_val[config->vmid_idx];
+        spin_unlock(&drvdata->spinlock);
         return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
@@ -2123,6 +2136,13 @@ static ssize_t vmid_val_store(struct device *dev,
         struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
         struct etmv4_config *config = &drvdata->config;
+        /*
+         * Don't use virtual contextID tracing if coming from a PID namespace.
+         * See comment in ctxid_pid_store().
+         */
+        if (!task_is_in_init_pid_ns(current))
+                return -EINVAL;
         /*
          * only implemented when vmid tracing is enabled, i.e. at least one
          * vmid comparator is implemented and at least 8 bit vmid size
@@ -2146,6 +2166,13 @@ static ssize_t vmid_masks_show(struct device *dev,
         struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
         struct etmv4_config *config = &drvdata->config;
+        /*
+         * Don't use virtual contextID tracing if coming from a PID namespace.
+         * See comment in ctxid_pid_store().
+         */
+        if (!task_is_in_init_pid_ns(current))
+                return -EINVAL;
         spin_lock(&drvdata->spinlock);
         val1 = config->vmid_mask0;
         val2 = config->vmid_mask1;
@@ -2163,6 +2190,13 @@ static ssize_t vmid_masks_store(struct device *dev,
         struct etmv4_config *config = &drvdata->config;
         int nr_inputs;
+        /*
+         * Don't use virtual contextID tracing if coming from a PID namespace.
+         * See comment in ctxid_pid_store().
+         */
+        if (!task_is_in_init_pid_ns(current))
+                return -EINVAL;
         /*
          * only implemented when vmid tracing is enabled, i.e. at least one
          * vmid comparator is implemented and at least 8 bit vmid size
...
@@ -626,7 +626,7 @@ static int acpi_coresight_parse_link(struct acpi_device *adev,
                                      const union acpi_object *link,
                                      struct coresight_connection *conn)
 {
-        int rc, dir;
+        int dir;
         const union acpi_object *fields;
         struct acpi_device *r_adev;
         struct device *rdev;
@@ -643,9 +643,9 @@ static int acpi_coresight_parse_link(struct acpi_device *adev,
             fields[3].type != ACPI_TYPE_INTEGER)
                 return -EINVAL;
-        rc = acpi_bus_get_device(fields[2].reference.handle, &r_adev);
-        if (rc)
-                return rc;
+        r_adev = acpi_fetch_acpi_dev(fields[2].reference.handle);
+        if (!r_adev)
+                return -ENODEV;
         dir = fields[3].integer.value;
         if (dir == ACPI_CORESIGHT_LINK_MASTER) {
...
@@ -1049,7 +1049,7 @@ static int cscfg_create_device(void)
         err = device_register(dev);
         if (err)
-                cscfg_dev_release(dev);
+                put_device(dev);
 create_dev_exit_unlock:
         mutex_unlock(&cscfg_mutex);
...
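
The syscfg fix applies the documented device_register() rule: once registration has been attempted, the only correct way to dispose of the device on failure is put_device(), which drops the kobject reference and invokes the release callback; calling the release function directly leaks the reference and the name. A generic sketch of the pattern with a hypothetical "example" device (illustrative only, not the cscfg code):

#include <linux/device.h>
#include <linux/slab.h>

struct example_dev {
        struct device dev;      /* hypothetical wrapper, for illustration */
};

static void example_dev_release(struct device *dev)
{
        kfree(container_of(dev, struct example_dev, dev));
}

static int example_register(struct device *parent)
{
        struct example_dev *edev;
        int err;

        edev = kzalloc(sizeof(*edev), GFP_KERNEL);
        if (!edev)
                return -ENOMEM;

        edev->dev.parent = parent;
        edev->dev.release = example_dev_release;
        dev_set_name(&edev->dev, "example");

        err = device_register(&edev->dev);
        if (err) {
                /*
                 * device_register() has already initialized the kobject;
                 * never free edev directly here.  put_device() drops the
                 * reference and ends up in example_dev_release().
                 */
                put_device(&edev->dev);
                return err;
        }
        return 0;
}
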
@@ -91,10 +91,16 @@ struct trbe_buf {
  */
 #define TRBE_WORKAROUND_OVERWRITE_FILL_MODE     0
 #define TRBE_WORKAROUND_WRITE_OUT_OF_RANGE      1
+#define TRBE_NEEDS_DRAIN_AFTER_DISABLE          2
+#define TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE       3
+#define TRBE_IS_BROKEN                          4
 static int trbe_errata_cpucaps[] = {
         [TRBE_WORKAROUND_OVERWRITE_FILL_MODE] = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
         [TRBE_WORKAROUND_WRITE_OUT_OF_RANGE] = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
+        [TRBE_NEEDS_DRAIN_AFTER_DISABLE] = ARM64_WORKAROUND_2064142,
+        [TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE] = ARM64_WORKAROUND_2038923,
+        [TRBE_IS_BROKEN] = ARM64_WORKAROUND_1902691,
         -1,             /* Sentinel, must be the last entry */
 };
@@ -167,6 +173,32 @@ static inline bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
         return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
 }
+static inline bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
+{
+        /*
+         * Errata affected TRBE implementation will need TSB CSYNC and
+         * DSB in order to prevent subsequent writes into certain TRBE
+         * system registers from being ignored and not effected.
+         */
+        return trbe_has_erratum(cpudata, TRBE_NEEDS_DRAIN_AFTER_DISABLE);
+}
+static inline bool trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata *cpudata)
+{
+        /*
+         * Errata affected TRBE implementation will need an additional
+         * context synchronization in order to prevent an inconsistent
+         * TRBE prohibited region view on the CPU which could possibly
+         * corrupt the TRBE buffer or the TRBE state.
+         */
+        return trbe_has_erratum(cpudata, TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE);
+}
+static inline bool trbe_is_broken(struct trbe_cpudata *cpudata)
+{
+        return trbe_has_erratum(cpudata, TRBE_IS_BROKEN);
+}
 static int trbe_alloc_node(struct perf_event *event)
 {
         if (event->cpu == -1)
@@ -174,17 +206,31 @@ static int trbe_alloc_node(struct perf_event *event)
         return cpu_to_node(event->cpu);
 }
-static void trbe_drain_buffer(void)
+static inline void trbe_drain_buffer(void)
 {
         tsb_csync();
         dsb(nsh);
 }
-static void trbe_drain_and_disable_local(void)
+static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
 {
-        u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
-        trbe_drain_buffer();
+        /*
+         * Enable the TRBE without clearing LIMITPTR which
+         * might be required for fetching the buffer limits.
+         */
+        trblimitr |= TRBLIMITR_ENABLE;
+        write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+        /* Synchronize the TRBE enable event */
+        isb();
+        if (trbe_needs_ctxt_sync_after_enable(cpudata))
+                isb();
+}
+static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
+{
+        u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
         /*
          * Disable the TRBE without clearing LIMITPTR which
@@ -192,12 +238,21 @@ static void trbe_drain_and_disable_local(void)
          */
         trblimitr &= ~TRBLIMITR_ENABLE;
         write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+        if (trbe_needs_drain_after_disable(cpudata))
+                trbe_drain_buffer();
         isb();
 }
-static void trbe_reset_local(void)
+static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
 {
-        trbe_drain_and_disable_local();
+        trbe_drain_buffer();
+        set_trbe_disabled(cpudata);
+}
+static void trbe_reset_local(struct trbe_cpudata *cpudata)
+{
+        trbe_drain_and_disable_local(cpudata);
         write_sysreg_s(0, SYS_TRBLIMITR_EL1);
         write_sysreg_s(0, SYS_TRBPTR_EL1);
         write_sysreg_s(0, SYS_TRBBASER_EL1);
@@ -234,7 +289,7 @@ static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
          * at event_stop(). So disable the TRBE here and leave
          * the update_buffer() to return a 0 size.
          */
-        trbe_drain_and_disable_local();
+        trbe_drain_and_disable_local(buf->cpudata);
         perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
         perf_aux_output_end(handle, 0);
         *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
@@ -536,9 +591,10 @@ static void clr_trbe_status(void)
         write_sysreg_s(trbsr, SYS_TRBSR_EL1);
 }
-static void set_trbe_limit_pointer_enabled(unsigned long addr)
+static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
 {
         u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+        unsigned long addr = buf->trbe_limit;
         WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT)));
         WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
@@ -566,12 +622,7 @@ static void set_trbe_limit_pointer_enabled(unsigned long addr)
         trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) <<
                      TRBLIMITR_TRIG_MODE_SHIFT;
         trblimitr |= (addr & PAGE_MASK);
-        trblimitr |= TRBLIMITR_ENABLE;
-        write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
-        /* Synchronize the TRBE enable event */
-        isb();
+        set_trbe_enabled(buf->cpudata, trblimitr);
 }
 static void trbe_enable_hw(struct trbe_buf *buf)
@@ -579,8 +630,7 @@ static void trbe_enable_hw(struct trbe_buf *buf)
         WARN_ON(buf->trbe_hw_base < buf->trbe_base);
         WARN_ON(buf->trbe_write < buf->trbe_hw_base);
         WARN_ON(buf->trbe_write >= buf->trbe_limit);
-        set_trbe_disabled();
-        isb();
+        set_trbe_disabled(buf->cpudata);
         clr_trbe_status();
         set_trbe_base_pointer(buf->trbe_hw_base);
         set_trbe_write_pointer(buf->trbe_write);
@@ -590,7 +640,7 @@ static void trbe_enable_hw(struct trbe_buf *buf)
          * till now before enabling the TRBE.
          */
         isb();
-        set_trbe_limit_pointer_enabled(buf->trbe_limit);
+        set_trbe_limit_pointer_enabled(buf);
 }
 static enum trbe_fault_action trbe_get_fault_act(struct perf_output_handle *handle,
@@ -775,7 +825,7 @@ static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
          * the TRBE here will ensure that no IRQ could be generated when the perf
          * handle gets freed in etm_event_stop().
          */
-        trbe_drain_and_disable_local();
+        trbe_drain_and_disable_local(cpudata);
         /* Check if there is a pending interrupt and handle it here */
         status = read_sysreg_s(SYS_TRBSR_EL1);
@@ -986,7 +1036,7 @@ static int arm_trbe_disable(struct coresight_device *csdev)
         if (cpudata->mode != CS_MODE_PERF)
                 return -EINVAL;
-        trbe_drain_and_disable_local();
+        trbe_drain_and_disable_local(cpudata);
         buf->cpudata = NULL;
         cpudata->buf = NULL;
         cpudata->mode = CS_MODE_DISABLED;
@@ -995,16 +1045,15 @@ static int arm_trbe_disable(struct coresight_device *csdev)
 static void trbe_handle_spurious(struct perf_output_handle *handle)
 {
-        u64 limitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+        struct trbe_buf *buf = etm_perf_sink_config(handle);
+        u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
         /*
          * If the IRQ was spurious, simply re-enable the TRBE
          * back without modifying the buffer parameters to
          * retain the trace collected so far.
          */
-        limitr |= TRBLIMITR_ENABLE;
-        write_sysreg_s(limitr, SYS_TRBLIMITR_EL1);
-        isb();
+        set_trbe_enabled(buf->cpudata, trblimitr);
 }
 static int trbe_handle_overflow(struct perf_output_handle *handle)
@@ -1028,7 +1077,7 @@ static int trbe_handle_overflow(struct perf_output_handle *handle)
          * is able to detect this with a disconnected handle
          * (handle->event = NULL).
          */
-        trbe_drain_and_disable_local();
+        trbe_drain_and_disable_local(buf->cpudata);
         *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
         return -EINVAL;
 }
@@ -1062,6 +1111,7 @@ static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
 {
         struct perf_output_handle **handle_ptr = dev;
         struct perf_output_handle *handle = *handle_ptr;
+        struct trbe_buf *buf = etm_perf_sink_config(handle);
         enum trbe_fault_action act;
         u64 status;
         bool truncated = false;
@@ -1082,7 +1132,7 @@ static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
          * Ensure the trace is visible to the CPUs and
          * any external aborts have been resolved.
          */
-        trbe_drain_and_disable_local();
+        trbe_drain_and_disable_local(buf->cpudata);
         clr_trbe_irq();
         isb();
@@ -1167,8 +1217,9 @@ static const struct attribute_group *arm_trbe_groups[] = {
 static void arm_trbe_enable_cpu(void *info)
 {
         struct trbe_drvdata *drvdata = info;
+        struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
-        trbe_reset_local();
+        trbe_reset_local(cpudata);
         enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
 }
@@ -1244,6 +1295,11 @@ static void arm_trbe_probe_cpu(void *info)
          */
         trbe_check_errata(cpudata);
+        if (trbe_is_broken(cpudata)) {
+                pr_err("Disabling TRBE on cpu%d due to erratum\n", cpu);
+                goto cpu_clear;
+        }
         /*
          * If the TRBE is affected by erratum TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
          * we must always program the TBRPTR_EL1, 256bytes from a page
@@ -1276,7 +1332,7 @@ static void arm_trbe_remove_coresight_cpu(void *info)
         struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
         disable_percpu_irq(drvdata->irq);
-        trbe_reset_local();
+        trbe_reset_local(cpudata);
         if (trbe_csdev) {
                 coresight_unregister(trbe_csdev);
                 cpudata->drvdata = NULL;
@@ -1349,8 +1405,10 @@ static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
         struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
         if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+                struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
                 disable_percpu_irq(drvdata->irq);
-                trbe_reset_local();
+                trbe_reset_local(cpudata);
         }
         return 0;
 }
@@ -1423,6 +1481,12 @@ static int arm_trbe_device_probe(struct platform_device *pdev)
         struct device *dev = &pdev->dev;
         int ret;
+        /* Trace capture is not possible with kernel page table isolation */
+        if (arm64_kernel_unmapped_at_el0()) {
+                pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
+                return -EOPNOTSUPP;
+        }
         drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
         if (!drvdata)
                 return -ENOMEM;
@@ -1484,11 +1548,6 @@ static int __init arm_trbe_init(void)
 {
         int ret;
-        if (arm64_kernel_unmapped_at_el0()) {
-                pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
-                return -EOPNOTSUPP;
-        }
         ret = platform_driver_register(&arm_trbe_driver);
         if (!ret)
                 return 0;
...
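
In summary, the TRBE hunks above hang the three Cortex-A510 workarounds off the enable/disable paths: erratum 2064142 adds a drain (TSB CSYNC + DSB) after clearing the enable bit, erratum 2038923 adds a second ISB after setting it, and erratum 1902691 simply keeps the TRBE from being registered on affected CPUs. Condensed from the diff above (a sketch, not a literal copy of the driver):

static void trbe_enable_sketch(struct trbe_cpudata *cpudata, u64 trblimitr)
{
        trblimitr |= TRBLIMITR_ENABLE;
        write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
        isb();                                  /* synchronize the enable */
        if (trbe_needs_ctxt_sync_after_enable(cpudata))
                isb();                          /* erratum 2038923 */
}

static void trbe_disable_sketch(struct trbe_cpudata *cpudata)
{
        u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

        trblimitr &= ~TRBLIMITR_ENABLE;
        write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
        if (trbe_needs_drain_after_disable(cpudata))
                trbe_drain_buffer();            /* erratum 2064142: TSB CSYNC + DSB */
        isb();
}

/* Erratum 1902691 has no per-access workaround: trbe_is_broken() makes
 * arm_trbe_probe_cpu() skip the TRBE on affected CPUs entirely. */
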
@@ -91,14 +91,6 @@ static inline bool is_trbe_running(u64 trbsr)
 #define TRBE_FILL_MODE_WRAP             1
 #define TRBE_FILL_MODE_CIRCULAR_BUFFER  3
-static inline void set_trbe_disabled(void)
-{
-        u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
-        trblimitr &= ~TRBLIMITR_ENABLE;
-        write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
-}
 static inline bool get_trbe_flag_update(u64 trbidr)
 {
         return trbidr & TRBIDR_FLAG;
...
@@ -36,7 +36,6 @@
 extern struct bus_type coresight_bustype;
 enum coresight_dev_type {
-        CORESIGHT_DEV_TYPE_NONE,
         CORESIGHT_DEV_TYPE_SINK,
         CORESIGHT_DEV_TYPE_LINK,
         CORESIGHT_DEV_TYPE_LINKSINK,
@@ -46,7 +45,6 @@ enum coresight_dev_type {
 };
 enum coresight_dev_subtype_sink {
-        CORESIGHT_DEV_SUBTYPE_SINK_NONE,
         CORESIGHT_DEV_SUBTYPE_SINK_PORT,
         CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
         CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
@@ -54,21 +52,18 @@ enum coresight_dev_subtype_sink {
 };
 enum coresight_dev_subtype_link {
-        CORESIGHT_DEV_SUBTYPE_LINK_NONE,
         CORESIGHT_DEV_SUBTYPE_LINK_MERG,
         CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
         CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
 };
 enum coresight_dev_subtype_source {
-        CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
         CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
         CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
         CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
 };
 enum coresight_dev_subtype_helper {
-        CORESIGHT_DEV_SUBTYPE_HELPER_NONE,
         CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
 };
...