Commit 9ca2c16f authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Thomas Gleixner:
 "Perf tool updates and kprobe fixes:

   - perf_mmap overwrite mode fixes/overhaul, prep work to get 'perf
     top' using it, making it bearable to use it in large core count
     systems such as Knights Landing/Mill Intel systems (Kan Liang)

   - s390 now uses syscall.tbl, just like x86-64, to generate the
     syscall id -> string tables used by 'perf trace' (Hendrik
     Brueckner)

   - Use strtoull() instead of a home-grown function (Andy Shevchenko)

   - Synchronize kernel ABI headers, v4.16-rc1 (Ingo Molnar)

   - Document missing 'perf data --force' option (Sangwon Hong)

   - Add perf vendor JSON metrics for ARM Cortex-A53 Processor (William
     Cohen)

   - Improve error handling and error propagation of ftrace-based
     kprobes so that failures when installing kprobes are no longer
     silently ignored, leaving dysfunctional tracepoints behind"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  kprobes: Propagate error from disarm_kprobe_ftrace()
  kprobes: Propagate error from arm_kprobe_ftrace()
  Revert "tools include s390: Grab a copy of arch/s390/include/uapi/asm/unistd.h"
  perf s390: Rework system call table creation by using syscall.tbl
  perf s390: Grab a copy of arch/s390/kernel/syscall/syscall.tbl
  tools/headers: Synchronize kernel ABI headers, v4.16-rc1
  perf test: Fix test trace+probe_libc_inet_pton.sh for s390x
  perf data: Document missing --force option
  perf tools: Substitute yet another strtoull()
  perf top: Check the latency of perf_top__mmap_read()
  perf top: Switch default mode to overwrite mode
  perf top: Remove lost events checking
  perf hists browser: Add parameter to disable lost event warning
  perf top: Add overwrite fall back
  perf evsel: Expose the perf_missing_features struct
  perf top: Check per-event overwrite term
  perf mmap: Discard legacy interface for mmap read
  perf test: Update mmap read functions for backward-ring-buffer test
  perf mmap: Introduce perf_mmap__read_event()
  perf mmap: Introduce perf_mmap__read_done()
  ...
parents 2d6c4e40 297f9233
@@ -978,67 +978,90 @@ static int prepare_kprobe(struct kprobe *p)
 }
 
 /* Caller must lock kprobe_mutex */
-static void arm_kprobe_ftrace(struct kprobe *p)
+static int arm_kprobe_ftrace(struct kprobe *p)
 {
-	int ret;
+	int ret = 0;
 
 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 				   (unsigned long)p->addr, 0, 0);
-	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
-	kprobe_ftrace_enabled++;
-	if (kprobe_ftrace_enabled == 1) {
+	if (ret) {
+		pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+		return ret;
+	}
+
+	if (kprobe_ftrace_enabled == 0) {
 		ret = register_ftrace_function(&kprobe_ftrace_ops);
-		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+		if (ret) {
+			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
+			goto err_ftrace;
+		}
 	}
+
+	kprobe_ftrace_enabled++;
+	return ret;
+
+err_ftrace:
+	/*
+	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
+	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
+	 * empty filter_hash which would undesirably trace all functions.
+	 */
+	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
+	return ret;
 }
 
 /* Caller must lock kprobe_mutex */
-static void disarm_kprobe_ftrace(struct kprobe *p)
+static int disarm_kprobe_ftrace(struct kprobe *p)
 {
-	int ret;
+	int ret = 0;
 
-	kprobe_ftrace_enabled--;
-	if (kprobe_ftrace_enabled == 0) {
+	if (kprobe_ftrace_enabled == 1) {
 		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
-		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
+			return ret;
 	}
+
+	kprobe_ftrace_enabled--;
+
 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 			   (unsigned long)p->addr, 1, 0);
 	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+	return ret;
 }
 #else	/* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)	arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p)	do {} while (0)
-#define disarm_kprobe_ftrace(p)	do {} while (0)
+#define arm_kprobe_ftrace(p)	(-ENODEV)
+#define disarm_kprobe_ftrace(p)	(-ENODEV)
 #endif
 
 /* Arm a kprobe with text_mutex */
-static void arm_kprobe(struct kprobe *kp)
+static int arm_kprobe(struct kprobe *kp)
 {
-	if (unlikely(kprobe_ftrace(kp))) {
-		arm_kprobe_ftrace(kp);
-		return;
-	}
+	if (unlikely(kprobe_ftrace(kp)))
+		return arm_kprobe_ftrace(kp);
+
 	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	__arm_kprobe(kp);
 	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
+
+	return 0;
 }
 
 /* Disarm a kprobe with text_mutex */
-static void disarm_kprobe(struct kprobe *kp, bool reopt)
+static int disarm_kprobe(struct kprobe *kp, bool reopt)
 {
-	if (unlikely(kprobe_ftrace(kp))) {
-		disarm_kprobe_ftrace(kp);
-		return;
-	}
+	if (unlikely(kprobe_ftrace(kp)))
+		return disarm_kprobe_ftrace(kp);
+
 	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	__disarm_kprobe(kp, reopt);
 	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
+
+	return 0;
 }
 
 /*
@@ -1362,9 +1385,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
+		if (!kprobes_all_disarmed) {
 			/* Arm the breakpoint again. */
-			arm_kprobe(ap);
+			ret = arm_kprobe(ap);
+
+			if (ret) {
+				ap->flags |= KPROBE_FLAG_DISABLED;
+				list_del_rcu(&p->list);
+				synchronize_sched();
+			}
+		}
 	}
 	return ret;
 }
@@ -1573,8 +1602,14 @@ int register_kprobe(struct kprobe *p)
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		arm_kprobe(p);
+	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+		ret = arm_kprobe(p);
+		if (ret) {
+			hlist_del_rcu(&p->hlist);
+			synchronize_sched();
+			goto out;
+		}
+	}
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
@@ -1608,11 +1643,12 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
 static struct kprobe *__disable_kprobe(struct kprobe *p)
 {
 	struct kprobe *orig_p;
+	int ret;
 
 	/* Get an original kprobe for return */
 	orig_p = __get_valid_kprobe(p);
 	if (unlikely(orig_p == NULL))
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	if (!kprobe_disabled(p)) {
 		/* Disable probe if it is a child probe */
@@ -1626,8 +1662,13 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
 			 * should have already been disarmed, so
 			 * skip unneed disarming process.
 			 */
-			if (!kprobes_all_disarmed)
-				disarm_kprobe(orig_p, true);
+			if (!kprobes_all_disarmed) {
+				ret = disarm_kprobe(orig_p, true);
+				if (ret) {
+					p->flags &= ~KPROBE_FLAG_DISABLED;
+					return ERR_PTR(ret);
+				}
+			}
 			orig_p->flags |= KPROBE_FLAG_DISABLED;
 		}
 	}
@@ -1644,8 +1685,8 @@ static int __unregister_kprobe_top(struct kprobe *p)
 
 	/* Disable kprobe. This will disarm it if needed. */
 	ap = __disable_kprobe(p);
-	if (ap == NULL)
-		return -EINVAL;
+	if (IS_ERR(ap))
+		return PTR_ERR(ap);
 
 	if (ap == p)
 		/*
@@ -2078,12 +2119,14 @@ static void kill_kprobe(struct kprobe *p)
 int disable_kprobe(struct kprobe *kp)
 {
 	int ret = 0;
+	struct kprobe *p;
 
 	mutex_lock(&kprobe_mutex);
 
 	/* Disable this kprobe */
-	if (__disable_kprobe(kp) == NULL)
-		ret = -EINVAL;
+	p = __disable_kprobe(kp);
+	if (IS_ERR(p))
+		ret = PTR_ERR(p);
 
 	mutex_unlock(&kprobe_mutex);
 	return ret;
@@ -2116,7 +2159,9 @@ int enable_kprobe(struct kprobe *kp)
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
 		p->flags &= ~KPROBE_FLAG_DISABLED;
-		arm_kprobe(p);
+		ret = arm_kprobe(p);
+		if (ret)
+			p->flags |= KPROBE_FLAG_DISABLED;
 	}
 out:
 	mutex_unlock(&kprobe_mutex);
@@ -2407,11 +2452,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
 	.release        = seq_release,
 };
 
-static void arm_all_kprobes(void)
+static int arm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
-	unsigned int i;
+	unsigned int i, total = 0, errors = 0;
+	int err, ret = 0;
 
 	mutex_lock(&kprobe_mutex);
 
@@ -2428,46 +2474,74 @@ static void arm_all_kprobes(void)
 	/* Arming kprobes doesn't optimize kprobe itself */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, head, hlist)
-			if (!kprobe_disabled(p))
-				arm_kprobe(p);
+		/* Arm all kprobes on a best-effort basis */
+		hlist_for_each_entry_rcu(p, head, hlist) {
+			if (!kprobe_disabled(p)) {
+				err = arm_kprobe(p);
+				if (err) {
+					errors++;
+					ret = err;
+				}
+				total++;
+			}
+		}
 	}
 
-	printk(KERN_INFO "Kprobes globally enabled\n");
+	if (errors)
+		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
+			errors, total);
+	else
+		pr_info("Kprobes globally enabled\n");
 
 already_enabled:
 	mutex_unlock(&kprobe_mutex);
-	return;
+	return ret;
 }
 
-static void disarm_all_kprobes(void)
+static int disarm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
-	unsigned int i;
+	unsigned int i, total = 0, errors = 0;
+	int err, ret = 0;
 
 	mutex_lock(&kprobe_mutex);
 
 	/* If kprobes are already disarmed, just return */
 	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
-		return;
+		return 0;
 	}
 
 	kprobes_all_disarmed = true;
-	printk(KERN_INFO "Kprobes globally disabled\n");
 
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
+		/* Disarm all kprobes on a best-effort basis */
 		hlist_for_each_entry_rcu(p, head, hlist) {
-			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				disarm_kprobe(p, false);
+			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
+				err = disarm_kprobe(p, false);
+				if (err) {
+					errors++;
+					ret = err;
+				}
+				total++;
+			}
 		}
 	}
+
+	if (errors)
+		pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
+			errors, total);
+	else
+		pr_info("Kprobes globally disabled\n");
+
 	mutex_unlock(&kprobe_mutex);
 
 	/* Wait for disarming all kprobes by optimizer */
 	wait_for_kprobe_optimizer();
+
+	return ret;
 }
 
 /*
@@ -2494,6 +2568,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 {
 	char buf[32];
 	size_t buf_size;
+	int ret = 0;
 
 	buf_size = min(count, (sizeof(buf)-1));
 	if (copy_from_user(buf, user_buf, buf_size))
@@ -2504,17 +2579,20 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	case 'y':
 	case 'Y':
 	case '1':
-		arm_all_kprobes();
+		ret = arm_all_kprobes();
 		break;
 	case 'n':
 	case 'N':
 	case '0':
-		disarm_all_kprobes();
+		ret = disarm_all_kprobes();
 		break;
 	default:
 		return -EINVAL;
 	}
 
+	if (ret)
+		return ret;
+
 	return count;
 }
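The user-visible effect of the error propagation above is that registration now fails loudly instead of leaving a dead probe behind. A minimal caller sketch follows; it is not part of this diff, and the probed symbol is purely illustrative:

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
	{
		return 0;	/* observe only, let execution continue */
	}

	static struct kprobe kp = {
		.symbol_name = "do_sys_open",	/* illustrative probe target */
		.pre_handler = handler_pre,
	};

	static int __init probe_init(void)
	{
		int ret = register_kprobe(&kp);

		/*
		 * With the changes above, an arming failure (e.g. -ENODEV
		 * from the stubbed-out ftrace path) surfaces here instead
		 * of leaving a silently dysfunctional probe installed.
		 */
		if (ret < 0)
			pr_err("register_kprobe failed: %d\n", ret);
		return ret;
	}
	module_init(probe_init);
	MODULE_LICENSE("GPL");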
......
@@ -632,6 +632,8 @@ struct kvm_ppc_cpu_char {
 #define  KVM_REG_PPC_TIDR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
 #define  KVM_REG_PPC_PSSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
+#define KVM_REG_PPC_DEC_EXPIRY	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
......
@@ -210,6 +210,7 @@
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
+#define X86_FEATURE_SEV			( 7*32+20) /* AMD Secure Encrypted Virtualization */
 
 #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
......
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
	I915_MOCS_CACHED,
};

/*
* Different engines serve different roles, and there may be more than one
* engine serving each role. enum drm_i915_gem_engine_class provides a
* classification of the role of the engine, which may be used when requesting
* operations to be performed on a certain subset of engines, or for providing
* information about that group.
*/
enum drm_i915_gem_engine_class {
I915_ENGINE_CLASS_RENDER = 0,
I915_ENGINE_CLASS_COPY = 1,
I915_ENGINE_CLASS_VIDEO = 2,
I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
I915_ENGINE_CLASS_INVALID = -1
};
/**
* DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
*
*/
enum drm_i915_pmu_engine_sample {
I915_SAMPLE_BUSY = 0,
I915_SAMPLE_WAIT = 1,
I915_SAMPLE_SEMA = 2
};
#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
#define __I915_PMU_ENGINE(class, instance, sample) \
((class) << I915_PMU_CLASS_SHIFT | \
(instance) << I915_PMU_SAMPLE_BITS | \
(sample))
#define I915_PMU_ENGINE_BUSY(class, instance) \
__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
#define I915_PMU_ENGINE_WAIT(class, instance) \
__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
#define I915_PMU_ENGINE_SEMA(class, instance) \
__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
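To make the encoding concrete, here is a small stand-alone illustration; it re-derives the macros locally so it compiles on its own, and computes the config value for busy-time sampling of the second video engine (class 2, instance 1):

	#include <stdio.h>

	#define I915_PMU_SAMPLE_BITS		(4)
	#define I915_PMU_SAMPLE_INSTANCE_BITS	(8)
	#define I915_PMU_CLASS_SHIFT \
		(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
	#define __I915_PMU_ENGINE(class, instance, sample) \
		((class) << I915_PMU_CLASS_SHIFT | \
		 (instance) << I915_PMU_SAMPLE_BITS | \
		 (sample))

	int main(void)
	{
		/* I915_ENGINE_CLASS_VIDEO = 2, I915_SAMPLE_BUSY = 0 */
		unsigned long config = __I915_PMU_ENGINE(2, 1, 0);

		/* 2 << 12 | 1 << 4 | 0 == 0x2010 */
		printf("perf_event_attr.config = 0x%lx\n", config);
		return 0;
	}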
/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use

@@ -450,6 +506,27 @@ typedef struct drm_i915_irq_wait {
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
/*
* Query whether every context (both per-file default and user created) is
* isolated (insofar as HW supports). If this parameter is not true, then
* freshly created contexts may inherit values from an existing context,
* rather than default HW values. If true, it also ensures (insofar as HW
* supports) that all state set by this context will not leak to any other
* context.
*
* As not every engine across every gen support contexts, the returned
* value reports the support of context isolation for individual engines by
* returning a bitmask of each engine class set to true if that class supports
* isolation.
*/
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
* registers. This used to be fixed per platform but from CNL onwards, this
* might vary depending on the parts.
*/
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
typedef struct drm_i915_getparam {
	__s32 param;
	/*
......
@@ -163,6 +163,7 @@ enum {
 	IFLA_IF_NETNSID,
 	IFLA_CARRIER_UP_COUNT,
 	IFLA_CARRIER_DOWN_COUNT,
+	IFLA_NEW_IFINDEX,
 	__IFLA_MAX
 };
......
@@ -1362,6 +1362,96 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_S390_CMMA_MIGRATION */
#define KVM_S390_GET_CMMA_BITS		_IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
#define KVM_S390_SET_CMMA_BITS		_IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
/* Memory Encryption Commands */
#define KVM_MEMORY_ENCRYPT_OP _IOWR(KVMIO, 0xba, unsigned long)
struct kvm_enc_region {
__u64 addr;
__u64 size;
};
#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
KVM_SEV_INIT = 0,
KVM_SEV_ES_INIT,
/* Guest launch commands */
KVM_SEV_LAUNCH_START,
KVM_SEV_LAUNCH_UPDATE_DATA,
KVM_SEV_LAUNCH_UPDATE_VMSA,
KVM_SEV_LAUNCH_SECRET,
KVM_SEV_LAUNCH_MEASURE,
KVM_SEV_LAUNCH_FINISH,
/* Guest migration commands (outgoing) */
KVM_SEV_SEND_START,
KVM_SEV_SEND_UPDATE_DATA,
KVM_SEV_SEND_UPDATE_VMSA,
KVM_SEV_SEND_FINISH,
/* Guest migration commands (incoming) */
KVM_SEV_RECEIVE_START,
KVM_SEV_RECEIVE_UPDATE_DATA,
KVM_SEV_RECEIVE_UPDATE_VMSA,
KVM_SEV_RECEIVE_FINISH,
/* Guest status and debug commands */
KVM_SEV_GUEST_STATUS,
KVM_SEV_DBG_DECRYPT,
KVM_SEV_DBG_ENCRYPT,
/* Guest certificates commands */
KVM_SEV_CERT_EXPORT,
KVM_SEV_NR_MAX,
};
struct kvm_sev_cmd {
__u32 id;
__u64 data;
__u32 error;
__u32 sev_fd;
};
struct kvm_sev_launch_start {
__u32 handle;
__u32 policy;
__u64 dh_uaddr;
__u32 dh_len;
__u64 session_uaddr;
__u32 session_len;
};
struct kvm_sev_launch_update_data {
__u64 uaddr;
__u32 len;
};
struct kvm_sev_launch_secret {
__u64 hdr_uaddr;
__u32 hdr_len;
__u64 guest_uaddr;
__u32 guest_len;
__u64 trans_uaddr;
__u32 trans_len;
};
struct kvm_sev_launch_measure {
__u64 uaddr;
__u32 len;
};
struct kvm_sev_guest_status {
__u32 handle;
__u32 policy;
__u32 state;
};
struct kvm_sev_dbg {
__u64 src_uaddr;
__u64 dst_uaddr;
__u32 len;
};
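A hedged sketch of how userspace might drive this command interface: the KVM_SEV_* commands are multiplexed through the KVM_MEMORY_ENCRYPT_OP ioctl on a VM file descriptor, with struct kvm_sev_cmd as the envelope. The helper below is illustrative, not from this diff; fd acquisition, the error-return convention, and all error handling are assumptions:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Illustrative helper: issue one SEV command against a KVM VM fd.
	 * sev_fd is an open handle to the SEV device (commonly /dev/sev). */
	static int sev_issue_cmd(int vm_fd, int sev_fd, uint32_t id, void *data)
	{
		struct kvm_sev_cmd cmd = {
			.id     = id,			/* e.g. KVM_SEV_INIT */
			.data   = (uint64_t)(unsigned long)data,
			.sev_fd = sev_fd,
		};
		int ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

		/* on failure, cmd.error carries the firmware error code */
		return ret ? -(int)cmd.error : 0;
	}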
#define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3		(1 << 1)
......
@@ -30,6 +30,10 @@ OPTIONS for 'convert'
 -i::
 	Specify input perf data file path.
 
+-f::
+--force::
+	Don't complain, do it.
+
 -v::
 --verbose::
 	Be more verbose (show counter open errors, etc).
......
@@ -10,15 +10,19 @@ PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
 
 out    := $(OUTPUT)arch/s390/include/generated/asm
 header := $(out)/syscalls_64.c
-sysdef := $(srctree)/tools/arch/s390/include/uapi/asm/unistd.h
-sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls/
+syskrn := $(srctree)/arch/s390/kernel/syscalls/syscall.tbl
+sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls
+sysdef := $(sysprf)/syscall.tbl
 systbl := $(sysprf)/mksyscalltbl
 
 # Create output directory if not already present
 _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
 
 $(header): $(sysdef) $(systbl)
-	$(Q)$(SHELL) '$(systbl)' '$(CC)' $(sysdef) > $@
+	@(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+	(diff -B $(sysdef) $(syskrn) >/dev/null) \
+	|| echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
+	$(Q)$(SHELL) '$(systbl)' $(sysdef) > $@
 
 clean::
 	$(call QUIET_CLEAN, s390) $(RM) $(header)
......
@@ -3,25 +3,23 @@
 #
 # Generate system call table for perf
 #
-#
-# Copyright IBM Corp. 2017
+# Copyright IBM Corp. 2017, 2018
 # Author(s):  Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 #
 
-gcc=$1
-input=$2
+SYSCALL_TBL=$1
 
-if ! test -r $input; then
+if ! test -r $SYSCALL_TBL; then
 	echo "Could not read input file" >&2
 	exit 1
 fi
 
 create_table()
 {
-	local max_nr
+	local max_nr nr abi sc discard
 
 	echo 'static const char *syscalltbl_s390_64[] = {'
-	while read sc nr; do
+	while read nr abi sc discard; do
 		printf '\t[%d] = "%s",\n' $nr $sc
 		max_nr=$nr
 	done
@@ -29,8 +27,6 @@ create_table()
 	echo "#define SYSCALLTBL_S390_64_MAX_ID $max_nr"
 }
 
-
-$gcc -m64 -E -dM -x c  $input	       \
-	|sed -ne 's/^#define __NR_//p' \
-	|sort -t' ' -k2 -nu	       \
+grep -E "^[[:digit:]]+[[:space:]]+(common|64)" $SYSCALL_TBL \
	|sort -k1 -n					    \
	|create_table
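For reference, a hypothetical excerpt of the syscalls_64.c the rewritten script generates, assuming syscall.tbl carries entries such as "1 common exit sys_exit" and "2 common fork sys_fork" (the printf format above produces exactly this shape):

	static const char *syscalltbl_s390_64[] = {
		[1] = "exit",
		[2] = "fork",
	};
	#define SYSCALLTBL_S390_64_MAX_ID 2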
@@ -2245,7 +2245,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
 	c2c_browser__update_nr_entries(browser);
 
 	while (1) {
-		key = hist_browser__run(browser, "? - help");
+		key = hist_browser__run(browser, "? - help", true);
 
 		switch (key) {
 		case 's':
@@ -2314,7 +2314,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
 	c2c_browser__update_nr_entries(browser);
 
 	while (1) {
-		key = hist_browser__run(browser, "? - help");
+		key = hist_browser__run(browser, "? - help", true);
 
 		switch (key) {
 		case 'q':
......
@@ -530,7 +530,8 @@ static int report__browse_hists(struct report *rep)
 	case 1:
 		ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
 						    rep->min_percent,
-						    &session->header.env);
+						    &session->header.env,
+						    true);
 		/*
 		 * Usually "ret" is the last pressed key, and we only
 		 * care if the key notifies us to switch data file.
......
@@ -283,8 +283,9 @@ static void perf_top__print_sym_table(struct perf_top *top)
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
 
-	if (hists->stats.nr_lost_warned !=
-	    hists->stats.nr_events[PERF_RECORD_LOST]) {
+	if (!top->record_opts.overwrite &&
+	    (hists->stats.nr_lost_warned !=
+	    hists->stats.nr_events[PERF_RECORD_LOST])) {
 		hists->stats.nr_lost_warned =
 			hists->stats.nr_events[PERF_RECORD_LOST];
 		color_fprintf(stdout, PERF_COLOR_RED,
@@ -611,7 +612,8 @@ static void *display_thread_tui(void *arg)
 	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
 				      top->min_percent,
-				      &top->session->header.env);
+				      &top->session->header.env,
+				      !top->record_opts.overwrite);
 
 	done = 1;
 	return NULL;
@@ -807,15 +809,23 @@ static void perf_event__process_sample(struct perf_tool *tool,
 
 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 {
+	struct record_opts *opts = &top->record_opts;
+	struct perf_evlist *evlist = top->evlist;
 	struct perf_sample sample;
 	struct perf_evsel *evsel;
+	struct perf_mmap *md;
 	struct perf_session *session = top->session;
 	union perf_event *event;
 	struct machine *machine;
+	u64 end, start;
 	int ret;
 
-	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
-		ret = perf_evlist__parse_sample(top->evlist, event, &sample);
+	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
+	if (perf_mmap__read_init(md, opts->overwrite, &start, &end) < 0)
+		return;
+
+	while ((event = perf_mmap__read_event(md, opts->overwrite, &start, end)) != NULL) {
+		ret = perf_evlist__parse_sample(evlist, event, &sample);
 		if (ret) {
 			pr_err("Can't parse sample, err = %d\n", ret);
 			goto next_event;
@@ -869,16 +879,120 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 		} else
 			++session->evlist->stats.nr_unknown_events;
 next_event:
-		perf_evlist__mmap_consume(top->evlist, idx);
+		perf_mmap__consume(md, opts->overwrite);
 	}
+
+	perf_mmap__read_done(md);
 }
 
 static void perf_top__mmap_read(struct perf_top *top)
 {
+	bool overwrite = top->record_opts.overwrite;
+	struct perf_evlist *evlist = top->evlist;
+	unsigned long long start, end;
 	int i;
 
+	start = rdclock();
+	if (overwrite)
+		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
+
 	for (i = 0; i < top->evlist->nr_mmaps; i++)
 		perf_top__mmap_read_idx(top, i);
+
+	if (overwrite) {
+		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
+		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
+	}
+	end = rdclock();
+
+	if ((end - start) > (unsigned long long)top->delay_secs * NSEC_PER_SEC)
+		ui__warning("Too slow to read ring buffer.\n"
+			    "Please try increasing the period (-c) or\n"
+			    "decreasing the freq (-F) or\n"
+			    "limiting the number of CPUs (-C)\n");
+}
/*
* Check per-event overwrite term.
* perf top should support consistent term for all events.
* - All events don't have per-event term
* E.g. "cpu/cpu-cycles/,cpu/instructions/"
* Nothing change, return 0.
* - All events have same per-event term
* E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/
* Using the per-event setting to replace the opts->overwrite if
* they are different, then return 0.
* - Events have different per-event term
* E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
* Return -1
* - Some of the event set per-event term, but some not.
* E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
* Return -1
*/
static int perf_top__overwrite_check(struct perf_top *top)
{
struct record_opts *opts = &top->record_opts;
struct perf_evlist *evlist = top->evlist;
struct perf_evsel_config_term *term;
struct list_head *config_terms;
struct perf_evsel *evsel;
int set, overwrite = -1;
evlist__for_each_entry(evlist, evsel) {
set = -1;
config_terms = &evsel->config_terms;
list_for_each_entry(term, config_terms, list) {
if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
set = term->val.overwrite ? 1 : 0;
}
/* no term for current and previous event (likely) */
if ((overwrite < 0) && (set < 0))
continue;
/* has term for both current and previous event, compare */
if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
return -1;
/* no term for current event but has term for previous one */
if ((overwrite >= 0) && (set < 0))
return -1;
/* has term for current event */
if ((overwrite < 0) && (set >= 0)) {
/* if it's first event, set overwrite */
if (evsel == perf_evlist__first(evlist))
overwrite = set;
else
return -1;
}
}
if ((overwrite >= 0) && (opts->overwrite != overwrite))
opts->overwrite = overwrite;
return 0;
}
static int perf_top_overwrite_fallback(struct perf_top *top,
struct perf_evsel *evsel)
{
struct record_opts *opts = &top->record_opts;
struct perf_evlist *evlist = top->evlist;
struct perf_evsel *counter;
if (!opts->overwrite)
return 0;
/* only fall back when first event fails */
if (evsel != perf_evlist__first(evlist))
return 0;
evlist__for_each_entry(evlist, counter)
counter->attr.write_backward = false;
opts->overwrite = false;
ui__warning("fall back to non-overwrite mode\n");
return 1;
 }
 
 static int perf_top__start_counters(struct perf_top *top)
@@ -888,12 +1002,33 @@ static int perf_top__start_counters(struct perf_top *top)
 	struct perf_evlist *evlist = top->evlist;
 	struct record_opts *opts = &top->record_opts;
 
+	if (perf_top__overwrite_check(top)) {
+		ui__error("perf top only support consistent per-event "
+			  "overwrite setting for all events\n");
+		goto out_err;
+	}
+
 	perf_evlist__config(evlist, opts, &callchain_param);
 
 	evlist__for_each_entry(evlist, counter) {
 try_again:
 		if (perf_evsel__open(counter, top->evlist->cpus,
 				     top->evlist->threads) < 0) {
+
+			/*
+			 * Specially handle overwrite fall back.
+			 * Because perf top is the only tool which has
+			 * overwrite mode by default, support
+			 * both overwrite and non-overwrite mode, and
+			 * require consistent mode for all events.
+			 *
+			 * May move it to generic code with more tools
+			 * have similar attribute.
+			 */
+			if (perf_missing_features.write_backward &&
+			    perf_top_overwrite_fallback(top, counter))
+				goto try_again;
+
 			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
 				if (verbose > 0)
 					ui__warning("%s\n", msg);
@@ -1033,7 +1168,7 @@ static int __cmd_top(struct perf_top *top)
 
 		perf_top__mmap_read(top);
 
-		if (hits == top->samples)
+		if (opts->overwrite || (hits == top->samples))
 			ret = perf_evlist__poll(top->evlist, 100);
 
 		if (resize) {
@@ -1127,6 +1262,7 @@ int cmd_top(int argc, const char **argv)
 			.uses_mmap = true,
 		},
 		.proc_map_timeout    = 500,
+		.overwrite	= 1,
 	},
 	.max_stack	     = sysctl_perf_event_max_stack,
 	.sym_pcnt_filter     = 5,
......
@@ -33,7 +33,6 @@ arch/s390/include/uapi/asm/kvm.h
 arch/s390/include/uapi/asm/kvm_perf.h
 arch/s390/include/uapi/asm/ptrace.h
 arch/s390/include/uapi/asm/sie.h
-arch/s390/include/uapi/asm/unistd.h
 arch/arm/include/uapi/asm/kvm.h
 arch/arm64/include/uapi/asm/kvm.h
 arch/alpha/include/uapi/asm/errno.h
......
[
{,
"EventCode": "0x7A",
"EventName": "BR_INDIRECT_SPEC",
"BriefDescription": "Branch speculatively executed - Indirect branch"
},
{,
"EventCode": "0xC9",
"EventName": "BR_COND",
"BriefDescription": "Conditional branch executed"
},
{,
"EventCode": "0xCA",
"EventName": "BR_INDIRECT_MISPRED",
"BriefDescription": "Indirect branch mispredicted"
},
{,
"EventCode": "0xCB",
"EventName": "BR_INDIRECT_MISPRED_ADDR",
"BriefDescription": "Indirect branch mispredicted because of address miscompare"
},
{,
"EventCode": "0xCC",
"EventName": "BR_COND_MISPRED",
"BriefDescription": "Conditional branch mispredicted"
}
]
[
{,
"EventCode": "0x60",
"EventName": "BUS_ACCESS_LD",
"BriefDescription": "Bus access - Read"
},
{,
"EventCode": "0x61",
"EventName": "BUS_ACCESS_ST",
"BriefDescription": "Bus access - Write"
},
{,
"EventCode": "0xC0",
"EventName": "EXT_MEM_REQ",
"BriefDescription": "External memory request"
},
{,
"EventCode": "0xC1",
"EventName": "EXT_MEM_REQ_NC",
"BriefDescription": "Non-cacheable external memory request"
}
]
[
{,
"EventCode": "0xC2",
"EventName": "PREFETCH_LINEFILL",
"BriefDescription": "Linefill because of prefetch"
},
{,
"EventCode": "0xC3",
"EventName": "PREFETCH_LINEFILL_DROP",
"BriefDescription": "Instruction Cache Throttle occurred"
},
{,
"EventCode": "0xC4",
"EventName": "READ_ALLOC_ENTER",
"BriefDescription": "Entering read allocate mode"
},
{,
"EventCode": "0xC5",
"EventName": "READ_ALLOC",
"BriefDescription": "Read allocate mode"
},
{,
"EventCode": "0xC8",
"EventName": "EXT_SNOOP",
"BriefDescription": "SCU Snooped data from another CPU for this CPU"
}
]
[
{,
"EventCode": "0x60",
"EventName": "BUS_ACCESS_LD",
"BriefDescription": "Bus access - Read"
},
{,
"EventCode": "0x61",
"EventName": "BUS_ACCESS_ST",
"BriefDescription": "Bus access - Write"
},
{,
"EventCode": "0xC0",
"EventName": "EXT_MEM_REQ",
"BriefDescription": "External memory request"
},
{,
"EventCode": "0xC1",
"EventName": "EXT_MEM_REQ_NC",
"BriefDescription": "Non-cacheable external memory request"
}
]
[
{,
"EventCode": "0x86",
"EventName": "EXC_IRQ",
"BriefDescription": "Exception taken, IRQ"
},
{,
"EventCode": "0x87",
"EventName": "EXC_FIQ",
"BriefDescription": "Exception taken, FIQ"
},
{,
"EventCode": "0xC6",
"EventName": "PRE_DECODE_ERR",
"BriefDescription": "Pre-decode error"
},
{,
"EventCode": "0xD0",
"EventName": "L1I_CACHE_ERR",
"BriefDescription": "L1 Instruction Cache (data or tag) memory error"
},
{,
"EventCode": "0xD1",
"EventName": "L1D_CACHE_ERR",
"BriefDescription": "L1 Data Cache (data, tag or dirty) memory error, correctable or non-correctable"
},
{,
"EventCode": "0xD2",
"EventName": "TLB_ERR",
"BriefDescription": "TLB memory error"
}
]
[
{,
"EventCode": "0xC7",
"EventName": "STALL_SB_FULL",
"BriefDescription": "Data Write operation that stalls the pipeline because the store buffer is full"
},
{,
"EventCode": "0xE0",
"EventName": "OTHER_IQ_DEP_STALL",
"BriefDescription": "Cycles that the DPU IQ is empty and that is not because of a recent micro-TLB miss, instruction cache miss or pre-decode error"
},
{,
"EventCode": "0xE1",
"EventName": "IC_DEP_STALL",
"BriefDescription": "Cycles the DPU IQ is empty and there is an instruction cache miss being processed"
},
{,
"EventCode": "0xE2",
"EventName": "IUTLB_DEP_STALL",
"BriefDescription": "Cycles the DPU IQ is empty and there is an instruction micro-TLB miss being processed"
},
{,
"EventCode": "0xE3",
"EventName": "DECODE_DEP_STALL",
"BriefDescription": "Cycles the DPU IQ is empty and there is a pre-decode error being processed"
},
{,
"EventCode": "0xE4",
"EventName": "OTHER_INTERLOCK_STALL",
"BriefDescription": "Cycles there is an interlock other than Advanced SIMD/Floating-point instructions or load/store instruction"
},
{,
"EventCode": "0xE5",
"EventName": "AGU_DEP_STALL",
"BriefDescription": "Cycles there is an interlock for a load/store instruction waiting for data to calculate the address in the AGU"
},
{,
"EventCode": "0xE6",
"EventName": "SIMD_DEP_STALL",
"BriefDescription": "Cycles there is an interlock for an Advanced SIMD/Floating-point operation."
},
{,
"EventCode": "0xE7",
"EventName": "LD_DEP_STALL",
"BriefDescription": "Cycles there is a stall in the Wr stage because of a load miss"
},
{,
"EventCode": "0xE8",
"EventName": "ST_DEP_STALL",
"BriefDescription": "Cycles there is a stall in the Wr stage because of a store"
}
]
@@ -13,3 +13,4 @@
 #
 #Family-model,Version,Filename,EventType
 0x00000000420f5160,v1,cavium,core
+0x00000000410fd03[[:xdigit:]],v1,cortex-a53,core
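The trailing [[:xdigit:]] in the new mapfile entry exists because the low MIDR nibble encodes the CPU revision, so a single pattern covers every Cortex-A53 stepping. A stand-alone illustration of the field layout, with a hypothetical MIDR value:

	#include <stdio.h>

	int main(void)
	{
		unsigned long midr = 0x410fd034;	/* hypothetical Cortex-A53 r0p4 */

		printf("implementer=0x%02lx part=0x%03lx rev=%lu\n",
		       (midr >> 24) & 0xff,	/* 0x41: Arm Ltd. */
		       (midr >> 4) & 0xfff,	/* 0xd03: Cortex-A53 */
		       midr & 0xf);		/* revision: matched by [[:xdigit:]] */
		return 0;
	}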
@@ -31,10 +31,12 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
 	int i;
 
 	for (i = 0; i < evlist->nr_mmaps; i++) {
+		struct perf_mmap *map = &evlist->overwrite_mmap[i];
 		union perf_event *event;
+		u64 start, end;
 
-		perf_mmap__read_catchup(&evlist->overwrite_mmap[i]);
-		while ((event = perf_mmap__read_backward(&evlist->overwrite_mmap[i])) != NULL) {
+		perf_mmap__read_init(map, true, &start, &end);
+		while ((event = perf_mmap__read_event(map, true, &start, end)) != NULL) {
 			const u32 type = event->header.type;
 
 			switch (type) {
@@ -49,6 +51,7 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
 				return TEST_FAIL;
 			}
 		}
+		perf_mmap__read_done(map);
 	}
 	return TEST_OK;
 }
......
@@ -22,10 +22,23 @@ trace_libc_inet_pton_backtrace() {
 	expected[4]="rtt min.*"
 	expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
 	expected[6]=".*inet_pton[[:space:]]\($libc\)$"
+	case "$(uname -m)" in
+	s390x)
+		eventattr='call-graph=dwarf'
+		expected[7]="gaih_inet[[:space:]]\(inlined\)$"
+		expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
+		expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
+		expected[10]="__libc_start_main[[:space:]]\($libc\)$"
+		expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
+		;;
+	*)
+		eventattr='max-stack=3'
 	expected[7]="getaddrinfo[[:space:]]\($libc\)$"
 	expected[8]=".*\(.*/bin/ping.*\)$"
+		;;
+	esac
 
-	perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
+	perf trace --no-syscalls -e probe_libc:inet_pton/$eventattr/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
 		echo $line
 		echo "$line" | egrep -q "${expected[$idx]}"
 		if [ $? -ne 0 ] ; then
@@ -33,7 +46,7 @@ trace_libc_inet_pton_backtrace() {
 			exit 1
 		fi
 		let idx+=1
-		[ $idx -eq 9 ] && break
+		[ -z "${expected[$idx]}" ] && break
 	done
 }
......
@@ -608,7 +608,8 @@ static int hist_browser__title(struct hist_browser *browser, char *bf, size_t size)
 	return browser->title ? browser->title(browser, bf, size) : 0;
 }
 
-int hist_browser__run(struct hist_browser *browser, const char *help)
+int hist_browser__run(struct hist_browser *browser, const char *help,
+		      bool warn_lost_event)
 {
 	int key;
 	char title[160];
@@ -638,8 +639,9 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
 			nr_entries = hist_browser__nr_entries(browser);
 			ui_browser__update_nr_entries(&browser->b, nr_entries);
 
-			if (browser->hists->stats.nr_lost_warned !=
-			    browser->hists->stats.nr_events[PERF_RECORD_LOST]) {
+			if (warn_lost_event &&
+			    (browser->hists->stats.nr_lost_warned !=
+			    browser->hists->stats.nr_events[PERF_RECORD_LOST])) {
 				browser->hists->stats.nr_lost_warned =
 					browser->hists->stats.nr_events[PERF_RECORD_LOST];
 				ui_browser__warn_lost_events(&browser->b);
@@ -2763,7 +2765,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 				    bool left_exits,
 				    struct hist_browser_timer *hbt,
 				    float min_pcnt,
-				    struct perf_env *env)
+				    struct perf_env *env,
+				    bool warn_lost_event)
 {
 	struct hists *hists = evsel__hists(evsel);
 	struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env);
@@ -2844,7 +2847,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 		nr_options = 0;
 
-		key = hist_browser__run(browser, helpline);
+		key = hist_browser__run(browser, helpline,
+					warn_lost_event);
 
 		if (browser->he_selection != NULL) {
 			thread = hist_browser__selected_thread(browser);
@@ -3184,7 +3188,8 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
 static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
 				int nr_events, const char *help,
-				struct hist_browser_timer *hbt)
+				struct hist_browser_timer *hbt,
+				bool warn_lost_event)
 {
 	struct perf_evlist *evlist = menu->b.priv;
 	struct perf_evsel *pos;
@@ -3203,7 +3208,9 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
 		case K_TIMER:
 			hbt->timer(hbt->arg);
 
-			if (!menu->lost_events_warned && menu->lost_events) {
+			if (!menu->lost_events_warned &&
+			    menu->lost_events &&
+			    warn_lost_event) {
 				ui_browser__warn_lost_events(&menu->b);
 				menu->lost_events_warned = true;
 			}
@@ -3224,7 +3231,8 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
 			key = perf_evsel__hists_browse(pos, nr_events, help,
 						       true, hbt,
 						       menu->min_pcnt,
-						       menu->env);
+						       menu->env,
+						       warn_lost_event);
 			ui_browser__show_title(&menu->b, title);
 			switch (key) {
 			case K_TAB:
@@ -3282,7 +3290,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
 				    int nr_entries, const char *help,
 				    struct hist_browser_timer *hbt,
 				    float min_pcnt,
-				    struct perf_env *env)
+				    struct perf_env *env,
+				    bool warn_lost_event)
 {
 	struct perf_evsel *pos;
 	struct perf_evsel_menu menu = {
@@ -3309,13 +3318,15 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
 		menu.b.width = line_len;
 	}
 
-	return perf_evsel_menu__run(&menu, nr_entries, help, hbt);
+	return perf_evsel_menu__run(&menu, nr_entries, help,
+				    hbt, warn_lost_event);
 }
 
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
 				  struct hist_browser_timer *hbt,
 				  float min_pcnt,
-				  struct perf_env *env)
+				  struct perf_env *env,
+				  bool warn_lost_event)
 {
 	int nr_entries = evlist->nr_entries;
 
@@ -3325,7 +3336,7 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
 		return perf_evsel__hists_browse(first, nr_entries, help,
 						false, hbt, min_pcnt,
-						env);
+						env, warn_lost_event);
 	}
 
 	if (symbol_conf.event_group) {
@@ -3342,5 +3353,6 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
 	}
 
 	return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
-					       hbt, min_pcnt, env);
+					       hbt, min_pcnt, env,
+					       warn_lost_event);
 }
@@ -28,7 +28,8 @@ struct hist_browser {
 
 struct hist_browser *hist_browser__new(struct hists *hists);
 void hist_browser__delete(struct hist_browser *browser);
-int hist_browser__run(struct hist_browser *browser, const char *help);
+int hist_browser__run(struct hist_browser *browser, const char *help,
+		      bool warn_lost_event);
 
 void hist_browser__init(struct hist_browser *browser,
 			struct hists *hists);
 #endif /* _PERF_UI_BROWSER_HISTS_H_ */
@@ -715,28 +715,11 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
 	return perf_mmap__read_forward(md);
 }
 
-union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
-{
-	struct perf_mmap *md = &evlist->mmap[idx];
-
-	/*
-	 * No need to check messup for backward ring buffer:
-	 * We can always read arbitrary long data from a backward
-	 * ring buffer unless we forget to pause it before reading.
-	 */
-	return perf_mmap__read_backward(md);
-}
-
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 {
 	return perf_evlist__mmap_read_forward(evlist, idx);
 }
 
-void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
-{
-	perf_mmap__read_catchup(&evlist->mmap[idx]);
-}
-
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
 {
 	perf_mmap__consume(&evlist->mmap[idx], false);
@@ -133,10 +133,6 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
 union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
 						 int idx);
-union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
-						  int idx);
-void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
-
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist);
......
@@ -41,17 +41,7 @@
 
 #include "sane_ctype.h"
 
-static struct {
-	bool sample_id_all;
-	bool exclude_guest;
-	bool mmap2;
-	bool cloexec;
-	bool clockid;
-	bool clockid_wrong;
-	bool lbr_flags;
-	bool write_backward;
-	bool group_read;
-} perf_missing_features;
+struct perf_missing_features perf_missing_features;
 
 static clockid_t clockid;
......
@@ -149,6 +149,20 @@ union u64_swap {
 	u32 val32[2];
 };
 
+struct perf_missing_features {
+	bool sample_id_all;
+	bool exclude_guest;
+	bool mmap2;
+	bool cloexec;
+	bool clockid;
+	bool clockid_wrong;
+	bool lbr_flags;
+	bool write_backward;
+	bool group_read;
+};
+
+extern struct perf_missing_features perf_missing_features;
+
 struct cpu_map;
 struct target;
 struct thread_map;
......
@@ -430,7 +430,8 @@ int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
 				  struct hist_browser_timer *hbt,
 				  float min_pcnt,
-				  struct perf_env *env);
+				  struct perf_env *env,
+				  bool warn_lost_event);
 int script_browse(const char *script_opt);
 #else
 static inline
@@ -438,7 +439,8 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
 				  const char *help __maybe_unused,
 				  struct hist_browser_timer *hbt __maybe_unused,
 				  float min_pcnt __maybe_unused,
-				  struct perf_env *env __maybe_unused)
+				  struct perf_env *env __maybe_unused,
+				  bool warn_lost_event __maybe_unused)
 {
 	return 0;
 }
......
@@ -22,29 +22,27 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
 
 /* When check_messup is true, 'end' must points to a good entry */
 static union perf_event *perf_mmap__read(struct perf_mmap *map,
-					 u64 start, u64 end, u64 *prev)
+					 u64 *startp, u64 end)
 {
 	unsigned char *data = map->base + page_size;
 	union perf_event *event = NULL;
-	int diff = end - start;
+	int diff = end - *startp;
 
 	if (diff >= (int)sizeof(event->header)) {
 		size_t size;
 
-		event = (union perf_event *)&data[start & map->mask];
+		event = (union perf_event *)&data[*startp & map->mask];
 		size = event->header.size;
 
-		if (size < sizeof(event->header) || diff < (int)size) {
-			event = NULL;
-			goto broken_event;
-		}
+		if (size < sizeof(event->header) || diff < (int)size)
+			return NULL;
 
 		/*
 		 * Event straddles the mmap boundary -- header should always
 		 * be inside due to u64 alignment of output.
 		 */
-		if ((start & map->mask) + size != ((start + size) & map->mask)) {
-			unsigned int offset = start;
+		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+			unsigned int offset = *startp;
 			unsigned int len = min(sizeof(*event), size), cpy;
 			void *dst = map->event_copy;
 
@@ -59,20 +57,19 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
 			event = (union perf_event *)map->event_copy;
 		}
 
-		start += size;
+		*startp += size;
 	}
 
-broken_event:
-	if (prev)
-		*prev = start;
-
 	return event;
 }
 
+/*
+ * legacy interface for mmap read.
+ * Don't use it. Use perf_mmap__read_event().
+ */
 union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
 {
 	u64 head;
-	u64 old = map->prev;
 
 	/*
 	 * Check if event was unmapped due to a POLLHUP/POLLERR.
@@ -82,13 +79,26 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
 
 	head = perf_mmap__read_head(map);
 
-	return perf_mmap__read(map, old, head, &map->prev);
+	return perf_mmap__read(map, &map->prev, head);
 }
 
-union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
+/*
+ * Read event from ring buffer one by one.
+ * Return one event for each call.
+ *
+ * Usage:
+ * perf_mmap__read_init()
+ * while(event = perf_mmap__read_event()) {
+ *	//process the event
+ *	perf_mmap__consume()
+ * }
+ * perf_mmap__read_done()
+ */
+union perf_event *perf_mmap__read_event(struct perf_mmap *map,
+					bool overwrite,
+					u64 *startp, u64 end)
 {
-	u64 head, end;
-	u64 start = map->prev;
+	union perf_event *event;
 
 	/*
 	 * Check if event was unmapped due to a POLLHUP/POLLERR.
@@ -96,40 +106,19 @@ union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
 	if (!refcount_read(&map->refcnt))
 		return NULL;
 
-	head = perf_mmap__read_head(map);
-	if (!head)
+	if (startp == NULL)
 		return NULL;
 
-	/*
-	 * 'head' pointer starts from 0. Kernel minus sizeof(record) form
-	 * it each time when kernel writes to it, so in fact 'head' is
-	 * negative. 'end' pointer is made manually by adding the size of
-	 * the ring buffer to 'head' pointer, means the validate data can
-	 * read is the whole ring buffer. If 'end' is positive, the ring
-	 * buffer has not fully filled, so we must adjust 'end' to 0.
-	 *
-	 * However, since both 'head' and 'end' is unsigned, we can't
-	 * simply compare 'end' against 0. Here we compare '-head' and
-	 * the size of the ring buffer, where -head is the number of bytes
-	 * kernel write to the ring buffer.
-	 */
-	if (-head < (u64)(map->mask + 1))
-		end = 0;
-	else
-		end = head + map->mask + 1;
-
-	return perf_mmap__read(map, start, end, &map->prev);
-}
+	/* non-overwirte doesn't pause the ringbuffer */
+	if (!overwrite)
+		end = perf_mmap__read_head(map);
 
-void perf_mmap__read_catchup(struct perf_mmap *map)
-{
-	u64 head;
+	event = perf_mmap__read(map, startp, end);
 
-	if (!refcount_read(&map->refcnt))
-		return;
+	if (!overwrite)
+		map->prev = *startp;
 
-	head = perf_mmap__read_head(map);
-	map->prev = head;
+	return event;
 }
 
 static bool perf_mmap__empty(struct perf_mmap *map)
@@ -267,41 +256,60 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
 	return -1;
 }
 
-int perf_mmap__push(struct perf_mmap *md, bool overwrite,
-		    void *to, int push(void *to, void *buf, size_t size))
+/*
+ * Report the start and end of the available data in ringbuffer
+ */
+int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
+			 u64 *startp, u64 *endp)
 {
 	u64 head = perf_mmap__read_head(md);
 	u64 old = md->prev;
-	u64 end = head, start = old;
 	unsigned char *data = md->base + page_size;
 	unsigned long size;
-	void *buf;
-	int rc = 0;
 
-	start = overwrite ? head : old;
-	end = overwrite ? old : head;
+	*startp = overwrite ? head : old;
+	*endp = overwrite ? old : head;
 
-	if (start == end)
-		return 0;
+	if (*startp == *endp)
+		return -EAGAIN;
 
-	size = end - start;
+	size = *endp - *startp;
 	if (size > (unsigned long)(md->mask) + 1) {
 		if (!overwrite) {
 			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 
 			md->prev = head;
 			perf_mmap__consume(md, overwrite);
-			return 0;
+			return -EAGAIN;
 		}
 
 		/*
 		 * Backward ring buffer is full. We still have a chance to read
 		 * most of data from it.
 		 */
-		if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
-			return -1;
+		if (overwrite_rb_find_range(data, md->mask, head, startp, endp))
+			return -EINVAL;
 	}
 
+	return 0;
+}
+
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
+		    void *to, int push(void *to, void *buf, size_t size))
+{
+	u64 head = perf_mmap__read_head(md);
+	u64 end, start;
+	unsigned char *data = md->base + page_size;
+	unsigned long size;
+	void *buf;
+	int rc = 0;
+
+	rc = perf_mmap__read_init(md, overwrite, &start, &end);
+	if (rc < 0)
+		return (rc == -EAGAIN) ? 0 : -1;
+
+	size = end - start;
+
 	if ((start & md->mask) + size != (end & md->mask)) {
 		buf = &data[start & md->mask];
 		size = md->mask + 1 - (start & md->mask);
@@ -327,3 +335,14 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite,
 out:
 	return rc;
 }
+
+/*
+ * Mandatory for overwrite mode
+ * The direction of overwrite mode is backward.
+ * The last perf_mmap__read() will set tail to map->prev.
+ * Need to correct the map->prev to head which is the end of next read.
+ */
+void perf_mmap__read_done(struct perf_mmap *map)
+{
+	map->prev = perf_mmap__read_head(map);
+}
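A minimal consumer sketch of the read API above, following the init/event/consume/done sequence documented in the comments; the helper name and its caller are assumed, error handling is trimmed, and perf's internal headers are taken to be in scope:

	static void drain_one_mmap(struct perf_mmap *map, bool overwrite)
	{
		union perf_event *event;
		u64 start, end;

		/* -EAGAIN from read_init means the buffer is currently empty */
		if (perf_mmap__read_init(map, overwrite, &start, &end) < 0)
			return;

		while ((event = perf_mmap__read_event(map, overwrite, &start, end)) != NULL) {
			/* ... hand the event to a parser/handler here ... */
			perf_mmap__consume(map, overwrite);
		}

		/* mandatory in overwrite mode: reset map->prev for the next pass */
		if (overwrite)
			perf_mmap__read_done(map);
	}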
@@ -65,8 +65,6 @@ void perf_mmap__put(struct perf_mmap *map);
 
 void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
 
-void perf_mmap__read_catchup(struct perf_mmap *md);
-
 static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
 {
 	struct perf_event_mmap_page *pc = mm->base;
@@ -87,11 +85,17 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
 }
 
 union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
-union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
+
+union perf_event *perf_mmap__read_event(struct perf_mmap *map,
+					bool overwrite,
+					u64 *startp, u64 end);
 
 int perf_mmap__push(struct perf_mmap *md, bool backward,
 		    void *to, int push(void *to, void *buf, size_t size));
 
 size_t perf_mmap__mmap_len(struct perf_mmap *map);
 
+int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
+			 u64 *startp, u64 *endp);
+void perf_mmap__read_done(struct perf_mmap *map);
+
 #endif /*__PERF_MMAP_H */
@@ -340,35 +340,15 @@ size_t hex_width(u64 v)
 	return n;
 }
 
-static int hex(char ch)
-{
-	if ((ch >= '0') && (ch <= '9'))
-		return ch - '0';
-	if ((ch >= 'a') && (ch <= 'f'))
-		return ch - 'a' + 10;
-	if ((ch >= 'A') && (ch <= 'F'))
-		return ch - 'A' + 10;
-	return -1;
-}
-
 /*
  * While we find nice hex chars, build a long_val.
  * Return number of chars processed.
  */
 int hex2u64(const char *ptr, u64 *long_val)
 {
-	const char *p = ptr;
-	*long_val = 0;
+	char *p;
 
-	while (*p) {
-		const int hex_val = hex(*p);
+	*long_val = strtoull(ptr, &p, 16);
 
-		if (hex_val < 0)
-			break;
-
-		*long_val = (*long_val << 4) | hex_val;
-		p++;
-	}
-
 	return p - ptr;
 }
......