Commit bb7c5126 authored by Linus Torvalds

Merge tag 'perf-tools-fixes-for-v5.19-2022-07-02' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf tools fixes from Arnaldo Carvalho de Melo:

 - BPF program info linear (BPIL) data is accessed assuming 64-bit
   alignment, resulting in undefined behavior as the data is just byte
   aligned. Fix it. Found using -fsanitize=undefined.

 - Fix 'perf offcpu' build on old kernels wrt task_struct's
   state/__state field.

 - Fix perf_event_attr.sample_type setting on the 'offcpu-time' event
   synthesized by the 'perf offcpu' tool.

 - Don't bail out when synthesizing PERF_RECORD_ events for pre-existing
   threads if one goes away while its procfs entries are being parsed.

 - Don't sort the task scan result from /proc, it's not needed and
   introduces bugs when the main thread isn't the first one to be
   processed.

 - Fix uninitialized 'offset' variable on aarch64 in the unwind code.

 - Sync KVM headers with the kernel sources.

* tag 'perf-tools-fixes-for-v5.19-2022-07-02' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf synthetic-events: Ignore dead threads during event synthesis
  perf synthetic-events: Don't sort the task scan result from /proc
  perf unwind: Fix uninitialized 'offset' variable on aarch64
  tools headers UAPI: Sync linux/kvm.h with the kernel sources
  perf bpf: 8 byte align bpil data
  tools kvm headers arm64: Update KVM headers from the kernel sources
  perf offcpu: Accept allowed sample types only
  perf offcpu: Fix build failure on old kernels
parents 5411de07 ff898552
@@ -139,8 +139,10 @@ struct kvm_guest_debug_arch {
 	__u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
 };
 
+#define KVM_DEBUG_ARCH_HSR_HIGH_VALID	(1 << 0)
 struct kvm_debug_exit_arch {
 	__u32 hsr;
+	__u32 hsr_high;	/* ESR_EL2[61:32] */
 	__u64 far;	/* used for watchpoints */
 };

@@ -332,6 +334,40 @@ struct kvm_arm_copy_mte_tags {
 #define KVM_ARM64_SVE_VLS_WORDS \
 	((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1)
 
+/* Bitmap feature firmware registers */
+#define KVM_REG_ARM_FW_FEAT_BMAP		(0x0016 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_FEAT_BMAP_REG(r)		(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+						KVM_REG_ARM_FW_FEAT_BMAP |	\
+						((r) & 0xffff))
+
+#define KVM_REG_ARM_STD_BMAP			KVM_REG_ARM_FW_FEAT_BMAP_REG(0)
+
+enum {
+	KVM_REG_ARM_STD_BIT_TRNG_V1_0	= 0,
+#ifdef __KERNEL__
+	KVM_REG_ARM_STD_BMAP_BIT_COUNT,
+#endif
+};
+
+#define KVM_REG_ARM_STD_HYP_BMAP		KVM_REG_ARM_FW_FEAT_BMAP_REG(1)
+
+enum {
+	KVM_REG_ARM_STD_HYP_BIT_PV_TIME	= 0,
+#ifdef __KERNEL__
+	KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT,
+#endif
+};
+
+#define KVM_REG_ARM_VENDOR_HYP_BMAP		KVM_REG_ARM_FW_FEAT_BMAP_REG(2)
+
+enum {
+	KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT	= 0,
+	KVM_REG_ARM_VENDOR_HYP_BIT_PTP		= 1,
+#ifdef __KERNEL__
+	KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT,
+#endif
+};
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1

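For readers of the new arm64 register block above, a minimal userspace sketch of reading one of the bitmap firmware registers; it assumes headers at least this new and a vCPU fd obtained through the usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence, and is illustrative rather than part of the patch:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Read KVM_REG_ARM_STD_BMAP from a vCPU; bit 0 reports TRNG v1.0. */
static uint64_t read_std_bmap(int vcpu_fd)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_STD_BMAP,
		.addr = (uint64_t)(uintptr_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return 0;	/* register not exposed by this kernel */
	return val;
}
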
@@ -444,6 +444,9 @@ struct kvm_run {
 #define KVM_SYSTEM_EVENT_SHUTDOWN       1
 #define KVM_SYSTEM_EVENT_RESET          2
 #define KVM_SYSTEM_EVENT_CRASH          3
+#define KVM_SYSTEM_EVENT_WAKEUP         4
+#define KVM_SYSTEM_EVENT_SUSPEND        5
+#define KVM_SYSTEM_EVENT_SEV_TERM       6
 			__u32 type;
 			__u32 ndata;
 			union {

@@ -646,6 +649,7 @@ struct kvm_vapic_addr {
 #define KVM_MP_STATE_OPERATING         7
 #define KVM_MP_STATE_LOAD              8
 #define KVM_MP_STATE_AP_RESET_HOLD     9
+#define KVM_MP_STATE_SUSPENDED         10
 
 struct kvm_mp_state {
 	__u32 mp_state;

@@ -1150,8 +1154,9 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_MEM_OP_EXTENSION 211
 #define KVM_CAP_PMU_CAPABILITY 212
 #define KVM_CAP_DISABLE_QUIRKS2 213
-/* #define KVM_CAP_VM_TSC_CONTROL 214 */
+#define KVM_CAP_VM_TSC_CONTROL 214
 #define KVM_CAP_SYSTEM_EVENT_DATA 215
+#define KVM_CAP_ARM_SYSTEM_SUSPEND 216
 
 #ifdef KVM_CAP_IRQ_ROUTING

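As a usage note for the capability numbers above, userspace probes them with KVM_CHECK_EXTENSION before relying on the corresponding ioctls; a minimal sketch, assuming headers that already carry KVM_CAP_VM_TSC_CONTROL:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;
	/* KVM_CHECK_EXTENSION returns > 0 when the capability is present */
	printf("KVM_CAP_VM_TSC_CONTROL: %s\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_VM_TSC_CONTROL) > 0 ?
	       "supported" : "not supported");
	close(kvm);
	return 0;
}
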
@@ -1240,6 +1245,7 @@ struct kvm_x86_mce {
 #define KVM_XEN_HVM_CONFIG_SHARED_INFO	(1 << 2)
 #define KVM_XEN_HVM_CONFIG_RUNSTATE	(1 << 3)
 #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL	(1 << 4)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND	(1 << 5)
 
 struct kvm_xen_hvm_config {
 	__u32 flags;

@@ -1478,7 +1484,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_PIT2             _IOW(KVMIO,  0xa0, struct kvm_pit_state2)
 /* Available with KVM_CAP_PPC_GET_PVINFO */
 #define KVM_PPC_GET_PVINFO	  _IOW(KVMIO,  0xa1, struct kvm_ppc_pvinfo)
-/* Available with KVM_CAP_TSC_CONTROL */
+/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with
+*  KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
 #define KVM_SET_TSC_KHZ           _IO(KVMIO,  0xa2)
 #define KVM_GET_TSC_KHZ           _IO(KVMIO,  0xa3)
 /* Available with KVM_CAP_PCI_2_3 */

@@ -1694,6 +1701,32 @@ struct kvm_xen_hvm_attr {
 		struct {
 			__u64 gfn;
 		} shared_info;
+		struct {
+			__u32 send_port;
+			__u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
+			__u32 flags;
+#define KVM_XEN_EVTCHN_DEASSIGN		(1 << 0)
+#define KVM_XEN_EVTCHN_UPDATE		(1 << 1)
+#define KVM_XEN_EVTCHN_RESET		(1 << 2)
+			/*
+			 * Events sent by the guest are either looped back to
+			 * the guest itself (potentially on a different port#)
+			 * or signalled via an eventfd.
+			 */
+			union {
+				struct {
+					__u32 port;
+					__u32 vcpu;
+					__u32 priority;
+				} port;
+				struct {
+					__u32 port; /* Zero for eventfd */
+					__s32 fd;
+				} eventfd;
+				__u32 padding[4];
+			} deliver;
+		} evtchn;
+		__u32 xen_version;
+
 		__u64 pad[8];
 	} u;
 };

@@ -1702,11 +1735,17 @@ struct kvm_xen_hvm_attr {
 #define KVM_XEN_ATTR_TYPE_LONG_MODE		0x0
 #define KVM_XEN_ATTR_TYPE_SHARED_INFO		0x1
 #define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR		0x2
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_ATTR_TYPE_EVTCHN		0x3
+#define KVM_XEN_ATTR_TYPE_XEN_VERSION		0x4
 
 /* Per-vCPU Xen attributes */
 #define KVM_XEN_VCPU_GET_ATTR	_IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
 #define KVM_XEN_VCPU_SET_ATTR	_IOW(KVMIO,  0xcb, struct kvm_xen_vcpu_attr)
 
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_HVM_EVTCHN_SEND	_IOW(KVMIO,  0xd0, struct kvm_irq_routing_xen_evtchn)
+
 #define KVM_GET_SREGS2             _IOR(KVMIO,  0xcc, struct kvm_sregs2)
 #define KVM_SET_SREGS2             _IOW(KVMIO,  0xcd, struct kvm_sregs2)

@@ -1724,6 +1763,13 @@ struct kvm_xen_vcpu_attr {
 			__u64 time_blocked;
 			__u64 time_offline;
 		} runstate;
+		__u32 vcpu_id;
+		struct {
+			__u32 port;
+			__u32 priority;
+			__u64 expires_ns;
+		} timer;
+		__u8 vector;
 	} u;
 };

@@ -1734,6 +1780,10 @@ struct kvm_xen_vcpu_attr {
 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT	0x3
 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA	0x4
 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST	0x5
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID		0x6
+#define KVM_XEN_VCPU_ATTR_TYPE_TIMER		0x7
+#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR	0x8
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {

@@ -149,11 +149,10 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
 		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
 		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
 
-		data_len += count * size;
+		data_len += roundup(count * size, sizeof(__u64));
 	}
 
 	/* step 3: allocate continuous memory */
-	data_len = roundup(data_len, sizeof(__u64));
 	info_linear = malloc(sizeof(struct perf_bpil) + data_len);
 	if (!info_linear)
 		return ERR_PTR(-ENOMEM);
@@ -180,7 +179,7 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
 		bpf_prog_info_set_offset_u64(&info_linear->info,
 					     desc->array_offset,
 					     ptr_to_u64(ptr));
-		ptr += count * size;
+		ptr += roundup(count * size, sizeof(__u64));
 	}
 
 	/* step 5: call syscall again to get required arrays */

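The effect of the change above, restated as a standalone toy (the sizes are made up, and ROUNDUP stands in for the kernel's roundup()): padding each variable-length array to a multiple of 8 bytes keeps every array 8-byte aligned inside the blob, so 64-bit reads from it are no longer undefined behavior:

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	const uint32_t bytes[] = { 12, 7, 40 };	/* count * size per array */
	uint64_t off_old = 0, off_new = 0;

	for (unsigned int i = 0; i < 3; i++) {
		printf("array %u: offset %llu (old, possibly misaligned) vs %llu (new)\n",
		       i, (unsigned long long)off_old, (unsigned long long)off_new);
		off_old += bytes[i];			/* old: byte-packed */
		off_new += ROUNDUP(bytes[i], 8);	/* new: padded per array */
	}
	return 0;
}
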
@@ -265,6 +265,12 @@ int off_cpu_write(struct perf_session *session)
 
 	sample_type = evsel->core.attr.sample_type;
 
+	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
+		pr_err("not supported sample type: %llx\n",
+		       (unsigned long long)sample_type);
+		return -1;
+	}
+
 	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
 		if (evsel->core.id)
 			sid = evsel->core.id[0];
@@ -319,7 +325,6 @@ int off_cpu_write(struct perf_session *session)
 		}
 		if (sample_type & PERF_SAMPLE_CGROUP)
 			data.array[n++] = key.cgroup_id;
-		/* TODO: handle more sample types */
 
 		size = n * sizeof(u64);
 		data.hdr.size = size;

@@ -71,6 +71,11 @@ struct {
 	__uint(max_entries, 1);
 } cgroup_filter SEC(".maps");
 
+/* new kernel task_struct definition */
+struct task_struct___new {
+	long __state;
+} __attribute__((preserve_access_index));
+
 /* old kernel task_struct definition */
 struct task_struct___old {
 	long state;
@@ -93,14 +98,17 @@ const volatile bool uses_cgroup_v1 = false;
  */
 static inline int get_task_state(struct task_struct *t)
 {
-	if (bpf_core_field_exists(t->__state))
-		return BPF_CORE_READ(t, __state);
+	/* recast pointer to capture new type for compiler */
+	struct task_struct___new *t_new = (void *)t;
 
-	/* recast pointer to capture task_struct___old type for compiler */
-	struct task_struct___old *t_old = (void *)t;
+	if (bpf_core_field_exists(t_new->__state)) {
+		return BPF_CORE_READ(t_new, __state);
+	} else {
+		/* recast pointer to capture old type for compiler */
+		struct task_struct___old *t_old = (void *)t;
 
-	/* now use old "state" name of the field */
-	return BPF_CORE_READ(t_old, state);
+		/* now use old "state" name of the field */
+		return BPF_CORE_READ(t_old, state);
+	}
 }
 
 static inline __u64 get_cgroup_id(struct task_struct *t)

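For context on the pattern above: libbpf treats the "___" suffix on a local type as a CO-RE "flavor" and strips it when matching against kernel BTF, so both local definitions resolve to the running kernel's struct task_struct, and the field check is settled at program load time rather than compile time. A condensed restatement with hypothetical flavor names, assuming vmlinux.h and libbpf's CO-RE helpers:

#include "vmlinux.h"
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

/* both flavors match the kernel's task_struct after suffix stripping */
struct task_struct___flavor_new {
	long __state;			/* field name on v5.14+ kernels */
} __attribute__((preserve_access_index));

struct task_struct___flavor_old {
	long state;			/* field name on older kernels */
} __attribute__((preserve_access_index));

static __always_inline long task_state(struct task_struct *t)
{
	struct task_struct___flavor_new *t_new = (void *)t;
	struct task_struct___flavor_old *t_old = (void *)t;

	if (bpf_core_field_exists(t_new->__state))
		return BPF_CORE_READ(t_new, __state);

	return BPF_CORE_READ(t_old, state);
}
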
@@ -48,6 +48,7 @@
 #include "util.h"
 #include "hashmap.h"
 #include "pmu-hybrid.h"
+#include "off_cpu.h"
 #include "../perf-sys.h"
 #include "util/parse-branch-options.h"
 #include <internal/xyarray.h>
@@ -1102,6 +1103,11 @@ static void evsel__set_default_freq_period(struct record_opts *opts,
 	}
 }
 
+static bool evsel__is_offcpu_event(struct evsel *evsel)
+{
+	return evsel__is_bpf_output(evsel) && !strcmp(evsel->name, OFFCPU_EVENT);
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
@@ -1366,6 +1372,9 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
 	 */
 	if (evsel__is_dummy_event(evsel))
 		evsel__reset_sample_bit(evsel, BRANCH_STACK);
+
+	if (evsel__is_offcpu_event(evsel))
+		evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
 }
 
 int evsel__set_filter(struct evsel *evsel, const char *filter)

 #ifndef PERF_UTIL_OFF_CPU_H
 #define PERF_UTIL_OFF_CPU_H
 
+#include <linux/perf_event.h>
+
 struct evlist;
 struct target;
 struct perf_session;
@@ -8,6 +10,13 @@ struct record_opts;
 
 #define OFFCPU_EVENT  "offcpu-time"
 
+#define OFFCPU_SAMPLE_TYPES  (PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP | \
+			      PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
+			      PERF_SAMPLE_ID | PERF_SAMPLE_CPU | \
+			      PERF_SAMPLE_PERIOD | PERF_SAMPLE_CALLCHAIN | \
+			      PERF_SAMPLE_CGROUP)
+
 #ifdef HAVE_BPF_SKEL
 int off_cpu_prepare(struct evlist *evlist, struct target *target,
 		    struct record_opts *opts);

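A toy illustration, not perf code, of how this allow-list interacts with the two changes above: evsel__config() masks the requested bits down to OFFCPU_SAMPLE_TYPES, and off_cpu_write() rejects anything still outside it:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for a few PERF_SAMPLE_* bits */
#define S_TID		(1u << 1)
#define S_TIME		(1u << 2)
#define S_BRANCH	(1u << 11)	/* not reproducible off-line */

#define ALLOWED		(S_TID | S_TIME)

int main(void)
{
	uint32_t requested = S_TID | S_TIME | S_BRANCH;
	uint32_t effective = requested & ALLOWED;	/* evsel__config() step */

	printf("requested 0x%x -> effective 0x%x\n", requested, effective);
	if (effective & ~ALLOWED)	/* off_cpu_write() sanity check */
		return 1;		/* "not supported sample type" */
	return 0;
}
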
@@ -754,7 +754,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
 	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
 		 machine->root_dir, pid);
 
-	n = scandir(filename, &dirent, filter_task, alphasort);
+	n = scandir(filename, &dirent, filter_task, NULL);
 	if (n < 0)
 		return n;
 
@@ -767,11 +767,12 @@ static int __event__synthesize_thread(union perf_event *comm_event,
 		if (*end)
 			continue;
 
-		rc = -1;
+		/* some threads may exit just after scan, ignore it */
 		if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
 					     &tgid, &ppid, &kernel_thread) != 0)
-			break;
+			continue;
 
+		rc = -1;
 		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
 						ppid, process, machine) < 0)
 			break;
@@ -987,7 +988,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
 		return 0;
 
 	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
-	n = scandir(proc_path, &dirent, filter_task, alphasort);
+	n = scandir(proc_path, &dirent, filter_task, NULL);
 	if (n < 0)
 		return err;

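A standalone illustration of the scandir() change, assuming glibc: alphasort() compares names as strings, so tid "10234" sorts before tid "987" and a child thread could be processed before its group leader; a NULL comparator keeps the kernel's native order, in which the leader comes first:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct dirent **entries;
	/* NULL comparator: entries come back in readdir() order */
	int n = scandir("/proc/self/task", &entries, NULL, NULL);

	if (n < 0)
		return 1;
	for (int i = 0; i < n; i++) {
		puts(entries[i]->d_name);	/* includes "." and ".." */
		free(entries[i]);
	}
	free(entries);
	return 0;
}
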
@@ -197,7 +197,7 @@ static int elf_section_address_and_offset(int fd, const char *name, u64 *address
 #ifndef NO_LIBUNWIND_DEBUG_FRAME
 static u64 elf_section_offset(int fd, const char *name)
 {
-	u64 address, offset;
+	u64 address, offset = 0;
 
 	if (elf_section_address_and_offset(fd, name, &address, &offset))
 		return 0;