Commit 41d0c467 authored by Yusheng Zheng, committed by Andrii Nakryiko

libbpf: Fix some typos in comments

Fix some spelling errors in the code comments of libbpf:

betwen -> between
paremeters -> parameters
knowning -> knowing
definiton -> definition
compatiblity -> compatibility
overriden -> overridden
occured -> occurred
proccess -> process
managment -> management
nessary -> necessary
retreive -> retrieve
Signed-off-by: Yusheng Zheng <yunwei356@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240909225952.30324-1-yunwei356@gmail.com
parent 72d8508e
@@ -341,7 +341,7 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
  * I.e., it looks almost like high-level for each loop in other languages,
  * supports continue/break, and is verifiable by BPF verifier.
  *
- * For iterating integers, the difference betwen bpf_for_each(num, i, N, M)
+ * For iterating integers, the difference between bpf_for_each(num, i, N, M)
  * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
  * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
  * *`, not just `int`. So for integers bpf_for() is more convenient.
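
For illustration (not part of the commit), a minimal sketch of the difference the comment describes, assuming a kernel with open-coded iterator support; the section name and variable names are placeholders:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("raw_tp/sched_switch")
int iter_example(void *ctx)
{
	int sum = 0, i, *v;

	/* bpf_for(): i is a plain `int`, proven by the verifier to be in [0, 10) */
	bpf_for(i, 0, 10)
		sum += i;

	/* bpf_for_each(num, ...): v is `int *` and must be dereferenced */
	bpf_for_each(num, v, 0, 10)
		sum += *v;

	return sum;
}
```
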
@@ -808,7 +808,7 @@ struct pt_regs;
  * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
  * low-level way of getting kprobe input arguments from struct pt_regs, and
  * provides a familiar typed and named function arguments syntax and
- * semantics of accessing kprobe input paremeters.
+ * semantics of accessing kprobe input parameters.
  *
  * Original struct pt_regs* context is preserved as 'ctx' argument. This might
  * be necessary when using BPF helpers like bpf_perf_event_output().
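
This comment documents the BPF_KPROBE macro in bpf_tracing.h. As a quick illustration (not part of the patch), a kprobe handler using the typed-arguments syntax it provides, with do_unlinkat as an example target:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* BPF_KPROBE extracts the arguments from struct pt_regs behind the
 * scenes; the raw pt_regs is still reachable as the implicit `ctx`. */
SEC("kprobe/do_unlinkat")
int BPF_KPROBE(handle_unlinkat, int dfd, struct filename *name)
{
	bpf_printk("do_unlinkat: dfd=%d", dfd);
	return 0;
}
```
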
@@ -4230,7 +4230,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
  * consists of portions of the graph that come from multiple compilation units.
  * This is due to the fact that types within single compilation unit are always
  * deduplicated and FWDs are already resolved, if referenced struct/union
- * definiton is available. So, if we had unresolved FWD and found corresponding
+ * definition is available. So, if we had unresolved FWD and found corresponding
  * STRUCT/UNION, they will be from different compilation units. This
  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
  * type graph will likely have at least two different BTF types that describe
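
For context, the cross-CU situation the comment describes comes from C code like this sketch, where one compilation unit sees only a forward declaration while another sees the full definition:

```c
/* cu1.c: only a forward declaration is visible, so BTF records a FWD */
struct foo;
struct foo *ptr_from_cu1;

/* cu2.c: the full definition is visible, so BTF records a STRUCT */
struct foo {
	int x;
};
struct foo val_from_cu2;
```
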
@@ -286,7 +286,7 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d);
 LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
 
 struct btf_dump_emit_type_decl_opts {
-	/* size of this struct, for forward/backward compatiblity */
+	/* size of this struct, for forward/backward compatibility */
 	size_t sz;
 	/* optional field name for type declaration, e.g.:
 	 *    - struct my_struct <FNAME>
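
The `sz` field is what lets callers built against older or newer libbpf agree on the struct layout; the LIBBPF_OPTS() macro fills it in automatically. A hedged usage sketch, assuming `d` and `id` come from an existing BTF dump session and `"my_var"` is an illustrative name:

```c
#include <bpf/btf.h>

/* Emit a C declaration like "struct my_struct my_var" for BTF type `id` */
static int emit_decl(struct btf_dump *d, __u32 id)
{
	LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
		.field_name = "my_var",
		.indent_level = 0,
	);

	return btf_dump__emit_type_decl(d, id, &opts);
}
```
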
@@ -304,7 +304,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
  * definition, in which case they have to be declared inline as part of field
  * type declaration; or as a top-level anonymous enum, typically used for
  * declaring global constants. It's impossible to distinguish between two
- * without knowning whether given enum type was referenced from other type:
+ * without knowing whether given enum type was referenced from other type:
  * top-level anonymous enum won't be referenced by anything, while embedded
  * one will.
  */
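
The two cases the comment distinguishes look like this in C (a sketch, not from the patch):

```c
/* embedded anonymous enum: referenced by a field, declared inline */
struct packet {
	enum { DIR_RX, DIR_TX } direction;
};

/* top-level anonymous enum: referenced by nothing, used for constants */
enum {
	MAX_QUEUES = 16,
};
```
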
@@ -152,7 +152,7 @@ struct bpf_object_open_opts {
 	 * log_buf and log_level settings.
 	 *
 	 * If specified, this log buffer will be passed for:
-	 *   - each BPF progral load (BPF_PROG_LOAD) attempt, unless overriden
+	 *   - each BPF progral load (BPF_PROG_LOAD) attempt, unless overridden
 	 *     with bpf_program__set_log() on per-program level, to get
 	 *     BPF verifier log output.
 	 *   - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
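
A sketch of how these open options are typically wired up; the object file name is a placeholder:

```c
#include <bpf/libbpf.h>

static char log_buf[64 * 1024];

static struct bpf_object *open_with_log(void)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		/* route verifier and BTF-load logs into our buffer */
		.kernel_log_buf = log_buf,
		.kernel_log_size = sizeof(log_buf),
		.kernel_log_level = 1,
	);

	return bpf_object__open_file("prog.bpf.o", &opts);
}
```
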
@@ -455,7 +455,7 @@ LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
 /**
  * @brief **bpf_program__attach()** is a generic function for attaching
  * a BPF program based on auto-detection of program type, attach type,
- * and extra paremeters, where applicable.
+ * and extra parameters, where applicable.
  *
  * @param prog BPF program to attach
  * @return Reference to the newly created BPF link; or NULL is returned on error,
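
A hedged usage sketch; `prog` is assumed to come from an already-loaded bpf_object, and the NULL-plus-errno convention follows libbpf 1.0:

```c
#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_one(struct bpf_program *prog)
{
	/* program type and attach type are auto-detected from SEC() */
	struct bpf_link *link = bpf_program__attach(prog);

	if (!link)
		fprintf(stderr, "attach failed: %d\n", -errno);
	/* caller must eventually bpf_link__destroy(link) */
	return link;
}
```
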
@@ -679,7 +679,7 @@ struct bpf_uprobe_opts {
 /**
  * @brief **bpf_program__attach_uprobe()** attaches a BPF program
  * to the userspace function which is found by binary path and
- * offset. You can optionally specify a particular proccess to attach
+ * offset. You can optionally specify a particular process to attach
  * to. You can also optionally attach the program to the function
  * exit instead of entry.
  *
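
A sketch with placeholder binary path and offset; pid -1 means all processes, and retprobe=true would attach to function exit instead of entry:

```c
#include <bpf/libbpf.h>

static struct bpf_link *attach_bash_func(struct bpf_program *prog)
{
	return bpf_program__attach_uprobe(prog,
					  false,       /* retprobe: entry, not exit */
					  -1,          /* pid: any process */
					  "/bin/bash", /* placeholder binary path */
					  0x1234);     /* placeholder func offset */
}
```
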
@@ -1593,11 +1593,11 @@ LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_i
  * memory region of the ring buffer.
  * This ring buffer can be used to implement a custom events consumer.
  * The ring buffer starts with the *struct perf_event_mmap_page*, which
- * holds the ring buffer managment fields, when accessing the header
+ * holds the ring buffer management fields, when accessing the header
  * structure it's important to be SMP aware.
  * You can refer to *perf_event_read_simple* for a simple example.
  * @param pb the perf buffer structure
- * @param buf_idx the buffer index to retreive
+ * @param buf_idx the buffer index to retrieve
  * @param buf (out) gets the base pointer of the mmap()'ed memory
  * @param buf_size (out) gets the size of the mmap()'ed region
  * @return 0 on success, negative error code for failure
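
A sketch of retrieving the raw ring for buffer index 0; `pb` is assumed to be an existing perf buffer:

```c
#include <bpf/libbpf.h>
#include <linux/perf_event.h>

static int peek_ring(struct perf_buffer *pb)
{
	void *buf;
	size_t buf_size;
	int err;

	err = perf_buffer__buffer(pb, 0, &buf, &buf_size);
	if (err)
		return err;

	/* region starts with the management header; reads of data_head/
	 * data_tail must use SMP-aware barriers, per the comment above */
	struct perf_event_mmap_page *header = buf;
	(void)header;
	return 0;
}
```
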
@@ -76,7 +76,7 @@ enum libbpf_strict_mode {
 	 * first BPF program or map creation operation. This is done only if
 	 * kernel is too old to support memcg-based memory accounting for BPF
 	 * subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
-	 * but it can be overriden with libbpf_set_memlock_rlim() API.
+	 * but it can be overridden with libbpf_set_memlock_rlim() API.
 	 * Note that libbpf_set_memlock_rlim() needs to be called before
 	 * the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
 	 * operation.
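
A sketch; the 8 MiB cap is an arbitrary example value:

```c
#include <bpf/bpf.h>

/* must run before the first bpf_prog_load()/bpf_map_create()/
 * bpf_object__load() call, as the comment above notes */
static int setup_memlock(void)
{
	return libbpf_set_memlock_rlim(8 * 1024 * 1024);
}
```
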
@@ -97,7 +97,7 @@ LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
 /**
  * @brief **libbpf_get_error()** extracts the error code from the passed
  * pointer
  * @param ptr pointer returned from libbpf API function
- * @return error code; or 0 if no error occured
+ * @return error code; or 0 if no error occurred
  *
  * Note, as of libbpf 1.0 this function is not necessary and not recommended
  * to be used. Libbpf doesn't return error code embedded into the pointer
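
A sketch of the libbpf 1.0 convention this note refers to, where NULL plus errno replaces libbpf_get_error(); the object path is a placeholder:

```c
#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static struct bpf_object *open_obj(void)
{
	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);

	if (!obj) /* NULL + errno instead of libbpf_get_error(obj) */
		fprintf(stderr, "open failed: %d\n", -errno);
	return obj;
}
```
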
@@ -107,7 +107,7 @@ static inline void skel_free(const void *p)
  * The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
  * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
  * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
- * is not nessary.
+ * is not necessary.
  *
  * For user space:
  * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.