Commit de763fbb authored by Andrii Nakryiko

Merge branch 'libbpf: Fixed various checkpatch issues'

Kang Minchul says:

====================
This patch series contains various checkpatch fixes
in btf.c, libbpf.c, and ringbuf.c.

I know these are trivial, but some of the issues are hard to ignore,
and these checkpatch issues keep accumulating.

v1 -> v2: changed cover letter message.
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents e662c775 b486d19a
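
(The issues fixed below are the kind reported by the kernel's checkpatch
script when pointed directly at source files; an illustrative invocation,
assuming a kernel tree checkout:

    $ ./scripts/checkpatch.pl -f tools/lib/bpf/btf.c tools/lib/bpf/libbpf.c tools/lib/bpf/ringbuf.c

checkpatch's -f/--file mode checks whole files rather than patches.)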
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1724,7 +1724,8 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
 	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
 
 	/* and now restore original strings section size; types data size
-	 * wasn't modified, so doesn't need restoring, see big comment above */
+	 * wasn't modified, so doesn't need restoring, see big comment above
+	 */
 	btf->hdr->str_len = old_strs_len;
 
 	hashmap__free(p.str_off_map);
@@ -2329,7 +2330,7 @@ int btf__add_restrict(struct btf *btf, int ref_type_id)
  */
 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
 {
-	if (!value|| !value[0])
+	if (!value || !value[0])
 		return libbpf_err(-EINVAL);
 
 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
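
For context, btf__add_type_tag() is part of libbpf's public BTF-writing API;
it returns the new type ID on success or a negative error. A minimal usage
sketch (hypothetical snippet, error handling elided):

    #include <bpf/btf.h>

    int add_tagged_int(void)
    {
            struct btf *btf = btf__new_empty();
            int int_id, tag_id;

            /* add an "int" type, then attach a "user" type tag to it,
             * mirroring __attribute__((btf_type_tag("user"))) in C source
             */
            int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
            tag_id = btf__add_type_tag(btf, "user", int_id);

            btf__free(btf);
            return tag_id < 0 ? tag_id : 0;
    }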
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -347,7 +347,8 @@ enum sec_def_flags {
 	SEC_ATTACHABLE = 2,
 	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
 	/* attachment target is specified through BTF ID in either kernel or
-	 * other BPF program's BTF object */
+	 * other BPF program's BTF object
+	 */
 	SEC_ATTACH_BTF = 4,
 	/* BPF program type allows sleeping/blocking in kernel */
 	SEC_SLEEPABLE = 8,
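
As an illustration (not part of this patch), a BPF program whose SEC()
definition carries SEC_ATTACH_BTF has its attach target resolved via a BTF
ID at load time; sketch assuming the usual vmlinux.h/bpf_tracing.h includes:

    SEC("fentry/do_unlinkat")
    int BPF_PROG(trace_unlink, int dfd, struct filename *name)
    {
            return 0;   /* target do_unlinkat is found by BTF ID */
    }

The sleepable variant of the same section ("fentry.s/...") additionally
requires SEC_SLEEPABLE.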
@@ -488,7 +489,7 @@ struct bpf_map {
 	char *name;
 	/* real_name is defined for special internal maps (.rodata*,
 	 * .data*, .bss, .kconfig) and preserves their original ELF section
-	 * name. This is important to be be able to find corresponding BTF
+	 * name. This is important to be able to find corresponding BTF
 	 * DATASEC information.
 	 */
 	char *real_name;
@@ -1863,12 +1864,20 @@ static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
 		return -ERANGE;
 	}
 	switch (ext->kcfg.sz) {
-	case 1: *(__u8 *)ext_val = value; break;
-	case 2: *(__u16 *)ext_val = value; break;
-	case 4: *(__u32 *)ext_val = value; break;
-	case 8: *(__u64 *)ext_val = value; break;
-	default:
-		return -EINVAL;
+	case 1:
+		*(__u8 *)ext_val = value;
+		break;
+	case 2:
+		*(__u16 *)ext_val = value;
+		break;
+	case 4:
+		*(__u32 *)ext_val = value;
+		break;
+	case 8:
+		*(__u64 *)ext_val = value;
+		break;
+	default:
+		return -EINVAL;
 	}
 	ext->is_set = true;
 	return 0;
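
For context, this switch backs libbpf's Kconfig externs: a BPF object may
declare, e.g. (illustrative declaration, using the __kconfig attribute from
bpf_helpers.h):

    extern unsigned int CONFIG_HZ __kconfig;    /* hits the 4-byte case */

and set_kcfg_value_num() stores the parsed numeric config value into the
slot whose size matches the declared extern type (1, 2, 4, or 8 bytes).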
@@ -2770,7 +2779,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 				m->type = enum64_placeholder_id;
 				m->offset = 0;
 			}
-			}
+		}
 	}
 
 	return 0;
@@ -3518,7 +3527,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 	}
 
 	/* sort BPF programs by section name and in-section instruction offset
-	 * for faster search */
+	 * for faster search
+	 */
 	if (obj->nr_programs)
 		qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
@@ -3817,7 +3827,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
 				return -EINVAL;
 			}
 			ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
-						&ext->kcfg.is_signed);
+							&ext->kcfg.is_signed);
 			if (ext->kcfg.type == KCFG_UNKNOWN) {
 				pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
 				return -ENOTSUP;
@@ -4965,9 +4975,9 @@ bpf_object__reuse_map(struct bpf_map *map)
 	err = bpf_map__reuse_fd(map, pin_fd);
 	close(pin_fd);
-	if (err) {
+	if (err)
 		return err;
-	}
+
 	map->pinned = true;
 	pr_debug("reused pinned map at '%s'\n", map->pin_path);
@@ -5485,7 +5495,7 @@ static int load_module_btfs(struct bpf_object *obj)
 		}
 
 		err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
-				sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
+					sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
 		if (err)
 			goto err_out;
@@ -6237,7 +6247,8 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
 		 * prog; each main prog can have a different set of
 		 * subprograms appended (potentially in different order as
 		 * well), so position of any subprog can be different for
-		 * different main programs */
+		 * different main programs
+		 */
 		insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
 
 		pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
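
As a worked example of the relocation formula above (numbers illustrative):
if a call sits at insn_idx 10 of a main program whose sub_insn_off is 0, and
the called subprog was appended at sub_insn_off 100, then
imm = 100 - (0 + 10) - 1 = 89. The kernel resolves a BPF call target as
"instruction after the call" plus imm, i.e. 11 + 89 = 100, the subprog's
first instruction.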
@@ -10995,7 +11006,7 @@ struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
 	usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
 	link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
-				usdt_provider, usdt_name, usdt_cookie);
+					usdt_provider, usdt_name, usdt_cookie);
 	err = libbpf_get_error(link);
 	if (err)
 		return libbpf_err_ptr(err);
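
For reference, this is the internals of the public USDT attach API; a minimal
usage sketch from the caller's side (binary path and provider/probe names are
hypothetical; with libbpf 1.0 semantics a NULL return sets errno):

    struct bpf_link *link;

    link = bpf_program__attach_usdt(prog, -1 /* any pid */,
                                    "/usr/local/bin/myapp",
                                    "myprovider", "myprobe", NULL);
    if (!link)
            fprintf(stderr, "USDT attach failed: %d\n", -errno);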
@@ -12304,7 +12315,7 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
 	btf = bpf_object__btf(s->obj);
 	if (!btf) {
 		pr_warn("subskeletons require BTF at runtime (object %s)\n",
-				bpf_object__name(s->obj));
+			bpf_object__name(s->obj));
 		return libbpf_err(-errno);
 	}
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -128,7 +128,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
 	/* Map read-only producer page and data pages. We map twice as big
 	 * data size to allow simple reading of samples that wrap around the
 	 * end of a ring buffer. See kernel implementation for details.
-	 * */
+	 */
 	tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ,
 		   MAP_SHARED, map_fd, rb->page_size);
 	if (tmp == MAP_FAILED) {
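
The comment's "map twice as big" trick means the consumer never has to
reassemble a sample that wraps past the end of the ring: the second,
mirrored mapping makes the bytes contiguous. A standalone sketch of the
read side (hypothetical helper, assuming data points at such a double
mapping of a power-of-two ring_size):

    #include <stdint.h>
    #include <string.h>

    static void read_sample(const uint8_t *data, uint64_t ring_size,
                            uint64_t cons_pos, void *dst, uint32_t len)
    {
            /* mask the consumer position into the ring */
            uint64_t off = cons_pos & (ring_size - 1);

            /* even if off + len crosses ring_size, the mirrored second
             * mapping keeps the sample linear, so one memcpy suffices
             */
            memcpy(dst, data + off, len);
    }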
@@ -220,7 +220,7 @@ static inline int roundup_len(__u32 len)
 	return (len + 7) / 8 * 8;
 }
 
-static int64_t ringbuf_process_ring(struct ring* r)
+static int64_t ringbuf_process_ring(struct ring *r)
 {
 	int *len_ptr, len, err;
 	/* 64-bit to avoid overflow in case of extreme application behavior */
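
(roundup_len() above rounds a sample's length up to the ring buffer's 8-byte
record alignment via integer division: for example, len = 13 gives
(13 + 7) / 8 * 8 = 16, while len = 16 stays 16. The bitwise equivalent would
be (len + 7) & ~7U.)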