Commit 22a39c3d authored by Linus Torvalds

Merge tag 'locking-core-2022-08-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "This was a fairly quiet cycle for the locking subsystem:

   - lockdep: Fix a handful of the more complex lockdep_init_map_*()
     primitives that can lose the lock_type & cause false reports. No
     such mishap was observed in the wild.

   - jump_label improvements: simplify the cross-arch support of initial
     NOP patching by making it arch-specific code (used on MIPS only),
     and remove the s390 initial NOP patching that was superfluous"

* tag 'locking-core-2022-08-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/lockdep: Fix lockdep_init_map_*() confusion
  jump_label: make initial NOP patching the special case
  jump_label: mips: move module NOP patching into arch code
  jump_label: s390: avoid pointless initial NOP patching
parents b167fdff eae6d58d
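As background for the jump_label changes below, here is a minimal consumer-side sketch (the key and function names are made up for illustration) of the static-key API, which these patches leave untouched. The branch site is emitted as a NOP at compile time; "initial NOP patching" refers to the core code rewriting that NOP at boot or module load, which most architectures never actually need:

  #include <linux/jump_label.h>
  #include <linux/printk.h>

  /* Illustrative key name; defaults to false, so the fast path stays a NOP. */
  static DEFINE_STATIC_KEY_FALSE(my_feature_key);

  static void my_fast_path(void)
  {
  	/* Compiled as a NOP; patched to a branch only once the key is enabled. */
  	if (static_branch_unlikely(&my_feature_key))
  		pr_info("my_feature is on\n");
  }

  static void my_feature_switch_on(void)
  {
  	/* Flipping the key makes the core call arch_jump_label_transform()
  	 * for every site that tests it. */
  	static_branch_enable(&my_feature_key);
  }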
@@ -201,9 +201,6 @@ static_key->entry field makes use of the two least significant bits.
* ``void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type)``,
see: arch/x86/kernel/jump_label.c
-* ``__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type)``,
-see: arch/x86/kernel/jump_label.c
* ``struct jump_entry``,
see: arch/x86/include/asm/jump_label.h
@@ -96,19 +96,6 @@ void arch_jump_label_transform(struct jump_entry *entry,
flush_icache_range(entry->code, entry->code + JUMP_LABEL_NOP_SIZE);
}
-void arch_jump_label_transform_static(struct jump_entry *entry,
-enum jump_label_type type)
-{
-/*
-* We use only one NOP type (1x, 4 byte) in arch_static_branch, so
-* there's no need to patch an identical NOP over the top of it here.
-* The generic code calls 'arch_jump_label_transform' if the NOP needs
-* to be replaced by a branch, so 'arch_jump_label_transform_static' is
-* never called with type other than JUMP_LABEL_NOP.
-*/
-BUG_ON(type != JUMP_LABEL_NOP);
-}
#ifdef CONFIG_ARC_DBG_JUMP_LABEL
#define SELFTEST_MSG "ARC: instruction generation self-test: "
@@ -27,9 +27,3 @@ void arch_jump_label_transform(struct jump_entry *entry,
{
__arch_jump_label_transform(entry, type, false);
}
-void arch_jump_label_transform_static(struct jump_entry *entry,
-enum jump_label_type type)
-{
-__arch_jump_label_transform(entry, type, true);
-}
@@ -26,14 +26,3 @@ void arch_jump_label_transform(struct jump_entry *entry,
aarch64_insn_patch_text_nosync(addr, insn);
}
-void arch_jump_label_transform_static(struct jump_entry *entry,
-enum jump_label_type type)
-{
-/*
-* We use the architected A64 NOP in arch_static_branch, so there's no
-* need to patch an identical A64 NOP over the top of it here. The core
-* will call arch_jump_label_transform from a module notifier if the
-* NOP needs to be replaced by a branch.
-*/
-}
@@ -8,6 +8,8 @@
#ifndef _ASM_MIPS_JUMP_LABEL_H
#define _ASM_MIPS_JUMP_LABEL_H
+#define arch_jump_label_transform_static arch_jump_label_transform
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -88,3 +88,22 @@ void arch_jump_label_transform(struct jump_entry *e,
mutex_unlock(&text_mutex);
}
+#ifdef CONFIG_MODULES
+void jump_label_apply_nops(struct module *mod)
+{
+struct jump_entry *iter_start = mod->jump_entries;
+struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+struct jump_entry *iter;
+/* if the module doesn't have jump label entries, just return */
+if (iter_start == iter_stop)
+return;
+for (iter = iter_start; iter < iter_stop; iter++) {
+/* Only write NOPs for arch_branch_static(). */
+if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
+arch_jump_label_transform(iter, JUMP_LABEL_NOP);
+}
+}
+#endif
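Worth noting how the override wiring works: rather than a __weak symbol, an architecture now opts in by plain preprocessor convention. MIPS aliases the hook in its jump_label.h (hunk above), and the generic kernel/jump_label.c (hunk further down) only provides an empty fallback when no such alias exists. A condensed sketch of the two sides, assembled from the hunks in this merge:

  /* arch/mips side (from the header hunk above): reuse the regular transform */
  #define arch_jump_label_transform_static arch_jump_label_transform

  /* generic side (from the kernel/jump_label.c hunk below): no-op fallback,
   * compiled only when the architecture did not define the name above */
  #ifndef arch_jump_label_transform_static
  static void arch_jump_label_transform_static(struct jump_entry *entry,
  					     enum jump_label_type type)
  {
  	/* nothing to do on most architectures */
  }
  #endif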
@@ -21,6 +21,7 @@
#include <linux/spinlock.h>
#include <linux/jump_label.h>
+extern void jump_label_apply_nops(struct module *mod);
struct mips_hi16 {
struct mips_hi16 *next;
@@ -428,7 +429,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *s;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-/* Make jump label nops. */
-jump_label_apply_nops(me);
+if (IS_ENABLED(CONFIG_JUMP_LABEL))
+jump_label_apply_nops(me);
INIT_LIST_HEAD(&me->arch.dbe_list);
@@ -42,14 +42,3 @@ void arch_jump_label_transform(struct jump_entry *entry,
patch_text(addr, insn);
}
-void arch_jump_label_transform_static(struct jump_entry *entry,
-enum jump_label_type type)
-{
-/*
-* We use the architected NOP in arch_static_branch, so there's no
-* need to patch an identical NOP over the top of it here. The core
-* will call arch_jump_label_transform from a module notifier if the
-* NOP needs to be replaced by a branch.
-*/
-}
@@ -39,15 +39,3 @@ void arch_jump_label_transform(struct jump_entry *entry,
patch_text_nosync(addr, &insn, sizeof(insn));
mutex_unlock(&text_mutex);
}
-void arch_jump_label_transform_static(struct jump_entry *entry,
-enum jump_label_type type)
-{
-/*
-* We use the same instructions in the arch_static_branch and
-* arch_static_branch_jump inline functions, so there's no
-* need to patch them up here.
-* The core will call arch_jump_label_transform when those
-* instructions need to be replaced.
-*/
-}
@@ -10,7 +10,6 @@
#include <linux/stringify.h>
#define JUMP_LABEL_NOP_SIZE 6
-#define JUMP_LABEL_NOP_OFFSET 2
#ifdef CONFIG_CC_IS_CLANG
#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "i"
@@ -21,12 +20,12 @@
#endif
/*
-* We use a brcl 0,2 instruction for jump labels at compile time so it
+* We use a brcl 0,<offset> instruction for jump labels so it
* can be easily distinguished from a hotpatch generated instruction.
*/
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
asm_volatile_goto("0: brcl 0,%l[label]\n"
".pushsection __jump_table,\"aw\"\n"
".balign 8\n"
".long 0b-.,%l[label]-.\n"
@@ -44,14 +44,8 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
panic("Corrupted kernel text");
}
-static struct insn orignop = {
-.opcode = 0xc004,
-.offset = JUMP_LABEL_NOP_OFFSET >> 1,
-};
static void jump_label_transform(struct jump_entry *entry,
-enum jump_label_type type,
-int init)
+enum jump_label_type type)
{
void *code = (void *)jump_entry_code(entry);
struct insn old, new;
@@ -63,27 +57,22 @@ static void jump_label_transform(struct jump_entry *entry,
jump_label_make_branch(entry, &old);
jump_label_make_nop(entry, &new);
}
-if (init) {
-if (memcmp(code, &orignop, sizeof(orignop)))
-jump_label_bug(entry, &orignop, &new);
-} else {
-if (memcmp(code, &old, sizeof(old)))
-jump_label_bug(entry, &old, &new);
-}
+if (memcmp(code, &old, sizeof(old)))
+jump_label_bug(entry, &old, &new);
s390_kernel_write(code, &new, sizeof(new));
}
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
-jump_label_transform(entry, type, 0);
+jump_label_transform(entry, type);
text_poke_sync();
}
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
-jump_label_transform(entry, type, 0);
+jump_label_transform(entry, type);
return true;
}
@@ -91,10 +80,3 @@ void arch_jump_label_transform_apply(void)
{
text_poke_sync();
}
-void __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
-enum jump_label_type type)
-{
-jump_label_transform(entry, type, 1);
-text_poke_sync();
-}
@@ -548,6 +548,5 @@ int module_finalize(const Elf_Ehdr *hdr,
#endif /* CONFIG_FUNCTION_TRACER */
}
-jump_label_apply_nops(me);
return 0;
}
@@ -208,9 +208,6 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
-/* make jump label nops */
-jump_label_apply_nops(me);
do_patch_sections(hdr, sechdrs);
/* Cheetah's I-cache is fully coherent. */
@@ -146,16 +146,3 @@ void arch_jump_label_transform_apply(void)
text_poke_finish();
mutex_unlock(&text_mutex);
}
-static enum {
-JL_STATE_START,
-JL_STATE_NO_UPDATE,
-JL_STATE_UPDATE,
-} jlstate __initdata_or_module = JL_STATE_START;
-__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
-enum jump_label_type type)
-{
-if (jlstate == JL_STATE_UPDATE)
-jump_label_transform(entry, type, 1);
-}
@@ -310,9 +310,6 @@ int module_finalize(const Elf_Ehdr *hdr,
tseg, tseg + text->sh_size);
}
-/* make jump label nops */
-jump_label_apply_nops(me);
if (orc && orc_ip)
unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
(void *)orc->sh_addr, orc->sh_size);
@@ -220,8 +220,6 @@ extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type);
-extern void arch_jump_label_transform_static(struct jump_entry *entry,
-enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
@@ -230,12 +228,12 @@ extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
-extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
+extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -303,11 +301,6 @@ static inline int jump_label_text_reserved(void *start, void *end)
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
-static inline int jump_label_apply_nops(struct module *mod)
-{
-return 0;
-}
static inline void static_key_enable(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
@@ -188,7 +188,7 @@ static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
-lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL);
+lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}
static inline void
@@ -211,24 +211,28 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
* or they are too narrow (they suffer from a false class-split):
*/
#define lockdep_set_class(lock, key) \
-lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \
+lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \
(lock)->dep_map.wait_type_inner, \
-(lock)->dep_map.wait_type_outer)
+(lock)->dep_map.wait_type_outer, \
+(lock)->dep_map.lock_type)
#define lockdep_set_class_and_name(lock, key, name) \
-lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \
+lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \
(lock)->dep_map.wait_type_inner, \
-(lock)->dep_map.wait_type_outer)
+(lock)->dep_map.wait_type_outer, \
+(lock)->dep_map.lock_type)
#define lockdep_set_class_and_subclass(lock, key, sub) \
-lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
+lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \
(lock)->dep_map.wait_type_inner, \
-(lock)->dep_map.wait_type_outer)
+(lock)->dep_map.wait_type_outer, \
+(lock)->dep_map.lock_type)
#define lockdep_set_subclass(lock, sub) \
-lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
(lock)->dep_map.wait_type_inner, \
-(lock)->dep_map.wait_type_outer)
+(lock)->dep_map.wait_type_outer, \
+(lock)->dep_map.lock_type)
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
@@ -332,17 +332,13 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
return 0;
}
-/*
-* Update code which is definitely not currently executing.
-* Architectures which need heavyweight synchronization to modify
-* running code can override this to make the non-live update case
-* cheaper.
-*/
-void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
+#ifndef arch_jump_label_transform_static
+static void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
-arch_jump_label_transform(entry, type);
+/* nothing to do on most architectures */
}
+#endif
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
@@ -508,7 +504,7 @@ void __init jump_label_init(void)
#ifdef CONFIG_MODULES
-static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
+enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
struct static_key *key = jump_entry_key(entry);
bool type = static_key_type(key);
@@ -596,31 +592,6 @@ static void __jump_label_mod_update(struct static_key *key)
}
}
-/***
-* apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
-* @mod: module to patch
-*
-* Allow for run-time selection of the optimal nops. Before the module
-* loads patch these with arch_get_jump_label_nop(), which is specified by
-* the arch specific jump label code.
-*/
-void jump_label_apply_nops(struct module *mod)
-{
-struct jump_entry *iter_start = mod->jump_entries;
-struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
-struct jump_entry *iter;
-/* if the module doesn't have jump label entries, just return */
-if (iter_start == iter_stop)
-return;
-for (iter = iter_start; iter < iter_stop; iter++) {
-/* Only write NOPs for arch_branch_static(). */
-if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
-arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
-}
-}
static int jump_label_add_module(struct module *mod)
{
struct jump_entry *iter_start = mod->jump_entries;
@@ -5238,9 +5238,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
return 0;
}
-lockdep_init_map_waits(lock, name, key, 0,
+lockdep_init_map_type(lock, name, key, 0,
lock->wait_type_inner,
-lock->wait_type_outer);
+lock->wait_type_outer,
+lock->lock_type);
class = register_lock_class(lock, subclass, 0);
hlock->class_idx = class - lock_classes;
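For reference on the lockdep side, the lockdep_init_map_*() initializers form a chain in which each level only supplies a default for the argument it drops; a simplified sketch of the fixed helpers in include/linux/lockdep.h (only lockdep_init_map_type() takes every field, including lock_type):

  static inline void
  lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
  		       struct lock_class_key *key, int subclass,
  		       u8 inner, u8 outer)
  {
  	/* lock_type defaults to LD_LOCK_NORMAL */
  	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
  }

  static inline void
  lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
  		      struct lock_class_key *key, int subclass, u8 inner)
  {
  	/* outer wait type defaults to LD_WAIT_INV (not specified) */
  	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
  }

The bug fixed above was that lockdep_init_map_waits() threw away the caller's outer wait type, and the lockdep_set_class*() macros reinitialized the map through it, so a map set up with a non-default lock_type silently fell back to LD_LOCK_NORMAL; the hunks make them call lockdep_init_map_type() with the values already stored in dep_map.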