Commit 8bcfdd7c, authored Feb 17, 2021 by Ingo Molnar
Merge branch 'perf/kprobes' into perf/core, to pick up finished branch

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 838342a6 abd82e53
Showing 2 changed files with 81 additions and 98 deletions.
arch/x86/include/asm/kprobes.h  +7 -4
arch/x86/kernel/kprobes/core.c  +74 -94
arch/x86/include/asm/kprobes.h (view file @ 8bcfdd7c)
@@ -58,14 +58,17 @@ struct arch_specific_insn {
 	/* copy of the original instruction */
 	kprobe_opcode_t *insn;
 	/*
-	 * boostable = false: This instruction type is not boostable.
-	 * boostable = true: This instruction has been boosted: we have
+	 * boostable = 0: This instruction type is not boostable.
+	 * boostable = 1: This instruction has been boosted: we have
 	 * added a relative jump after the instruction copy in insn,
 	 * so no single-step and fixup are needed (unless there's
 	 * a post_handler).
 	 */
-	bool boostable;
-	bool if_modifier;
+	unsigned boostable:1;
+	unsigned if_modifier:1;
+	unsigned is_call:1;
+	unsigned is_pushf:1;
+	unsigned is_abs_ip:1;
 	/* Number of bytes of text poked */
 	int tp_len;
 };
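Note (annotation, not part of the commit): the five 1-bit fields above replace the two bools so that everything resume_execution() later needs is recorded once, at probe-preparation time. A minimal user-space sketch of the same layout, using a hypothetical struct name, showing that the flags share a single unsigned word and how they are set and tested:

	/* Standalone sketch, not from the commit; "resume_flags" is a made-up name. */
	#include <stdio.h>

	struct resume_flags {
		unsigned boostable:1;
		unsigned if_modifier:1;
		unsigned is_call:1;
		unsigned is_pushf:1;
		unsigned is_abs_ip:1;
	};

	int main(void)
	{
		struct resume_flags f = { .is_call = 1 };	/* e.g. a relative call */

		/* All five 1-bit fields fit in one unsigned int. */
		printf("sizeof(struct resume_flags) = %zu, sizeof(unsigned) = %zu\n",
		       sizeof(f), sizeof(unsigned));
		printf("is_call=%u boostable=%u\n", f.is_call, f.boostable);
		return 0;
	}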
arch/x86/kernel/kprobes/core.c (view file @ 8bcfdd7c)
@@ -132,26 +132,6 @@ void synthesize_relcall(void *dest, void *from, void *to)
 }
 NOKPROBE_SYMBOL(synthesize_relcall);
 
-/*
- * Skip the prefixes of the instruction.
- */
-static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
-{
-	insn_attr_t attr;
-
-	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
-	while (inat_is_legacy_prefix(attr)) {
-		insn++;
-		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
-	}
-#ifdef CONFIG_X86_64
-	if (inat_is_rex_prefix(attr))
-		insn++;
-#endif
-	return insn;
-}
-NOKPROBE_SYMBOL(skip_prefixes);
-
 /*
  * Returns non-zero if INSN is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
@@ -311,25 +291,6 @@ static int can_probe(unsigned long paddr)
 	return (addr == paddr);
 }
 
-/*
- * Returns non-zero if opcode modifies the interrupt flag.
- */
-static int is_IF_modifier(kprobe_opcode_t *insn)
-{
-	/* Skip prefixes */
-	insn = skip_prefixes(insn);
-
-	switch (*insn) {
-	case 0xfa:		/* cli */
-	case 0xfb:		/* sti */
-	case 0xcf:		/* iret/iretd */
-	case 0x9d:		/* popf/popfd */
-		return 1;
-	}
-
-	return 0;
-}
-
 /*
  * Copy an instruction with recovering modified instruction by kprobes
  * and adjust the displacement if the instruction uses the %rip-relative
@@ -411,9 +372,9 @@ static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
 		synthesize_reljump(buf + len, p->ainsn.insn + len,
 				   p->addr + insn->length);
 		len += JMP32_INSN_SIZE;
-		p->ainsn.boostable = true;
+		p->ainsn.boostable = 1;
 	} else {
-		p->ainsn.boostable = false;
+		p->ainsn.boostable = 0;
 	}
 
 	return len;
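Note (annotation, not part of the commit): prepare_boost() appends a jmp rel32 after the copied instruction so that execution returns to the original code without a single-step. A standalone sketch of the displacement arithmetic behind that synthesized jump, using hypothetical addresses; the rel32 of a 5-byte e9 jump is relative to the address just past the jump itself:

	/* Standalone sketch, not from the commit; addresses are invented. */
	#include <stdio.h>
	#include <stdint.h>

	#define JMP32_INSN_SIZE 5	/* e9 + rel32 */

	int main(void)
	{
		uintptr_t slot     = 0xffffffffa0002000UL;	/* copied-instruction buffer */
		uintptr_t probe    = 0xffffffff81000100UL;	/* original probed address   */
		unsigned  insn_len = 3;				/* length of the copied insn */

		uintptr_t jmp_at = slot + insn_len;		/* where the boost jmp lives */
		uintptr_t target = probe + insn_len;		/* next original instruction */
		int32_t   rel32  = (int32_t)(target - (jmp_at + JMP32_INSN_SIZE));

		printf("boost jmp at 0x%lx -> 0x%lx, rel32 = %d\n",
		       (unsigned long)jmp_at, (unsigned long)target, rel32);
		return 0;
	}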
@@ -450,6 +411,67 @@ void free_insn_page(void *page)
 	module_memfree(page);
 }
 
+static void set_resume_flags(struct kprobe *p, struct insn *insn)
+{
+	insn_byte_t opcode = insn->opcode.bytes[0];
+
+	switch (opcode) {
+	case 0xfa:		/* cli */
+	case 0xfb:		/* sti */
+	case 0x9d:		/* popf/popfd */
+		/* Check whether the instruction modifies Interrupt Flag or not */
+		p->ainsn.if_modifier = 1;
+		break;
+	case 0x9c:	/* pushfl */
+		p->ainsn.is_pushf = 1;
+		break;
+	case 0xcf:	/* iret */
+		p->ainsn.if_modifier = 1;
+		fallthrough;
+	case 0xc2:	/* ret/lret */
+	case 0xc3:
+	case 0xca:
+	case 0xcb:
+	case 0xea:	/* jmp absolute -- ip is correct */
+		/* ip is already adjusted, no more changes required */
+		p->ainsn.is_abs_ip = 1;
+		/* Without resume jump, this is boostable */
+		p->ainsn.boostable = 1;
+		break;
+	case 0xe8:	/* call relative - Fix return addr */
+		p->ainsn.is_call = 1;
+		break;
+#ifdef CONFIG_X86_32
+	case 0x9a:	/* call absolute -- same as call absolute, indirect */
+		p->ainsn.is_call = 1;
+		p->ainsn.is_abs_ip = 1;
+		break;
+#endif
+	case 0xff:
+		opcode = insn->opcode.bytes[1];
+		if ((opcode & 0x30) == 0x10) {
+			/*
+			 * call absolute, indirect
+			 * Fix return addr; ip is correct.
+			 * But this is not boostable
+			 */
+			p->ainsn.is_call = 1;
+			p->ainsn.is_abs_ip = 1;
+			break;
+		} else if (((opcode & 0x31) == 0x20) ||
+			   ((opcode & 0x31) == 0x21)) {
+			/*
+			 * jmp near and far, absolute indirect
+			 * ip is correct.
+			 */
+			p->ainsn.is_abs_ip = 1;
+			/* Without resume jump, this is boostable */
+			p->ainsn.boostable = 1;
+		}
+		break;
+	}
+}
+
 static int arch_copy_kprobe(struct kprobe *p)
 {
 	struct insn insn;
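Note (annotation, not part of the commit): for opcode 0xff the second opcode byte read above is the ModRM byte, whose reg field (bits 5:3) selects the operation within the group (call vs. jmp, near vs. far). A standalone sketch comparing the mask tests used above with the reg field written out explicitly:

	/* Standalone sketch, not from the commit. */
	#include <stdio.h>

	static const char *classify_ff(unsigned char modrm)
	{
		unsigned reg = (modrm >> 3) & 7;	/* ModRM reg field */

		switch (reg) {
		case 2: return "call near, absolute indirect";
		case 3: return "call far, absolute indirect";
		case 4: return "jmp near, absolute indirect";
		case 5: return "jmp far, absolute indirect";
		default: return "other (inc/dec/push)";
		}
	}

	int main(void)
	{
		/* A few sample ModRM bytes, e.g. 0xd0 = call *%rax, 0xe0 = jmp *%rax */
		unsigned char samples[] = { 0x10, 0x18, 0x20, 0x28, 0xd0, 0xe0 };

		for (unsigned i = 0; i < sizeof(samples); i++) {
			unsigned char m = samples[i];
			int is_call = (m & 0x30) == 0x10;			/* reg is 2 or 3 */
			int is_jmp  = ((m & 0x31) == 0x20) ||
				      ((m & 0x31) == 0x21);			/* reg is 4 or 5 */

			printf("modrm 0x%02x: %-32s call=%d jmp=%d\n",
			       m, classify_ff(m), is_call, is_jmp);
		}
		return 0;
	}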
@@ -467,8 +489,8 @@ static int arch_copy_kprobe(struct kprobe *p)
 	 */
 	len = prepare_boost(buf, p, &insn);
 
-	/* Check whether the instruction modifies Interrupt Flag or not */
-	p->ainsn.if_modifier = is_IF_modifier(buf);
+	/* Analyze the opcode and set resume flags */
+	set_resume_flags(p, &insn);
 
 	/* Also, displacement change doesn't affect the first byte */
 	p->opcode = buf[0];
@@ -491,6 +513,9 @@ int arch_prepare_kprobe(struct kprobe *p)
 
 	if (!can_probe((unsigned long)p->addr))
 		return -EILSEQ;
+
+	memset(&p->ainsn, 0, sizeof(p->ainsn));
+
 	/* insn: must be on special executable page on x86. */
 	p->ainsn.insn = get_insn_slot();
 	if (!p->ainsn.insn)
@@ -806,11 +831,6 @@ NOKPROBE_SYMBOL(trampoline_handler);
  * 2) If the single-stepped instruction was a call, the return address
  * that is atop the stack is the address following the copied instruction.
  * We need to make it the address following the original instruction.
- *
- * If this is the first time we've single-stepped the instruction at
- * this probepoint, and the instruction is boostable, boost it: add a
- * jump instruction after the copied instruction, that jumps to the next
- * instruction after the probepoint.
  */
 static void resume_execution(struct kprobe *p, struct pt_regs *regs,
 			     struct kprobe_ctlblk *kcb)
@@ -818,60 +838,20 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
 	unsigned long *tos = stack_addr(regs);
 	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
 	unsigned long orig_ip = (unsigned long)p->addr;
-	kprobe_opcode_t *insn = p->ainsn.insn;
-
-	/* Skip prefixes */
-	insn = skip_prefixes(insn);
 
 	regs->flags &= ~X86_EFLAGS_TF;
-	switch (*insn) {
-	case 0x9c:	/* pushfl */
+
+	/* Fixup the contents of top of stack */
+	if (p->ainsn.is_pushf) {
 		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
 		*tos |= kcb->kprobe_old_flags;
-		break;
-	case 0xc2:	/* iret/ret/lret */
-	case 0xc3:
-	case 0xca:
-	case 0xcb:
-	case 0xcf:
-	case 0xea:	/* jmp absolute -- ip is correct */
-		/* ip is already adjusted, no more changes required */
-		p->ainsn.boostable = true;
-		goto no_change;
-	case 0xe8:	/* call relative - Fix return addr */
+	} else if (p->ainsn.is_call) {
 		*tos = orig_ip + (*tos - copy_ip);
-		break;
-#ifdef CONFIG_X86_32
-	case 0x9a:	/* call absolute -- same as call absolute, indirect */
-		*tos = orig_ip + (*tos - copy_ip);
-		goto no_change;
-#endif
-	case 0xff:
-		if ((insn[1] & 0x30) == 0x10) {
-			/*
-			 * call absolute, indirect
-			 * Fix return addr; ip is correct.
-			 * But this is not boostable
-			 */
-			*tos = orig_ip + (*tos - copy_ip);
-			goto no_change;
-		} else if (((insn[1] & 0x31) == 0x20) ||
-			   ((insn[1] & 0x31) == 0x21)) {
-			/*
-			 * jmp near and far, absolute indirect
-			 * ip is correct. And this is boostable
-			 */
-			p->ainsn.boostable = true;
-			goto no_change;
-		}
-		break;
-	default:
-		break;
 	}
 
-	regs->ip += orig_ip - copy_ip;
+	if (!p->ainsn.is_abs_ip)
+		regs->ip += orig_ip - copy_ip;
 
-no_change:
 	restore_btf();
 }
 NOKPROBE_SYMBOL(resume_execution);
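Note (annotation, not part of the commit): when the single-stepped copy was a call, the return address pushed on the stack points just past the copied instruction, and resume_execution() re-bases it onto the original address with *tos = orig_ip + (*tos - copy_ip). A standalone sketch of that fixup with hypothetical addresses:

	/* Standalone sketch, not from the commit; addresses are invented. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long orig_ip  = 0xffffffff81000100UL;	/* probed call instruction */
		unsigned long copy_ip  = 0xffffffffa0002000UL;	/* single-step copy slot   */
		unsigned long call_len = 5;			/* e8 rel32 is 5 bytes     */

		unsigned long tos = copy_ip + call_len;		/* what the CPU pushed     */

		tos = orig_ip + (tos - copy_ip);		/* the fixup               */

		printf("fixed return address: 0x%lx (expected 0x%lx)\n",
		       tos, orig_ip + call_len);
		return 0;
	}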