26d05b36
Commit
26d05b36
authored
4 years ago
by
Paolo Bonzini
Merge branch 'kvm-async-pf-int' into HEAD
Parents: 0ed076c7 b1d40575
Showing 4 changed files with 40 additions and 13 deletions
arch/x86/Kconfig                 +1  -0
arch/x86/include/asm/idtentry.h  +4  -0
arch/x86/include/asm/kvm_para.h  +1  -0
arch/x86/kernel/kvm.c            +34 -13
arch/x86/Kconfig

@@ -802,6 +802,7 @@ config KVM_GUEST
 	depends on PARAVIRT
 	select PARAVIRT_CLOCK
 	select ARCH_CPUIDLE_HALTPOLL
+	select X86_HV_CALLBACK_VECTOR
 	default y
 	help
 	  This option enables various optimizations for running under the KVM
arch/x86/include/asm/idtentry.h

@@ -647,6 +647,10 @@ DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_acrn_hv_callback);
 DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_xen_hvm_callback);
 #endif
 
+#ifdef CONFIG_KVM_GUEST
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_kvm_asyncpf_interrupt);
+#endif
+
 #undef X86_TRAP_OTHER
 #endif
arch/x86/include/asm/kvm_para.h

@@ -4,6 +4,7 @@
 
 #include <asm/processor.h>
 #include <asm/alternative.h>
+#include <linux/interrupt.h>
 #include <uapi/asm/kvm_para.h>
 
 extern void kvmclock_init(void);
arch/x86/kernel/kvm.c

@@ -9,6 +9,7 @@
 #include <linux/context_tracking.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/kvm_para.h>
 #include <linux/cpu.h>
@@ -232,16 +233,11 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
 noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
-	u32 reason = kvm_read_and_reset_apf_flags();
+	u32 flags = kvm_read_and_reset_apf_flags();
 	bool rcu_exit;
 
-	switch (reason) {
-	case KVM_PV_REASON_PAGE_NOT_PRESENT:
-	case KVM_PV_REASON_PAGE_READY:
-		break;
-	default:
+	if (!flags)
 		return false;
-	}
 
 	rcu_exit = idtentry_enter_cond_rcu(regs);
 	instrumentation_begin();
@@ -254,13 +250,13 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
 		panic("Host injected async #PF in interrupt disabled region\n");
 
-	if (reason == KVM_PV_REASON_PAGE_NOT_PRESENT) {
+	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
 		if (unlikely(!(user_mode(regs))))
 			panic("Host injected async #PF in kernel mode\n");
 		/* Page is swapped out by the host. */
 		kvm_async_pf_task_wait_schedule(token);
 	} else {
-		kvm_async_pf_task_wake(token);
+		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
 	}
 
 	instrumentation_end();
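Read together, the two hunks above leave the exception path handling only the "page not present" half of the protocol; any other non-zero flags value is now unexpected there. Condensed into straight-line C, this is a sketch assembled from the hunks themselves (not a separate change), with comments added for orientation:

	u32 flags = kvm_read_and_reset_apf_flags();

	if (!flags)
		return false;		/* not an async #PF meant for this guest */

	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		/* Page is swapped out by the host; block until it is ready. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		/* 'page ready' no longer arrives via #PF, so anything else is a bug. */
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}
	return true;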
@@ -268,6 +264,27 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 	return true;
 }
 
+DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	u32 token;
+	bool rcu_exit;
+
+	rcu_exit = idtentry_enter_cond_rcu(regs);
+
+	inc_irq_stat(irq_hv_callback_count);
+
+	if (__this_cpu_read(apf_reason.enabled)) {
+		token = __this_cpu_read(apf_reason.token);
+		kvm_async_pf_task_wake(token);
+		__this_cpu_write(apf_reason.token, 0);
+		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
+	}
+
+	idtentry_exit_cond_rcu(regs, rcu_exit);
+	set_irq_regs(old_regs);
+}
+
 static void __init paravirt_ops_setup(void)
 {
 	pv_info.name = "KVM";
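The new sysvec handler completes the "page ready" handshake entirely outside the #PF path: it consumes the token the host stored in the shared apf_reason area, wakes the waiting task, clears the slot, and acknowledges through MSR_KVM_ASYNC_PF_ACK so the host may queue the next notification. A minimal sketch of just that handshake, excerpted from the handler above with explanatory comments added (identifiers as in the diff):

	if (__this_cpu_read(apf_reason.enabled)) {
		u32 token = __this_cpu_read(apf_reason.token);	/* token written by the host */

		kvm_async_pf_task_wake(token);			/* resume the task waiting on it */
		__this_cpu_write(apf_reason.token, 0);		/* the shared slot is free again */
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);		/* allow the next notification */
	}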
@@ -311,17 +328,19 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 static void kvm_guest_cpu_init(void)
 {
-	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
-		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
+		u64 pa;
 
 		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));
 
-		pa |= KVM_ASYNC_PF_ENABLED;
+		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
+		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
 
 		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
 			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
 
+		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
+
 		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
 		__this_cpu_write(apf_reason.enabled, 1);
 		pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
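For readability, the per-CPU enable sequence that the hunk above converges on can be read as straight-line C. This is a sketch assembled from the new lines only; the MSR names and flag bits are exactly those appearing in the diff:

	u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));	/* shared guest/host area */

	/* Enable async PF and ask for 'page ready' events as interrupts, not #PF. */
	pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
		pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

	/* Tell the host which vector to raise, then turn the machinery on. */
	wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
	wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);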
@@ -646,8 +665,10 @@ static void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
-	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf)
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
 		static_branch_enable(&kvm_async_pf_enabled);
+		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
+	}
 
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;