Commit 16314874
authored Jul 30, 2020 by Marc Zyngier
Merge branch 'kvm-arm64/misc-5.9' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>

parents 236a5599 022c8328
Showing 5 changed files with 20 additions and 18 deletions
arch/arm64/include/asm/kvm_emulate.h     +1  -1
arch/arm64/kvm/hyp/include/hyp/switch.h  +1  -1
arch/arm64/kvm/mmio.c                    +0  -6
arch/arm64/kvm/mmu.c                     +17 -9
include/trace/events/kvm.h               +1  -1
arch/arm64/include/asm/kvm_emulate.h

@@ -345,7 +345,7 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vc
 	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
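Note (not part of the commit): the hunk above truncates the body of the renamed helper. For reference, a sketch of how kvm_vcpu_abt_issea() plausibly reads in this tree, assuming the FSC_SEA*/FSC_SECC* fault-status constants from asm/kvm_arm.h; it returns true for any synchronous external abort, whether on the access itself or on a translation table walk:

	static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
	{
		switch (kvm_vcpu_trap_get_fault(vcpu)) {
		case FSC_SEA:		/* external abort on the access itself */
		case FSC_SEA_TTW0:	/* external abort on a table walk, levels 0..3 */
		case FSC_SEA_TTW1:
		case FSC_SEA_TTW2:
		case FSC_SEA_TTW3:
		case FSC_SECC:		/* parity/ECC error on the access */
		case FSC_SECC_TTW0:	/* parity/ECC error on a table walk, levels 0..3 */
		case FSC_SECC_TTW1:
		case FSC_SECC_TTW2:
		case FSC_SECC_TTW3:
			return true;
		default:
			return false;
		}
	}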
arch/arm64/kvm/hyp/include/hyp/switch.h

@@ -444,7 +444,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
 			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
 			kvm_vcpu_dabt_isvalid(vcpu) &&
-			!kvm_vcpu_dabt_isextabt(vcpu) &&
+			!kvm_vcpu_abt_issea(vcpu) &&
 			!kvm_vcpu_dabt_iss1tw(vcpu);
 
 		if (valid) {
arch/arm64/kvm/mmio.c

@@ -145,12 +145,6 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		return -ENOSYS;
 	}
 
-	/* Page table accesses IO mem: tell guest to fix its TTBR */
-	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		return 1;
-	}
-
 	/*
 	 * Prepare MMIO operation. First decode the syndrome data we get
 	 * from the CPU. Then try if some in-kernel emulation feels
arch/arm64/kvm/mmu.c

@@ -2111,18 +2111,15 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
 	/* Synchronous External Abort? */
-	if (kvm_vcpu_dabt_isextabt(vcpu)) {
+	if (kvm_vcpu_abt_issea(vcpu)) {
 		/*
 		 * For RAS the host kernel may handle this abort.
 		 * There is no need to pass the error into the guest.
 		 */
-		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
-			return 1;
+		if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
+			kvm_inject_vabt(vcpu);
 
-		if (unlikely(!is_iabt)) {
-			kvm_inject_vabt(vcpu);
-			return 1;
-		}
+		return 1;
 	}
 
 	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
@@ -2145,12 +2142,23 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
 	write_fault = kvm_is_write_fault(vcpu);
 	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
+		/*
+		 * The guest has put either its instructions or its page-tables
+		 * somewhere it shouldn't have. Userspace won't be able to do
+		 * anything about this (there's no syndrome for a start), so
+		 * re-inject the abort back into the guest.
+		 */
 		if (is_iabt) {
-			/* Prefetch Abort on I/O address */
 			ret = -ENOEXEC;
 			goto out;
 		}
 
+		if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+			ret = 1;
+			goto out_unlock;
+		}
+
 		/*
 		 * Check for a cache maintenance operation. Since we
 		 * ended-up here, we know it is outside of any memory
@@ -2161,7 +2169,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	 * So let's assume that the guest is just being
 	 * cautious, and skip the instruction.
 	 */
-	if (kvm_vcpu_dabt_is_cm(vcpu)) {
+	if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
 		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 		ret = 1;
 		goto out_unlock;
include/trace/events/kvm.h

@@ -17,7 +17,7 @@
 	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
 	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
 	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI), \
-	ERSN(HYPERV)
+	ERSN(HYPERV), ERSN(ARM_NISV)
 
 TRACE_EVENT(kvm_userspace_exit,
 	    TP_PROTO(__u32 reason, int errno),
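Note (not part of the commit): ERSN() in this header builds a { value, "name" } pair for __print_symbolic(), so appending ERSN(ARM_NISV) lets the kvm_userspace_exit tracepoint print the KVM_EXIT_ARM_NISV exit reason by name rather than as a raw number. A simplified sketch of how the list is consumed, assuming the list macro extended above is named kvm_trace_exit_reason:

	/* ERSN() as defined earlier in this header: maps an exit-reason
	 * constant to its printable name for __print_symbolic(). */
	#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

	/* Simplified use inside the tracepoint (the in-tree TP_printk() also
	 * special-cases negative errno values): */
	TP_printk("reason %s (%d)",
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno)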