Commit 5fbb0df6, authored Mar 19, 2018 by Marc Zyngier
Merge tag 'kvm-arm-fixes-for-v4.16-2' into HEAD

Resolve conflicts with current mainline

Parents: 4b472ffd, 27e91ad1
Showing 12 changed files with 181 additions and 72 deletions (+181 −72).
arch/arm/kvm/hyp/Makefile           +5  −0
arch/arm/kvm/hyp/banked-sr.c        +4  −0
include/kvm/arm_vgic.h              +1  −0
include/linux/irqchip/arm-gic-v3.h  +1  −0
include/linux/irqchip/arm-gic.h     +1  −0
virt/kvm/arm/arch_timer.c           +69 −53
virt/kvm/arm/hyp/vgic-v3-sr.c       +4  −2
virt/kvm/arm/vgic/vgic-mmio.c       +3  −0
virt/kvm/arm/vgic/vgic-v2.c         +9  −2
virt/kvm/arm/vgic/vgic-v3.c         +8  −1
virt/kvm/arm/vgic/vgic.c            +73 −14
virt/kvm/arm/vgic/vgic.h            +3  −0
arch/arm/kvm/hyp/Makefile

@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 KVM=../../../../virt/kvm

+CFLAGS_ARMV7VE		   :=$(call cc-option, -march=armv7ve)
+
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -14,7 +16,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
 obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o	   += $(CFLAGS_ARMV7VE)
+
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o		   += $(CFLAGS_ARMV7VE)
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
arch/arm/kvm/hyp/banked-sr.c

@@ -20,6 +20,10 @@
 #include <asm/kvm_hyp.h>

+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
+__asm__(".arch_extension     virt");
+
 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
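Why this works (background, not part of the commit): a file-scope __asm__ directive is emitted ahead of the compiler-generated assembly, and ".arch_extension virt" tells the assembler to accept Virtualization Extensions instructions for the rest of the translation unit, even though the compiler itself was never passed -march=armv7ve. A minimal sketch of the kind of code this unlocks; the banked-register read is illustrative, and read_sp_svc is my name, not the patch's:

/* Accept virt-ext instructions from here on. */
__asm__(".arch_extension     virt");

static inline unsigned long read_sp_svc(void)
{
	unsigned long v;

	/* MRS from a banked register: only assembles with the
	 * virtualization extension enabled. */
	asm volatile("mrs %0, SP_svc" : "=r" (v));
	return v;
}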
include/kvm/arm_vgic.h

@@ -358,6 +358,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);

 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
include/linux/irqchip/arm-gic-v3.h

@@ -503,6 +503,7 @@
 #define ICH_HCR_EN			(1 << 0)
 #define ICH_HCR_UIE			(1 << 1)
+#define ICH_HCR_NPIE			(1 << 3)
 #define ICH_HCR_TC			(1 << 10)
 #define ICH_HCR_TALL0			(1 << 11)
 #define ICH_HCR_TALL1			(1 << 12)
include/linux/irqchip/arm-gic.h

@@ -84,6 +84,7 @@
 #define GICH_HCR_EN			(1 << 0)
 #define GICH_HCR_UIE			(1 << 1)
+#define GICH_HCR_NPIE			(1 << 3)

 #define GICH_LR_VIRTUALID		(0x3ff << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT	(10)
virt/kvm/arm/arch_timer.c
@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;

+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
 static const struct kvm_irq_level default_ptimer_irq = {
 	.irq	= 30,
 	.level	= 1,
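Background on the pattern, since the rest of the patch leans on it: DEFINE_STATIC_KEY_FALSE, static_branch_enable() and static_branch_likely() are the kernel's jump-label API from <linux/jump_label.h>. A minimal sketch of how they fit together; only has_gic_active_state is from the patch, the surrounding functions are illustrative:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

/* Flipped at most once, at init time, when the GIC can track an
 * active state for the timer interrupt (see kvm_timer_hyp_init()
 * later in this diff). */
static void example_init(bool has_gic)
{
	if (has_gic)
		static_branch_enable(&has_gic_active_state);
}

/* On the hot path the test compiles down to a patched jump/no-op
 * instead of a memory load plus conditional branch. */
static void example_hot_path(void)
{
	if (static_branch_likely(&has_gic_active_state))
		;	/* GIC-backed (active state) handling */
	else
		;	/* percpu-irq mask/unmask fallback */
}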
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
 	return timecounter->cc->read(timecounter->cc);
 }

+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+	return static_branch_unlikely(&userspace_irqchip_in_use) &&
+		unlikely(!irqchip_in_kernel(kvm));
+}
+
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
 	cancel_work_sync(work);
 }

-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-	/*
-	 * When using a userspace irqchip with the architected timers, we must
-	 * prevent continuously exiting from the guest, and therefore mask the
-	 * physical interrupt by disabling it on the host interrupt controller
-	 * when the virtual level is high, such that the guest can make
-	 * forward progress.  Once we detect the output level being
-	 * de-asserted, we unmask the interrupt again so that we exit from the
-	 * guest when the timer fires.
-	 */
-	if (vtimer->irq.level)
-		disable_percpu_irq(host_vtimer_irq);
-	else
-		enable_percpu_irq(host_vtimer_irq, 0);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	if (kvm_timer_should_fire(vtimer))
 		kvm_timer_update_irq(vcpu, true, vtimer);

-	if (static_branch_unlikely(&userspace_irqchip_in_use) &&
-	    unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_vtimer_update_mask_user(vcpu);
+	if (userspace_irqchip(vcpu->kvm) &&
+	    !static_branch_unlikely(&has_gic_active_state))
+		disable_percpu_irq(host_vtimer_irq);

 	return IRQ_HANDLED;
 }
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
 				   timer_ctx->irq.level);

-	if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
-	    likely(irqchip_in_kernel(vcpu->kvm))) {
+	if (!userspace_irqchip(vcpu->kvm)) {
 		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
 					  timer_ctx->irq.irq,
 					  timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
 	phys_timer_emulate(vcpu);
 }

-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
-	timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-	timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 	if (!vtimer->loaded)
 		goto out;

-	if (timer->enabled)
-		__timer_snapshot_state(vtimer);
+	if (timer->enabled) {
+		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+	}

 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
 	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 }

-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+	int r;
+	r = irq_set_irqchip_state(host_vtimer_irq,
+				  IRQCHIP_STATE_ACTIVE,
+				  active);
+	WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	bool phys_active;
-	int ret;
-
-	phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);

-	ret = irq_set_irqchip_state(host_vtimer_irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    phys_active);
-	WARN_ON(ret);
+	if (irqchip_in_kernel(vcpu->kvm))
+		phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+	else
+		phys_active = vtimer->irq.level;
+	set_vtimer_irq_phys_active(vcpu, phys_active);
 }

-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
-	kvm_vtimer_update_mask_user(vcpu);
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+	/*
+	 * When using a userspace irqchip with the architected timers and a
+	 * host interrupt controller that doesn't support an active state, we
+	 * must still prevent continuously exiting from the guest, and
+	 * therefore mask the physical interrupt by disabling it on the host
+	 * interrupt controller when the virtual level is high, such that the
+	 * guest can make forward progress.  Once we detect the output level
+	 * being de-asserted, we unmask the interrupt again so that we exit
+	 * from the guest when the timer fires.
+	 */
+	if (vtimer->irq.level)
+		disable_percpu_irq(host_vtimer_irq);
+	else
+		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }

 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
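For readers unfamiliar with the genirq call used above: irq_set_irqchip_state() (declared in <linux/interrupt.h>) reaches into the irqchip driver to set a given state of an interrupt line. A hedged sketch of what the new helper achieves; the call and its signature are real, the framing function is mine:

#include <linux/interrupt.h>

/* While the vcpu runs, mirror the virtual timer's line level into the
 * physical PPI's active state. An active interrupt is treated as
 * in-service by the GIC, so the host CPU does not take (and endlessly
 * re-take) the interrupt the guest is currently handling. */
static void example_mirror_active_state(unsigned int host_vtimer_irq,
					bool active)
{
	WARN_ON(irq_set_irqchip_state(host_vtimer_irq,
				      IRQCHIP_STATE_ACTIVE, active));
}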
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;

-	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_timer_vcpu_load_user(vcpu);
+	if (static_branch_likely(&has_gic_active_state))
+		kvm_timer_vcpu_load_gic(vcpu);
 	else
-		kvm_timer_vcpu_load_vgic(vcpu);
+		kvm_timer_vcpu_load_nogic(vcpu);

 	set_cntvoff(vtimer->cntvoff);
@@ -557,22 +561,29 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

-	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-		__timer_snapshot_state(vtimer);
-		if (!kvm_timer_should_fire(vtimer)) {
-			kvm_timer_update_irq(vcpu, false, vtimer);
-			kvm_vtimer_update_mask_user(vcpu);
-		}
+	if (!kvm_timer_should_fire(vtimer)) {
+		kvm_timer_update_irq(vcpu, false, vtimer);
+		if (static_branch_likely(&has_gic_active_state))
+			set_vtimer_irq_phys_active(vcpu, false);
+		else
+			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 	}
 }

 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	unmask_vtimer_irq_user(vcpu);
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	if (unlikely(!timer->enabled))
+		return;
+
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		unmask_vtimer_irq_user(vcpu);
 }

 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
@@ -586,6 +597,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	ptimer->cnt_ctl = 0;
 	kvm_timer_update_state(vcpu);

+	if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
+		kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
+
 	return 0;
 }
@@ -755,9 +769,11 @@ int kvm_timer_hyp_init(bool has_gic)
 			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
 			goto out_free_irq;
 		}
+
+		static_branch_enable(&has_gic_active_state);
 	}

-	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
+	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

 	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
 			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
virt/kvm/arm/hyp/vgic-v3-sr.c

@@ -216,8 +216,10 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	 * LRs, and when reading back the VMCR on non-VHE systems.
 	 */
 	if (used_lrs || !has_vhe()) {
-		if (!cpu_if->vgic_sre)
-			dsb(st);
+		if (!cpu_if->vgic_sre) {
+			dsb(sy);
+			isb();
+		}
 	}

 	if (used_lrs) {
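A note on the barrier strengthening above (my reading of the change, not text from the commit): dsb(st) only waits for prior stores to complete, whereas reading the LRs back for a guest without SRE requires all prior memory accesses to have completed and the pipeline to be resynchronized, hence the upgrade to dsb(sy) plus isb(). Sketched with the ARM barrier macros from <asm/barrier.h>:

#include <asm/barrier.h>

static void example_barrier_semantics(void)
{
	dsb(st);	/* completes prior stores only */
	dsb(sy);	/* completes all prior loads and stores */
	isb();		/* flushes the pipeline so the following
			 * instructions observe those effects */
}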
virt/kvm/arm/vgic/vgic-mmio.c

@@ -113,9 +113,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 	/* Loop over all IRQs affected by this read */
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+		unsigned long flags;

+		spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq_is_pending(irq))
 			value |= (1U << i);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);

 		vgic_put_irq(vcpu->kvm, irq);
 	}
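The helper guarded here, irq_is_pending(), reads more than one field, which is why the lock matters. Completing its body for context (this is how it is defined in vgic.h in this tree, to the best of my knowledge; the vgic.h hunk at the end of this commit adds the lock requirement as a comment):

static inline bool irq_is_pending(struct vgic_irq *irq)
{
	if (irq->config == VGIC_CONFIG_EDGE)
		return irq->pending_latch;
	else
		return irq->pending_latch || irq->line_level;
}

Reading pending_latch and line_level without irq_lock could observe the two fields mid-update, so the MMIO read now takes the lock per interrupt.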
virt/kvm/arm/vgic/vgic-v2.c

@@ -37,6 +37,13 @@ void vgic_v2_init_lrs(void)
 		vgic_v2_write_lr(i, 0);
 }

+void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	cpuif->vgic_hcr |= GICH_HCR_NPIE;
+}
+
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -64,7 +71,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;

-	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+	cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);

 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u32 val = cpuif->vgic_lr[lr];
@@ -396,7 +403,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
 	kvm_vgic_global_state.type = VGIC_V2;
 	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

-	kvm_info("vgic-v2@%llx\n", info->vctrl.start);
+	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

 	return 0;
 out:
virt/kvm/arm/vgic/vgic-v3.c

@@ -27,6 +27,13 @@ static bool group1_trap;
 static bool common_trap;
 static bool gicv4_enable;

+void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	cpuif->vgic_hcr |= ICH_HCR_NPIE;
+}
+
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -48,7 +55,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;

-	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+	cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);

 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u64 val = cpuif->vgic_lr[lr];
virt/kvm/arm/vgic/vgic.c
@@ -496,6 +496,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
 	return ret;
 }

+/**
+ * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
+ * @vcpu: The VCPU pointer
+ * @vintid: The INTID of the interrupt
+ *
+ * Reset the active and pending states of a mapped interrupt. Kernel
+ * subsystems injecting mapped interrupts should reset their interrupt lines
+ * when we are doing a reset of the VM.
+ */
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
+{
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	unsigned long flags;
+
+	if (!irq->hw)
+		goto out;
+
+	spin_lock_irqsave(&irq->irq_lock, flags);
+	irq->active = false;
+	irq->pending_latch = false;
+	irq->line_level = false;
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
+out:
+	vgic_put_irq(vcpu->kvm, irq);
+}
+
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
 	struct vgic_irq *irq;
@@ -685,22 +711,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
 		vgic_v3_set_underflow(vcpu);
 }

+static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_set_npie(vcpu);
+	else
+		vgic_v3_set_npie(vcpu);
+}
+
 /* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
+static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
+				 bool *multi_sgi)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
 	int count = 0;

+	*multi_sgi = false;
+
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
 		/* GICv2 SGIs can count for more than one... */
-		if (vgic_irq_is_sgi(irq->intid) && irq->source)
-			count += hweight8(irq->source);
-		else
+		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
+			int w = hweight8(irq->source);
+
+			count += w;
+			*multi_sgi |= (w > 1);
+		} else {
 			count++;
+		}
 		spin_unlock(&irq->irq_lock);
 	}
 	return count;
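hweight8() (from <linux/bitops.h>) is a population count: each set bit in irq->source identifies one source CPU of a GICv2 SGI, and each source needs its own list register slot. A tiny illustrative helper; the function name and framing are mine:

#include <linux/bitops.h>

/* E.g. source == 0xb1 (bits 0, 4, 5 and 7 set) needs four LR slots,
 * and, having more than one source, forces the AP list to be sorted
 * so all of them are presented before any lower-priority interrupt. */
static int example_sgi_lr_slots(u8 source, bool *multi)
{
	int w = hweight8(source);

	*multi = (w > 1);
	return w;
}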
@@ -711,28 +752,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
-	int count = 0;
+	int count;
+	bool npie = false;
+	bool multi_sgi;
+	u8 prio = 0xff;

 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

-	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
+	count = compute_ap_list_depth(vcpu, &multi_sgi);
+	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
 		vgic_sort_ap_list(vcpu);

+	count = 0;
+
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);

-		if (unlikely(vgic_target_oracle(irq) != vcpu))
-			goto next;
-
 		/*
-		 * If we get an SGI with multiple sources, try to get
-		 * them in all at once.
+		 * If we have multi-SGIs in the pipeline, we need to
+		 * guarantee that they are all seen before any IRQ of
+		 * lower priority. In that case, we need to filter out
+		 * these interrupts by exiting early. This is easy as
+		 * the AP list has been sorted already.
 		 */
-		do {
+		if (multi_sgi && irq->priority > prio) {
+			spin_unlock(&irq->irq_lock);
+			break;
+		}
+
+		if (likely(vgic_target_oracle(irq) == vcpu)) {
 			vgic_populate_lr(vcpu, irq, count++);
-		} while (irq->source && count < kvm_vgic_global_state.nr_lr);

-next:
+			if (irq->source) {
+				npie = true;
+				prio = irq->priority;
+			}
+		}
+
 		spin_unlock(&irq->irq_lock);

 		if (count == kvm_vgic_global_state.nr_lr) {
@@ -743,6 +799,9 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		}
 	}

+	if (npie)
+		vgic_set_npie(vcpu);
+
 	vcpu->arch.vgic_cpu.used_lrs = count;

 	/* Nuke remaining LRs */
virt/kvm/arm/vgic/vgic.h

@@ -96,6 +96,7 @@
 /* we only support 64 kB translation table page size */
 #define KVM_ITS_L1E_ADDR_MASK		GENMASK_ULL(51, 16)

+/* Requires the irq_lock to be held by the caller. */
 static inline bool irq_is_pending(struct vgic_irq *irq)
 {
 	if (irq->config == VGIC_CONFIG_EDGE)
@@ -159,6 +160,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
 int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
 			 int offset, u32 *val);
@@ -191,6 +193,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_enable(struct kvm_vcpu *vcpu);