Commit 771df8cf authored Mar 24, 2020 by Marc Zyngier
Merge branch 'irq/gic-v4.1' into irq/irqchip-next

Signed-off-by: Marc Zyngier <maz@kernel.org>

parents 00760d3c 009384b3
Showing 9 changed files with 600 additions and 55 deletions (+600 / -55)
drivers/irqchip/irq-gic-v3-its.c          +400  -22
drivers/irqchip/irq-gic-v3.c               +11   -2
drivers/irqchip/irq-gic-v4.c              +127   -7
include/kvm/arm_vgic.h                      +1   -0
include/linux/irqchip/arm-gic-common.h      +2   -0
include/linux/irqchip/arm-gic-v3.h         +19   -1
include/linux/irqchip/arm-gic-v4.h         +23   -2
virt/kvm/arm/vgic/vgic-v3.c                 +3   -1
virt/kvm/arm/vgic/vgic-v4.c                +14  -20
drivers/irqchip/irq-gic-v3-its.c
@@ -96,6 +96,7 @@ struct its_node {
 	struct mutex		dev_alloc_lock;
 	struct list_head	entry;
 	void __iomem		*base;
+	void __iomem		*sgir_base;
 	phys_addr_t		phys_base;
 	struct its_cmd_block	*cmd_base;
 	struct its_cmd_block	*cmd_write;
@@ -188,6 +189,15 @@ static DEFINE_IDA(its_vpeid_ida);
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
 
+/*
+ * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
+ * always have vSGIs mapped.
+ */
+static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
+{
+	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
+}
+
 static u16 get_its_list(struct its_vm *vm)
 {
 	struct its_node *its;
@@ -197,7 +207,7 @@ static u16 get_its_list(struct its_vm *vm)
 		if (!is_v4(its))
 			continue;
 
-		if (vm->vlpi_count[its->list_nr])
+		if (require_its_list_vmovp(vm, its))
 			__set_bit(its->list_nr, &its_list);
 	}
@@ -239,15 +249,41 @@ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
 	return NULL;
 }
 
-static int irq_to_cpuid(struct irq_data *d)
+static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
+{
+	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
+	return vpe->col_idx;
+}
+
+static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+}
+
+static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
+{
+	struct its_vlpi_map *map = get_vlpi_map(d);
+	int cpu;
+
+	if (map) {
+		cpu = vpe_to_cpuid_lock(map->vpe, flags);
+	} else {
+		/* Physical LPIs are already locked via the irq_desc lock */
+		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
+		/* Keep GCC quiet... */
+		*flags = 0;
+	}
+
+	return cpu;
+}
+
+static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
 {
-	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_vlpi_map *map = get_vlpi_map(d);
 
 	if (map)
-		return map->vpe->col_idx;
-
-	return its_dev->event_map.col_map[its_get_event_id(d)];
+		vpe_to_cpuid_unlock(map->vpe, flags);
 }
 
 static struct its_collection *valid_col(struct its_collection *col)
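The lock/unlock pair above defines the ordering the rest of this series relies on whenever a redistributor is poked on behalf of an interrupt: pin vpe->col_idx first, then take the target redistributor's rd_lock around the MMIO accesses, and release in the reverse order. A minimal sketch of that calling pattern, assuming only the helpers introduced in this hunk (it simply mirrors direct_lpi_inv() further down in this diff; the function name is illustrative and not part of the patch):

static void example_rdist_poke(struct irq_data *d, u64 val)
{
	unsigned long flags;
	void __iomem *rdbase;
	int cpu;

	cpu = irq_to_cpuid_lock(d, &flags);			/* pins vpe->col_idx */
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);	/* serialises rdist MMIO */
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVLPIR);
	wait_for_syncr(rdbase);
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	irq_to_cpuid_unlock(d, flags);
}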
@@ -353,6 +389,15 @@ struct its_cmd_desc {
 		struct {
 			struct its_vpe *vpe;
 		} its_invdb_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+			u8 sgi;
+			u8 priority;
+			bool enable;
+			bool group;
+			bool clear;
+		} its_vsgi_cmd;
 	};
 };
@@ -501,6 +546,31 @@ static void its_encode_db(struct its_cmd_block *cmd, bool db)
 	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
 }
 
+static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
+{
+	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
+}
+
+static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
+{
+	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
+}
+
+static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
+{
+	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
+}
+
+static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
+{
+	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
+}
+
+static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
+{
+	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
+}
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
 	/* Let's fixup BE commands */
@@ -866,6 +936,26 @@ static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
 	return valid_vpe(its, desc->its_invdb_cmd.vpe);
 }
 
+static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
+					  struct its_cmd_block *cmd,
+					  struct its_cmd_desc *desc)
+{
+	if (WARN_ON(!is_v4_1(its)))
+		return NULL;
+
+	its_encode_cmd(cmd, GITS_CMD_VSGI);
+	its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
+	its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
+	its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
+	its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
+	its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
+	its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
+	its_fixup_cmd(cmd);
+
+	return valid_vpe(its, desc->its_vsgi_cmd.vpe);
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
 				 struct its_cmd_block *ptr)
 {
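For reference, all of the per-SGI configuration handled by the its_encode_sgi_*() helpers above lands in the first double-word of the VSGI command; the vPE ID is encoded separately by its_encode_vpeid(), which this patch does not change. The helper below is an illustration only, not part of the patch: it assembles the same DW0 with plain shifts so the bit layout is visible at a glance.

static u64 example_vsgi_dw0(u8 sgi, u8 prio, bool grp, bool clr, bool en)
{
	u64 dw0 = 0;

	dw0 |= GITS_CMD_VSGI;			/* bits [7:0]:   command number  */
	dw0 |= (u64)en  << 8;			/* bit  8:       enable          */
	dw0 |= (u64)clr << 9;			/* bit  9:       clear pending   */
	dw0 |= (u64)grp << 10;			/* bit  10:      group           */
	dw0 |= (u64)(prio >> 4) << 20;		/* bits [23:20]: priority[7:4]   */
	dw0 |= (u64)(sgi & 0xf) << 32;		/* bits [35:32]: SGI number      */

	return dw0;
}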
@@ -1214,7 +1304,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
 		if (!is_v4(its))
 			continue;
 
-		if (!vpe->its_vm->vlpi_count[its->list_nr])
+		if (!require_its_list_vmovp(vpe->its_vm, its))
 			continue;
 
 		desc.its_vmovp_cmd.col = &its->collections[col_id];
@@ -1329,7 +1419,9 @@ static void direct_lpi_inv(struct irq_data *d)
 {
 	struct its_vlpi_map *map = get_vlpi_map(d);
 	void __iomem *rdbase;
+	unsigned long flags;
 	u64 val;
+	int cpu;
 
 	if (map) {
 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1344,10 +1436,14 @@ static void direct_lpi_inv(struct irq_data *d)
 	}
 
 	/* Target the redistributor this LPI is currently routed to */
-	rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+	cpu = irq_to_cpuid_lock(d, &flags);
+	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
 	gic_write_lpir(val, rdbase + GICR_INVLPIR);
 
 	wait_for_syncr(rdbase);
+	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	irq_to_cpuid_unlock(d, flags);
 }
 
 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
@@ -1499,12 +1595,31 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
 	return 0;
 }
 
+/*
+ * Two favourable cases:
+ *
+ * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
+ *     for vSGI delivery
+ *
+ * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
+ *     and we're better off mapping all VPEs always
+ *
+ * If neither (a) nor (b) is true, then we map vPEs on demand.
+ *
+ */
+static bool gic_requires_eager_mapping(void)
+{
+	if (!its_list_map || gic_rdists->has_rvpeid)
+		return true;
+
+	return false;
+}
+
 static void its_map_vm(struct its_node *its, struct its_vm *vm)
 {
 	unsigned long flags;
 
-	/* Not using the ITS list? Everything is always mapped. */
-	if (!its_list_map)
+	if (gic_requires_eager_mapping())
 		return;
 
 	raw_spin_lock_irqsave(&vmovp_lock, flags);
@@ -1538,7 +1653,7 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
 	unsigned long flags;
 
-	/* Not using the ITS list? Everything is always mapped. */
-	if (!its_list_map)
+	if (gic_requires_eager_mapping())
 		return;
 
 	raw_spin_lock_irqsave(&vmovp_lock, flags);
@@ -2484,6 +2599,10 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
 	if (!gic_rdists->has_rvpeid)
 		return true;
 
+	/* Skip non-present CPUs */
+	if (!base)
+		return true;
+
 	val  = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
 
 	esz  = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
@@ -3514,17 +3633,25 @@ static int its_vpe_set_affinity(struct irq_data *d,
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	int from, cpu = cpumask_first(mask_val);
+	unsigned long flags;
 
 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as
 	 * we can and only do it if we really have to. Also, if mapped
 	 * into the proxy device, we need to move the doorbell
 	 * interrupt to its new location.
+	 *
+	 * Another thing is that changing the affinity of a vPE affects
+	 * *other interrupts* such as all the vLPIs that are routed to
+	 * this vPE. This means that the irq_desc lock is not enough to
+	 * protect us, and that we must ensure nobody samples vpe->col_idx
+	 * during the update, hence the lock below which must also be
+	 * taken on any vLPI handling path that evaluates vpe->col_idx.
 	 */
-	if (vpe->col_idx == cpu)
+	from = vpe_to_cpuid_lock(vpe, &flags);
+	if (from == cpu)
 		goto out;
 
-	from = vpe->col_idx;
 	vpe->col_idx = cpu;
 
 	/*
@@ -3540,6 +3667,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
 out:
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+	vpe_to_cpuid_unlock(vpe, flags);
 
 	return IRQ_SET_MASK_OK_DONE;
 }
@@ -3651,9 +3779,11 @@ static void its_vpe_send_inv(struct irq_data *d)
 		void __iomem *rdbase;
 
 		/* Target the redistributor this VPE is currently known on */
+		raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
 		gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
 		wait_for_syncr(rdbase);
+		raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 	} else {
 		its_vpe_send_cmd(vpe, its_send_inv);
 	}
@@ -3820,8 +3950,12 @@ static void its_vpe_4_1_invall(struct its_vpe *vpe)
 	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
 
 	/* Target the redistributor this vPE is currently known on */
+	raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 	rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
 	gic_write_lpir(val, rdbase + GICR_INVALLR);
 
 	wait_for_syncr(rdbase);
+	raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 }
 
 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
@@ -3856,6 +3990,221 @@ static struct irq_chip its_vpe_4_1_irq_chip = {
 	.irq_set_vcpu_affinity	= its_vpe_4_1_set_vcpu_affinity,
 };
 
+static void its_configure_sgi(struct irq_data *d, bool clear)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_cmd_desc desc;
+
+	desc.its_vsgi_cmd.vpe = vpe;
+	desc.its_vsgi_cmd.sgi = d->hwirq;
+	desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
+	desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
+	desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
+	desc.its_vsgi_cmd.clear = clear;
+
+	/*
+	 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
+	 * destination VPE is mapped there. Since we map them eagerly at
+	 * activation time, we're pretty sure the first GICv4.1 ITS will do.
+	 */
+	its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
+}
+
+static void its_sgi_mask_irq(struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	vpe->sgi_config[d->hwirq].enabled = false;
+	its_configure_sgi(d, false);
+}
+
+static void its_sgi_unmask_irq(struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	vpe->sgi_config[d->hwirq].enabled = true;
+	its_configure_sgi(d, false);
+}
+
+static int its_sgi_set_affinity(struct irq_data *d,
+				const struct cpumask *mask_val,
+				bool force)
+{
+	/*
+	 * There is no notion of affinity for virtual SGIs, at least
+	 * not on the host (since they can only be targetting a vPE).
+	 * Tell the kernel we've done whatever it asked for.
+	 */
+	return IRQ_SET_MASK_OK;
+}
+
+static int its_sgi_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which,
+				     bool state)
+{
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	if (state) {
+		struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+		struct its_node *its = find_4_1_its();
+		u64 val;
+
+		val  = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
+		val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
+		writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
+	} else {
+		its_configure_sgi(d, true);
+	}
+
+	return 0;
+}
+
+static int its_sgi_get_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which, bool *val)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	void __iomem *base;
+	unsigned long flags;
+	u32 count = 1000000;	/* 1s! */
+	u32 status;
+	int cpu;
+
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	/*
+	 * Locking galore! We can race against two different events:
+	 *
+	 * - Concurent vPE affinity change: we must make sure it cannot
+	 *   happen, or we'll talk to the wrong redistributor. This is
+	 *   identical to what happens with vLPIs.
+	 *
+	 * - Concurrent VSGIPENDR access: As it involves accessing two
+	 *   MMIO registers, this must be made atomic one way or another.
+	 */
+	cpu = vpe_to_cpuid_lock(vpe, &flags);
+	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
+	writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
+	do {
+		status = readl_relaxed(base + GICR_VSGIPENDR);
+		if (!(status & GICR_VSGIPENDR_BUSY))
+			goto out;
+
+		count--;
+		if (!count) {
+			pr_err_ratelimited("Unable to get SGI status\n");
+			goto out;
+		}
+		cpu_relax();
+		udelay(1);
+	} while (count);
+
+out:
+	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	vpe_to_cpuid_unlock(vpe, flags);
+
+	if (!count)
+		return -ENXIO;
+
+	*val = !!(status & (1 << d->hwirq));
+
+	return 0;
+}
+
+static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	switch (info->cmd_type) {
+	case PROP_UPDATE_VSGI:
+		vpe->sgi_config[d->hwirq].priority = info->priority;
+		vpe->sgi_config[d->hwirq].group = info->group;
+		its_configure_sgi(d, false);
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static struct irq_chip its_sgi_irq_chip = {
+	.name			= "GICv4.1-sgi",
+	.irq_mask		= its_sgi_mask_irq,
+	.irq_unmask		= its_sgi_unmask_irq,
+	.irq_set_affinity	= its_sgi_set_affinity,
+	.irq_set_irqchip_state	= its_sgi_set_irqchip_state,
+	.irq_get_irqchip_state	= its_sgi_get_irqchip_state,
+	.irq_set_vcpu_affinity	= its_sgi_set_vcpu_affinity,
+};
+
+static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
+				    unsigned int virq, unsigned int nr_irqs,
+				    void *args)
+{
+	struct its_vpe *vpe = args;
+	int i;
+
+	/* Yes, we do want 16 SGIs */
+	WARN_ON(nr_irqs != 16);
+
+	for (i = 0; i < 16; i++) {
+		vpe->sgi_config[i].priority = 0;
+		vpe->sgi_config[i].enabled = false;
+		vpe->sgi_config[i].group = false;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+					      &its_sgi_irq_chip, vpe);
+		irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
+	}
+
+	return 0;
+}
+
+static void its_sgi_irq_domain_free(struct irq_domain *domain,
+				    unsigned int virq,
+				    unsigned int nr_irqs)
+{
+	/* Nothing to do */
+}
+
+static int its_sgi_irq_domain_activate(struct irq_domain *domain,
+				       struct irq_data *d, bool reserve)
+{
+	/* Write out the initial SGI configuration */
+	its_configure_sgi(d, false);
+	return 0;
+}
+
+static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
+					  struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	/*
+	 * The VSGI command is awkward:
+	 *
+	 * - To change the configuration, CLEAR must be set to false,
+	 *   leaving the pending bit unchanged.
+	 * - To clear the pending bit, CLEAR must be set to true, leaving
+	 *   the configuration unchanged.
+	 *
+	 * You just can't do both at once, hence the two commands below.
+	 */
+	vpe->sgi_config[d->hwirq].enabled = false;
+	its_configure_sgi(d, false);
+	its_configure_sgi(d, true);
+}
+
+static const struct irq_domain_ops its_sgi_domain_ops = {
+	.alloc		= its_sgi_irq_domain_alloc,
+	.free		= its_sgi_irq_domain_free,
+	.activate	= its_sgi_irq_domain_activate,
+	.deactivate	= its_sgi_irq_domain_deactivate,
+};
+
 static int its_vpe_id_alloc(void)
 {
 	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
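Because the 16 vSGIs registered above are ordinary Linux interrupts, a host-side user injects one through the generic irqchip-state API rather than by building VSGI commands by hand. A hypothetical injection helper (illustrative only; not part of this patch) could look like this:

static int example_inject_vsgi(struct its_vpe *vpe, unsigned int sgi)
{
	/* Look up the Linux irq that its_sgi_irq_domain_alloc() set up */
	unsigned int irq = irq_find_mapping(vpe->sgi_domain, sgi);

	if (!irq)
		return -EINVAL;

	/* Ends up in its_sgi_set_irqchip_state(), which writes GITS_SGIR */
	return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
}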
@@ -3889,6 +4238,7 @@ static int its_vpe_init(struct its_vpe *vpe)
 		return -ENOMEM;
 	}
 
+	raw_spin_lock_init(&vpe->vpe_lock);
 	vpe->vpe_id = vpe_id;
 	vpe->vpt_page = vpt_page;
 	if (gic_rdists->has_rvpeid)
@@ -3998,8 +4348,12 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	struct its_node *its;
 
-	/* If we use the list map, we issue VMAPP on demand... */
-	if (its_list_map)
+	/*
+	 * If we use the list map, we issue VMAPP on demand... Unless
+	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
+	 * so that VSGIs can work.
+	 */
+	if (!gic_requires_eager_mapping())
 		return 0;
 
 	/* Map the VPE to the first possible CPU */
@@ -4025,10 +4379,10 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
 	struct its_node *its;
 
 	/*
-	 * If we use the list map, we unmap the VPE once no VLPIs are
-	 * associated with the VM.
+	 * If we use the list map on GICv4.0, we unmap the VPE once no
+	 * VLPIs are associated with the VM.
 	 */
-	if (its_list_map)
+	if (!gic_requires_eager_mapping())
 		return;
 
 	list_for_each_entry(its, &its_nodes, entry) {
@@ -4442,7 +4796,7 @@ static int __init its_probe_one(struct resource *res,
 	struct page *page;
 	int err;
 
-	its_base = ioremap(res->start, resource_size(res));
+	its_base = ioremap(res->start, SZ_64K);
 	if (!its_base) {
 		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
 		return -ENOMEM;
@@ -4493,6 +4847,13 @@ static int __init its_probe_one(struct resource *res,
 	if (is_v4_1(its)) {
 		u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
 
+		its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
+		if (!its->sgir_base) {
+			err = -ENOMEM;
+			goto out_free_its;
+		}
+
 		its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
 
 		pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
@@ -4506,7 +4867,7 @@ static int __init its_probe_one(struct resource *res,
 				get_order(ITS_CMD_QUEUE_SZ));
 	if (!page) {
 		err = -ENOMEM;
-		goto out_free_its;
+		goto out_unmap_sgir;
 	}
 	its->cmd_base = (void *)page_address(page);
 	its->cmd_write = its->cmd_base;
@@ -4573,6 +4934,9 @@ static int __init its_probe_one(struct resource *res,
 	its_free_tables(its);
 out_free_cmd:
 	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+out_unmap_sgir:
+	if (its->sgir_base)
+		iounmap(its->sgir_base);
 out_free_its:
 	kfree(its);
 out_unmap:
@@ -4856,6 +5220,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
 	struct device_node *of_node;
 	struct its_node *its;
 	bool has_v4 = false;
+	bool has_v4_1 = false;
 	int err;
 
 	gic_rdists = rdists;
@@ -4876,12 +5241,25 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
 	if (err)
 		return err;
 
-	list_for_each_entry(its, &its_nodes, entry)
+	list_for_each_entry(its, &its_nodes, entry) {
 		has_v4 |= is_v4(its);
+		has_v4_1 |= is_v4_1(its);
+	}
+
+	/* Don't bother with inconsistent systems */
+	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
+		rdists->has_rvpeid = false;
 
 	if (has_v4 & rdists->has_vlpis) {
+		const struct irq_domain_ops *sgi_ops;
+
+		if (has_v4_1)
+			sgi_ops = &its_sgi_domain_ops;
+		else
+			sgi_ops = NULL;
+
 		if (its_init_vpe_domain() ||
-		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
+		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
 			rdists->has_vlpis = false;
 			pr_err("ITS: Disabling GICv4 support\n");
 		}
drivers/irqchip/irq-gic-v3.c
@@ -723,6 +723,7 @@ static void __init gic_dist_init(void)
 	unsigned int i;
 	u64 affinity;
 	void __iomem *base = gic_data.dist_base;
+	u32 val;
 
 	/* Disable the distributor */
 	writel_relaxed(0, base + GICD_CTLR);
@@ -755,9 +756,14 @@ static void __init gic_dist_init(void)
 	/* Now do the common stuff, and wait for the distributor to drain */
 	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
 
+	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+		pr_info("Enabling SGIs without active state\n");
+		val |= GICD_CTLR_nASSGIreq;
+	}
+
 	/* Enable distributor with ARE, Group1 */
-	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
-		       base + GICD_CTLR);
+	writel_relaxed(val, base + GICD_CTLR);
 
 	/*
 	 * Set all global interrupts to the boot CPU only. ARE must be
@@ -828,6 +834,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 	typer = gic_read_typer(ptr + GICR_TYPER);
 	if ((typer >> 32) == aff) {
 		u64 offset = ptr - region->redist_base;
+		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
 		gic_data_rdist_rd_base() = ptr;
 		gic_data_rdist()->phys_base = region->phys_base + offset;
@@ -1758,6 +1765,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
 		gic_v3_kvm_info.vcpu = r;
 
 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }
@@ -2073,6 +2081,7 @@ static void __init gic_acpi_setup_kvm_info(void)
 	}
 
 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }
drivers/irqchip/irq-gic-v4.c
@@ -85,6 +85,53 @@
 static struct irq_domain *gic_domain;
 static const struct irq_domain_ops *vpe_domain_ops;
+static const struct irq_domain_ops *sgi_domain_ops;
+
+static bool has_v4_1(void)
+{
+	return !!sgi_domain_ops;
+}
+
+static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
+{
+	char *name;
+	int sgi_base;
+
+	if (!has_v4_1())
+		return 0;
+
+	name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
+	if (!name)
+		goto err;
+
+	vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
+	if (!vpe->fwnode)
+		goto err;
+
+	kfree(name);
+	name = NULL;
+
+	vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
+						   sgi_domain_ops, vpe);
+	if (!vpe->sgi_domain)
+		goto err;
+
+	sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
+					   NUMA_NO_NODE, vpe,
+					   false, NULL);
+	if (sgi_base <= 0)
+		goto err;
+
+	return 0;
+
+err:
+	if (vpe->sgi_domain)
+		irq_domain_remove(vpe->sgi_domain);
+	if (vpe->fwnode)
+		irq_domain_free_fwnode(vpe->fwnode);
+	kfree(name);
+
+	return -ENOMEM;
+}
 
 int its_alloc_vcpu_irqs(struct its_vm *vm)
 {
@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
 	if (vpe_base_irq <= 0)
 		goto err;
 
-	for (i = 0; i < vm->nr_vpes; i++)
+	for (i = 0; i < vm->nr_vpes; i++) {
+		int ret;
 		vm->vpes[i]->irq = vpe_base_irq + i;
+		ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
+		if (ret)
+			goto err;
+	}
 
 	return 0;
@@ -126,8 +178,28 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
 	return -ENOMEM;
 }
 
+static void its_free_sgi_irqs(struct its_vm *vm)
+{
+	int i;
+
+	if (!has_v4_1())
+		return;
+
+	for (i = 0; i < vm->nr_vpes; i++) {
+		unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
+
+		if (WARN_ON(!irq))
+			continue;
+
+		irq_domain_free_irqs(irq, 16);
+		irq_domain_remove(vm->vpes[i]->sgi_domain);
+		irq_domain_free_fwnode(vm->vpes[i]->fwnode);
+	}
+}
+
 void its_free_vcpu_irqs(struct its_vm *vm)
 {
+	its_free_sgi_irqs(vm);
 	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
 	irq_domain_remove(vm->domain);
 	irq_domain_free_fwnode(vm->fwnode);
@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
 	return irq_set_vcpu_affinity(vpe->irq, info);
 }
 
-int its_schedule_vpe(struct its_vpe *vpe, bool on)
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
 {
-	struct its_cmd_info info;
+	struct irq_desc *desc = irq_to_desc(vpe->irq);
+	struct its_cmd_info info = { };
 	int ret;
 
 	WARN_ON(preemptible());
 
-	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
+	info.cmd_type = DESCHEDULE_VPE;
+	if (has_v4_1()) {
+		/* GICv4.1 can directly deal with doorbells */
+		info.req_db = db;
+	} else {
+		/* Undo the nested disable_irq() calls... */
+		while (db && irqd_irq_disabled(&desc->irq_data))
+			enable_irq(vpe->irq);
+	}
+
+	ret = its_send_vpe_cmd(vpe, &info);
+	if (!ret)
+		vpe->resident = false;
+
+	return ret;
+}
+
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
+{
+	struct its_cmd_info info = { };
+	int ret;
+
+	WARN_ON(preemptible());
+
+	info.cmd_type = SCHEDULE_VPE;
+	if (has_v4_1()) {
+		info.g0en = g0en;
+		info.g1en = g1en;
+	} else {
+		/* Disabled the doorbell, as we're about to enter the guest */
+		disable_irq_nosync(vpe->irq);
+	}
 
 	ret = its_send_vpe_cmd(vpe, &info);
 	if (!ret)
-		vpe->resident = on;
+		vpe->resident = true;
 
 	return ret;
 }
@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
 	return irq_set_vcpu_affinity(irq, &info);
 }
 
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
+int its_prop_update_vsgi(int irq, u8 priority, bool group)
+{
+	struct its_cmd_info info = {
+		.cmd_type = PROP_UPDATE_VSGI,
+		{
+			.priority = priority,
+			.group = group,
+		},
+	};
+
+	return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain,
+		const struct irq_domain_ops *vpe_ops,
+		const struct irq_domain_ops *sgi_ops)
 {
 	if (domain) {
 		pr_info("ITS: Enabling GICv4 support\n");
 		gic_domain = domain;
-		vpe_domain_ops = ops;
+		vpe_domain_ops = vpe_ops;
+		sgi_domain_ops = sgi_ops;
 		return 0;
 	}
include/kvm/arm_vgic.h
@@ -70,6 +70,7 @@ struct vgic_global {
 	/* Hardware has GICv4? */
 	bool			has_gicv4;
+	bool			has_gicv4_1;
 
 	/* GIC system register CPU interface */
 	struct static_key_false gicv3_cpuif;
include/linux/irqchip/arm-gic-common.h
@@ -32,6 +32,8 @@ struct gic_kvm_info {
 	struct resource vctrl;
 	/* vlpi support */
 	bool		has_v4;
+	/* rvpeid support */
+	bool		has_v4_1;
 };
 
 const struct gic_kvm_info *gic_get_kvm_info(void);
include/linux/irqchip/arm-gic-v3.h
@@ -57,6 +57,7 @@
 #define GICD_SPENDSGIR			0x0F20
 
 #define GICD_CTLR_RWP			(1U << 31)
+#define GICD_CTLR_nASSGIreq		(1U << 8)
 #define GICD_CTLR_DS			(1U << 6)
 #define GICD_CTLR_ARE_NS		(1U << 4)
 #define GICD_CTLR_ENABLE_G1A		(1U << 1)
@@ -90,6 +91,7 @@
 #define GICD_TYPER_ESPIS(typer)						\
 	(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
 
+#define GICD_TYPER2_nASSGIcap		(1U << 8)
 #define GICD_TYPER2_VIL			(1U << 7)
 #define GICD_TYPER2_VID			GENMASK(4, 0)
@@ -346,6 +348,15 @@
 #define GICR_VPENDBASER_4_1_VGRP1EN	(1ULL << 58)
 #define GICR_VPENDBASER_4_1_VPEID	GENMASK_ULL(15, 0)
 
+#define GICR_VSGIR			0x0080
+
+#define GICR_VSGIR_VPEID		GENMASK(15, 0)
+
+#define GICR_VSGIPENDR			0x0088
+
+#define GICR_VSGIPENDR_BUSY		(1U << 31)
+#define GICR_VSGIPENDR_PENDING		GENMASK(15, 0)
+
 /*
  * ITS registers, offsets from ITS_base
  */
@@ -369,6 +380,11 @@
 #define GITS_TRANSLATER			0x10040
 
+#define GITS_SGIR			0x20020
+
+#define GITS_SGIR_VPEID			GENMASK_ULL(47, 32)
+#define GITS_SGIR_VINTID		GENMASK_ULL(3, 0)
+
 #define GITS_CTLR_ENABLE		(1U << 0)
 #define GITS_CTLR_ImDe			(1U << 1)
 #define	GITS_CTLR_ITS_NUMBER_SHIFT	4
@@ -503,8 +519,9 @@
 #define GITS_CMD_VMAPTI			GITS_CMD_GICv4(GITS_CMD_MAPTI)
 #define GITS_CMD_VMOVI			GITS_CMD_GICv4(GITS_CMD_MOVI)
 #define GITS_CMD_VSYNC			GITS_CMD_GICv4(GITS_CMD_SYNC)
-/* VMOVP and INVDB are the odd ones, as they dont have a physical counterpart */
+/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */
 #define GITS_CMD_VMOVP			GITS_CMD_GICv4(2)
+#define GITS_CMD_VSGI			GITS_CMD_GICv4(3)
 #define GITS_CMD_INVDB			GITS_CMD_GICv4(0xe)
 /*
@@ -653,6 +670,7 @@
 struct rdists {
 	struct {
+		raw_spinlock_t	rd_lock;
 		void __iomem	*rd_base;
 		struct page	*pend_page;
 		phys_addr_t	phys_base;
include/linux/irqchip/arm-gic-v4.h
@@ -49,10 +49,22 @@ struct its_vpe {
 		};
 		/* GICv4.1 implementations */
 		struct {
+			struct fwnode_handle	*fwnode;
+			struct irq_domain	*sgi_domain;
+			struct {
+				u8	priority;
+				bool	enabled;
+				bool	group;
+			}			sgi_config[16];
 			atomic_t vmapp_count;
 		};
 	};
 
+	/*
+	 * Ensures mutual exclusion between affinity setting of the
+	 * vPE and vLPI operations using vpe->col_idx.
+	 */
+	raw_spinlock_t		vpe_lock;
+
 	/*
 	 * This collection ID is used to indirect the target
 	 * redistributor for this VPE. The ID itself isn't involved in
@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type {
 	SCHEDULE_VPE,
 	DESCHEDULE_VPE,
 	INVALL_VPE,
+	PROP_UPDATE_VSGI,
 };
 
 struct its_cmd_info {
@@ -105,19 +118,27 @@ struct its_cmd_info {
 			bool		g0en;
 			bool		g1en;
 		};
+		struct {
+			u8		priority;
+			bool		group;
+		};
 	};
 };
 
 int its_alloc_vcpu_irqs(struct its_vm *vm);
 void its_free_vcpu_irqs(struct its_vm *vm);
-int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
 int its_invall_vpe(struct its_vpe *vpe);
 int its_map_vlpi(int irq, struct its_vlpi_map *map);
 int its_get_vlpi(int irq, struct its_vlpi_map *map);
 int its_unmap_vlpi(int irq);
 int its_prop_update_vlpi(int irq, u8 config, bool inv);
+int its_prop_update_vsgi(int irq, u8 priority, bool group);
 
 struct irq_domain_ops;
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+int its_init_v4(struct irq_domain *domain,
+		const struct irq_domain_ops *vpe_ops,
+		const struct irq_domain_ops *sgi_ops);
 
 #endif
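The new its_init_v4() prototype takes separate vPE and vSGI domain ops; passing NULL for sgi_ops is what makes has_v4_1() in irq-gic-v4.c report a GICv4.0-only system. A hedged sketch of the registration step, mirroring the its_init() change in irq-gic-v3-its.c above (the wrapper name is illustrative, not part of the patch):

static int example_register_v4(struct irq_domain *parent, bool v4_1)
{
	/* GICv4.1 hardware gets the vSGI domain ops, GICv4.0 gets none */
	const struct irq_domain_ops *sgi_ops = v4_1 ? &its_sgi_domain_ops : NULL;

	return its_init_v4(parent, &its_vpe_domain_ops, sgi_ops);
}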
virt/kvm/arm/vgic/vgic-v3.c
@@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 	/* GICv4 support? */
 	if (info->has_v4) {
 		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
-		kvm_info("GICv4 support %sabled\n",
+		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
+		kvm_info("GICv4%s support %sabled\n",
+			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
 			 gicv4_enable ? "en" : "dis");
 	}
virt/kvm/arm/vgic/vgic-v4.c
@@ -67,10 +67,10 @@
  * it. And if we've migrated our vcpu from one CPU to another, we must
  * tell the ITS (so that the messages reach the right redistributor).
  * This is done in two steps: first issue a irq_set_affinity() on the
- * irq corresponding to the vcpu, then call its_schedule_vpe(). You
- * must be in a non-preemptible context. On exit, another call to
- * its_schedule_vpe() tells the redistributor that we're done with the
- * vcpu.
+ * irq corresponding to the vcpu, then call its_make_vpe_resident().
+ * You must be in a non-preemptible context. On exit, a call to
+ * its_make_vpe_non_resident() tells the redistributor that we're done
+ * with the vcpu.
  *
  * Finally, the doorbell handling: Each vcpu is allocated an interrupt
  * which will fire each time a VLPI is made pending whilst the vcpu is
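A sketch of the residency protocol described in the comment above, under the assumption of a non-preemptible caller; it condenses what vgic_v4_load() and vgic_v4_put() below actually do, and the function names and the hard-coded Group-1 enable are illustrative only:

static int example_make_resident(struct its_vpe *vpe)
{
	int err;

	/* Step 1: route the vPE's doorbell irq to the CPU we will run on */
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	/* Step 2: tell the redistributor the vPE is resident (Group-1 on) */
	return its_make_vpe_resident(vpe, false, true);
}

static int example_make_non_resident(struct its_vpe *vpe, bool need_db)
{
	/* On exit: deschedule, asking for a doorbell if the vcpu will block */
	return its_make_vpe_non_resident(vpe, need_db);
}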
@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	struct kvm_vcpu *vcpu = info;
 
 	/* We got the message, no need to fire again */
-	if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
+	if (!kvm_vgic_global_state.has_gicv4_1 &&
+	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
 		disable_irq_nosync(irq);
 
 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
@@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm)
 int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
 {
 	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-	struct irq_desc *desc = irq_to_desc(vpe->irq);
 
 	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
 		return 0;
 
-	/*
-	 * If blocking, a doorbell is required. Undo the nested
-	 * disable_irq() calls...
-	 */
-	while (need_db && irqd_irq_disabled(&desc->irq_data))
-		enable_irq(vpe->irq);
-
-	return its_schedule_vpe(vpe, false);
+	return its_make_vpe_non_resident(vpe, need_db);
 }
 
 int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
 	if (err)
 		return err;
 
-	/* Disabled the doorbell, as we're about to enter the guest */
-	disable_irq_nosync(vpe->irq);
-
-	err = its_schedule_vpe(vpe, true);
+	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
 	if (err)
 		return err;
 
 	/*
 	 * Now that the VPE is resident, let's get rid of a potential
-	 * doorbell interrupt that would still be pending.
+	 * doorbell interrupt that would still be pending. This is a
+	 * GICv4.0 only "feature"...
 	 */
-	return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+	if (!kvm_vgic_global_state.has_gicv4_1)
+		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+
+	return err;
 }
 
 static struct vgic_its *vgic_get_its(struct kvm *kvm,