Commit 7b7563a9 authored by Suravee Suthikulpanit, committed by Joerg Roedel

iommu/amd: Consolidate feature detection and reporting logic

Currently, the IOMMU driver assumes that capabilities are homogeneous across
all IOMMU instances. During early_amd_iommu_init(), the driver probes all IVHD
blocks and does a sanity check to make sure that only features common among all
IOMMU instances are supported. These are tracked in the global amd_iommu_efr
and amd_iommu_efr2, which should be used whenever the driver needs to check
hardware capabilities.

Therefore, introduce check_feature() and check_feature2(), and modify
the driver to adopt the new helper functions.

In addition, clean up print_iommu_info() so that it no longer reports the same
EFR/EFR2 values redundantly for each IOMMU instance.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20230921092147.5930-9-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 45677ab1
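
As a quick illustration of the scheme described above, here is a self-contained
sketch, not the kernel code itself: the per_iommu_efr array, its values, and the
main() driver are made up for illustration, and only two feature bits are
defined. The per-instance EFR values are AND-ed into a single global mask during
init, and every later capability check consults that mask through
check_feature().

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FEATURE_PPR (1ULL << 1)    /* bit positions match the driver's feat_str[] table */
    #define FEATURE_GT  (1ULL << 4)

    static uint64_t amd_iommu_efr;     /* global mask, mirroring the patch */

    static bool check_feature(uint64_t mask)
    {
        return (amd_iommu_efr & mask);
    }

    int main(void)
    {
        /* Hypothetical EFR values read from three IOMMU instances. */
        const uint64_t per_iommu_efr[] = { 0x3f, 0x3d, 0x1f };
        size_t i;

        amd_iommu_efr = ~0ULL;
        for (i = 0; i < sizeof(per_iommu_efr) / sizeof(per_iommu_efr[0]); i++)
            amd_iommu_efr &= per_iommu_efr[i];    /* keep only the common bits */

        /* GT survives the intersection (bit 4 is set everywhere); PPR does not. */
        printf("GT: %d, PPR: %d\n", check_feature(FEATURE_GT), check_feature(FEATURE_PPR));
        return 0;
    }
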
@@ -87,9 +87,19 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
 	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
 }
 
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
+static inline bool check_feature(u64 mask)
 {
-	return !!(iommu->features & mask);
+	return (amd_iommu_efr & mask);
 }
 
+static inline bool check_feature2(u64 mask)
+{
+	return (amd_iommu_efr2 & mask);
+}
+
+static inline int check_feature_gpt_level(void)
+{
+	return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
+}
+
 static inline u64 iommu_virt_to_phys(void *vaddr)
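
Unlike the boolean helpers, check_feature_gpt_level() above pulls a multi-bit
field out of the EFR, so it shifts before masking. Below is a standalone sketch
of that extraction pattern, using assumed values FEATURE_GATS_SHIFT = 12 and
FEATURE_GATS_MASK = 0x3; the real constants live in the driver's headers and
are not part of this diff.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values for illustration; the real ones are defined by the driver. */
    #define FEATURE_GATS_SHIFT 12
    #define FEATURE_GATS_MASK  0x3ULL

    static int check_feature_gpt_level(uint64_t efr)
    {
        /* Isolate the two-bit GATS field rather than testing a single flag. */
        return (efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK;
    }

    int main(void)
    {
        uint64_t efr = 0x2000;    /* bit 13 set -> GATS field == 2 */

        printf("GATS level: %d\n", check_feature_gpt_level(efr));
        return 0;
    }
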
@@ -145,8 +155,5 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
 				  u64 *root, int mode);
 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
 
-extern u64 amd_iommu_efr;
-extern u64 amd_iommu_efr2;
-
 extern bool amd_iommu_snp_en;
 #endif
@@ -897,6 +897,10 @@ extern bool amd_iommu_force_isolation;
 /* Max levels of glxval supported */
 extern int amd_iommu_max_glx_val;
 
+/* Global EFR and EFR2 registers */
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
+
 /*
  * This function flushes all internal caches of
  * the IOMMU used by this driver.
@@ -270,7 +270,7 @@ int amd_iommu_get_num_iommus(void)
  * Iterate through all the IOMMUs to get common EFR
  * masks among all IOMMUs and warn if found inconsistency.
  */
-static void get_global_efr(void)
+static __init void get_global_efr(void)
 {
 	struct amd_iommu *iommu;
@@ -302,16 +302,6 @@ static void get_global_efr(void)
 	pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
 }
 
-static bool check_feature_on_all_iommus(u64 mask)
-{
-	return !!(amd_iommu_efr & mask);
-}
-
-static inline int check_feature_gpt_level(void)
-{
-	return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
-}
-
 /*
  * For IVHD type 0x11/0x40, EFR is also available via IVHD.
  * Default to IVHD EFR since it is available sooner
@@ -397,7 +387,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
 	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
 	u64 entry = start & PM_ADDR_MASK;
 
-	if (!check_feature_on_all_iommus(FEATURE_SNP))
+	if (!check_feature(FEATURE_SNP))
 		return;
 
 	/* Note:
@@ -867,7 +857,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
 	void *buf = (void *)__get_free_pages(gfp, order);
 
 	if (buf &&
-	    check_feature_on_all_iommus(FEATURE_SNP) &&
+	    check_feature(FEATURE_SNP) &&
 	    set_memory_4k((unsigned long)buf, (1 << order))) {
 		free_pages((unsigned long)buf, order);
 		buf = NULL;
@@ -1046,7 +1036,7 @@ static void iommu_enable_xt(struct amd_iommu *iommu)
 
 static void iommu_enable_gt(struct amd_iommu *iommu)
 {
-	if (!iommu_feature(iommu, FEATURE_GT))
+	if (!check_feature(FEATURE_GT))
 		return;
 
 	iommu_feature_enable(iommu, CONTROL_GT_EN);
@@ -1985,7 +1975,7 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 	u64 val;
 	struct pci_dev *pdev = iommu->dev;
 
-	if (!iommu_feature(iommu, FEATURE_PC))
+	if (!check_feature(FEATURE_PC))
 		return;
 
 	amd_iommu_pc_present = true;
@@ -2012,8 +2002,7 @@ static ssize_t amd_iommu_show_features(struct device *dev,
 					struct device_attribute *attr,
 					char *buf)
 {
-	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
-
-	return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
+	return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
 }
 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
@@ -2049,9 +2038,9 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
 
 	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
 	features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
 
-	if (!iommu->features) {
-		iommu->features = features;
-		iommu->features2 = features2;
+	if (!amd_iommu_efr) {
+		amd_iommu_efr = features;
+		amd_iommu_efr2 = features2;
 		return;
 	}
@@ -2059,12 +2048,12 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
 	 * Sanity check and warn if EFR values from
 	 * IVHD and MMIO conflict.
 	 */
-	if (features != iommu->features ||
-	    features2 != iommu->features2) {
+	if (features != amd_iommu_efr ||
+	    features2 != amd_iommu_efr2) {
 		pr_warn(FW_WARN
 			"EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
-			features, iommu->features,
-			features2, iommu->features2);
+			features, amd_iommu_efr,
+			features2, amd_iommu_efr2);
 	}
 }
@@ -2090,12 +2079,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
 	late_iommu_features_init(iommu);
 
-	if (iommu_feature(iommu, FEATURE_GT)) {
+	if (check_feature(FEATURE_GT)) {
 		int glxval;
 		u32 max_pasid;
 		u64 pasmax;
 
-		pasmax = iommu->features & FEATURE_PASID_MASK;
+		pasmax = amd_iommu_efr & FEATURE_PASID_MASK;
 		pasmax >>= FEATURE_PASID_SHIFT;
 		max_pasid = (1 << (pasmax + 1)) - 1;
@@ -2103,7 +2092,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
 		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
 
-		glxval = iommu->features & FEATURE_GLXVAL_MASK;
+		glxval = amd_iommu_efr & FEATURE_GLXVAL_MASK;
 		glxval >>= FEATURE_GLXVAL_SHIFT;
 
 		if (amd_iommu_max_glx_val == -1)
@@ -2112,13 +2101,13 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
 	}
 
-	if (iommu_feature(iommu, FEATURE_GT) &&
-	    iommu_feature(iommu, FEATURE_PPR)) {
+	if (check_feature(FEATURE_GT) &&
+	    check_feature(FEATURE_PPR)) {
 		iommu->is_iommu_v2 = true;
 		amd_iommu_v2_present = true;
 	}
 
-	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
+	if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
 		return -ENOMEM;
 
 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
@@ -2130,8 +2119,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 	init_iommu_perf_ctr(iommu);
 
 	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
-		if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
-		    !iommu_feature(iommu, FEATURE_GT)) {
+		if (!check_feature(FEATURE_GIOSUP) ||
+		    !check_feature(FEATURE_GT)) {
 			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
 			amd_iommu_pgtable = AMD_IOMMU_V1;
 		}
@@ -2181,35 +2170,29 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 static void print_iommu_info(void)
 {
+	int i;
 	static const char * const feat_str[] = {
 		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
 		"IA", "GA", "HE", "PC"
 	};
-	struct amd_iommu *iommu;
-
-	for_each_iommu(iommu) {
-		struct pci_dev *pdev = iommu->dev;
-		int i;
 
-		pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
+	if (amd_iommu_efr) {
+		pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);
 
-		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-			pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
-
-			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
-				if (iommu_feature(iommu, (1ULL << i)))
-					pr_cont(" %s", feat_str[i]);
-			}
+		for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+			if (check_feature(1ULL << i))
+				pr_cont(" %s", feat_str[i]);
+		}
 
-			if (iommu->features & FEATURE_GAM_VAPIC)
-				pr_cont(" GA_vAPIC");
+		if (check_feature(FEATURE_GAM_VAPIC))
+			pr_cont(" GA_vAPIC");
 
-			if (iommu->features & FEATURE_SNP)
-				pr_cont(" SNP");
+		if (check_feature(FEATURE_SNP))
+			pr_cont(" SNP");
 
-			pr_cont("\n");
-		}
+		pr_cont("\n");
 	}
 
 	if (irq_remapping_enabled) {
 		pr_info("Interrupt remapping enabled\n");
 		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
@@ -2907,7 +2890,7 @@ static void enable_iommus_vapic(void)
 	}
 
 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-	    !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+	    !check_feature(FEATURE_GAM_VAPIC)) {
 		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
 		return;
 	}
@@ -3819,7 +3802,7 @@ int amd_iommu_snp_enable(void)
 		return -EINVAL;
 	}
 
-	amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+	amd_iommu_snp_en = check_feature(FEATURE_SNP);
 	if (!amd_iommu_snp_en)
 		return -EINVAL;
@@ -1295,7 +1295,7 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
 
 void iommu_flush_all_caches(struct amd_iommu *iommu)
 {
-	if (iommu_feature(iommu, FEATURE_IA)) {
+	if (check_feature(FEATURE_IA)) {
 		amd_iommu_flush_all(iommu);
 	} else {
 		amd_iommu_flush_dte_all(iommu);
@@ -1639,7 +1639,7 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
 		flags |= DTE_FLAG_IOTLB;
 
 	if (ppr) {
-		if (iommu_feature(iommu, FEATURE_EPHSUP))
+		if (check_feature(FEATURE_EPHSUP))
 			pte_root |= 1ULL << DEV_ENTRY_PPR;
 	}