Commit e339b51c authored by Suravee Suthikulpanit's avatar Suravee Suthikulpanit Committed by Joerg Roedel

iommu/amd: Modify logic for checking GT and PPR features

In order to support the v2 page table, the IOMMU driver needs to check if the
hardware can support the Guest Translation (GT) and Peripheral Page Request
(PPR) features. Currently, the IOMMU driver uses a global (amd_iommu_v2_present)
and a per-iommu (struct amd_iommu.is_iommu_v2) variable to track the
features. These variables are redundant since we could simply just check
the global EFR mask.

Therefore, replace it with a helper function with appropriate name.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20230921092147.5930-10-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 7b7563a9
...@@ -102,6 +102,12 @@ static inline int check_feature_gpt_level(void) ...@@ -102,6 +102,12 @@ static inline int check_feature_gpt_level(void)
return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK); return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
} }
/*
 * Report whether the IOMMU hardware supports both Guest Translation (GT)
 * and Peripheral Page Request (PPR), per the global EFR feature mask.
 * Both features are required for v2 page table support.
 */
static inline bool amd_iommu_gt_ppr_supported(void)
{
	if (!check_feature(FEATURE_GT))
		return false;

	return check_feature(FEATURE_PPR);
}
static inline u64 iommu_virt_to_phys(void *vaddr) static inline u64 iommu_virt_to_phys(void *vaddr)
{ {
return (u64)__sme_set(virt_to_phys(vaddr)); return (u64)__sme_set(virt_to_phys(vaddr));
......
...@@ -679,9 +679,6 @@ struct amd_iommu { ...@@ -679,9 +679,6 @@ struct amd_iommu {
/* Extended features 2 */ /* Extended features 2 */
u64 features2; u64 features2;
/* IOMMUv2 */
bool is_iommu_v2;
/* PCI device id of the IOMMU device */ /* PCI device id of the IOMMU device */
u16 devid; u16 devid;
...@@ -890,8 +887,6 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap; ...@@ -890,8 +887,6 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
/* Smallest max PASID supported by any IOMMU in the system */ /* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid; extern u32 amd_iommu_max_pasid;
extern bool amd_iommu_v2_present;
extern bool amd_iommu_force_isolation; extern bool amd_iommu_force_isolation;
/* Max levels of glxval supported */ /* Max levels of glxval supported */
......
...@@ -187,7 +187,6 @@ bool amd_iommu_iotlb_sup __read_mostly = true; ...@@ -187,7 +187,6 @@ bool amd_iommu_iotlb_sup __read_mostly = true;
u32 amd_iommu_max_pasid __read_mostly = ~0; u32 amd_iommu_max_pasid __read_mostly = ~0;
bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly; static bool amd_iommu_pc_present __read_mostly;
bool amdr_ivrs_remap_support __read_mostly; bool amdr_ivrs_remap_support __read_mostly;
...@@ -2101,12 +2100,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) ...@@ -2101,12 +2100,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
} }
if (check_feature(FEATURE_GT) &&
check_feature(FEATURE_PPR)) {
iommu->is_iommu_v2 = true;
amd_iommu_v2_present = true;
}
if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu)) if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM; return -ENOMEM;
...@@ -3676,7 +3669,7 @@ bool amd_iommu_v2_supported(void) ...@@ -3676,7 +3669,7 @@ bool amd_iommu_v2_supported(void)
* (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without
* setting up IOMMUv1 page table. * setting up IOMMUv1 page table.
*/ */
return amd_iommu_v2_present && !amd_iommu_snp_en; return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en;
} }
EXPORT_SYMBOL(amd_iommu_v2_supported); EXPORT_SYMBOL(amd_iommu_v2_supported);
......
...@@ -397,7 +397,7 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev) ...@@ -397,7 +397,7 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
*/ */
if ((iommu_default_passthrough() || !amd_iommu_force_isolation) && if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) { dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
dev_data->iommu_v2 = iommu->is_iommu_v2; dev_data->iommu_v2 = amd_iommu_gt_ppr_supported();
} }
dev_iommu_priv_set(dev, dev_data); dev_iommu_priv_set(dev, dev_data);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment