Commit e8cca466 authored by Joerg Roedel


Merge branches 'iommu/fixes', 'arm/tegra', 'arm/smmu', 'virtio', 'x86/vt-d', 'x86/amd', 'core' and 's390' into next
@@ -2220,7 +2220,7 @@
 			forcing Dual Address Cycle for PCI cards supporting
 			greater than 32-bit addressing.
-	iommu.strict=	[ARM64, X86] Configure TLB invalidation behaviour
+	iommu.strict=	[ARM64, X86, S390] Configure TLB invalidation behaviour
 			Format: { "0" | "1" }
 			0 - Lazy mode.
 			  Request that DMA unmap operations use deferred
@@ -5611,9 +5611,10 @@
 	s390_iommu=	[HW,S390]
 			Set s390 IOTLB flushing mode
 		strict
-			With strict flushing every unmap operation will result in
-			an IOTLB flush. Default is lazy flushing before reuse,
-			which is faster.
+			With strict flushing every unmap operation will result
+			in an IOTLB flush. Default is lazy flushing before
+			reuse, which is faster. Deprecated, equivalent to
+			iommu.strict=1.
 	s390_iommu_aperture=	[KNL,S390]
 			Specifies the size of the per device DMA address space
......
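For illustration only (not part of the patch), the documented behaviour above corresponds to the following boot-parameter spellings; the lazy mode remains the s390 default when neither is given:

	iommu.strict=1		# strict TLB invalidation on ARM64, X86 and S390
	s390_iommu=strict	# deprecated s390-only spelling, same effect as iommu.strict=1
	iommu.strict=0		# lazy (deferred) invalidation, the s390 default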
@@ -110,6 +110,7 @@ properties:
               - qcom,sdm630-smmu-v2
               - qcom,sdm845-smmu-v2
               - qcom,sm6350-smmu-v2
+              - qcom,sm7150-smmu-v2
           - const: qcom,adreno-smmu
           - const: qcom,smmu-v2
       - description: Qcom Adreno GPUs on Google Cheza platform
@@ -409,6 +410,7 @@ allOf:
           contains:
             enum:
               - qcom,sm6350-smmu-v2
+              - qcom,sm7150-smmu-v2
               - qcom,sm8150-smmu-500
               - qcom,sm8250-smmu-500
     then:
......
@@ -1073,7 +1073,6 @@ CONFIG_QCOM_IPCC=y
 CONFIG_OMAP_IOMMU=y
 CONFIG_OMAP_IOMMU_DEBUG=y
 CONFIG_ROCKCHIP_IOMMU=y
-CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_EXYNOS_IOMMU=y
 CONFIG_QCOM_IOMMU=y
......
@@ -292,7 +292,6 @@ CONFIG_CHROME_PLATFORMS=y
 CONFIG_CROS_EC=y
 CONFIG_CROS_EC_I2C=m
 CONFIG_CROS_EC_SPI=m
-CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_ARCH_TEGRA_2x_SOC=y
 CONFIG_ARCH_TEGRA_3x_SOC=y
......
@@ -1280,13 +1280,19 @@ struct iommu_table_group_ops spapr_tce_table_group_ops = {
 /*
  * A simple iommu_ops to allow less cruft in generic VFIO code.
  */
-static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
-					       struct device *dev)
+static int
+spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
+				    struct device *dev)
 {
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 	struct iommu_group *grp = iommu_group_get(dev);
 	struct iommu_table_group *table_group;
 	int ret = -EINVAL;
 
+	/* At first attach the ownership is already set */
+	if (!domain)
+		return 0;
+
 	if (!grp)
 		return -ENODEV;
@@ -1297,17 +1303,22 @@ static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
 	return ret;
 }
 
-static void spapr_tce_blocking_iommu_set_platform_dma(struct device *dev)
-{
-	struct iommu_group *grp = iommu_group_get(dev);
-	struct iommu_table_group *table_group;
-
-	table_group = iommu_group_get_iommudata(grp);
-	table_group->ops->release_ownership(table_group);
-}
+static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
+	.attach_dev = spapr_tce_platform_iommu_attach_dev,
+};
 
-static const struct iommu_domain_ops spapr_tce_blocking_domain_ops = {
-	.attach_dev = spapr_tce_blocking_iommu_attach_dev,
+static struct iommu_domain spapr_tce_platform_domain = {
+	.type = IOMMU_DOMAIN_PLATFORM,
+	.ops = &spapr_tce_platform_domain_ops,
+};
+
+static struct iommu_domain spapr_tce_blocked_domain = {
+	.type = IOMMU_DOMAIN_BLOCKED,
+	/*
+	 * FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
+	 * also sets the dma_api ops
+	 */
+	.ops = &spapr_tce_platform_domain_ops,
 };
 
 static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
@@ -1322,22 +1333,6 @@ static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
 	return false;
 }
 
-static struct iommu_domain *spapr_tce_iommu_domain_alloc(unsigned int type)
-{
-	struct iommu_domain *dom;
-
-	if (type != IOMMU_DOMAIN_BLOCKED)
-		return NULL;
-
-	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
-	if (!dom)
-		return NULL;
-
-	dom->ops = &spapr_tce_blocking_domain_ops;
-
-	return dom;
-}
-
 static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
 {
 	struct pci_dev *pdev;
@@ -1371,12 +1366,12 @@ static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
 }
 
 static const struct iommu_ops spapr_tce_iommu_ops = {
+	.default_domain = &spapr_tce_platform_domain,
+	.blocked_domain = &spapr_tce_blocked_domain,
 	.capable = spapr_tce_iommu_capable,
-	.domain_alloc = spapr_tce_iommu_domain_alloc,
 	.probe_device = spapr_tce_iommu_probe_device,
 	.release_device = spapr_tce_iommu_release_device,
 	.device_group = spapr_tce_iommu_device_group,
-	.set_platform_dma_ops = spapr_tce_blocking_iommu_set_platform_dma,
 };
 
 static struct attribute *spapr_tce_iommu_attrs[] = {
......
@@ -159,13 +159,6 @@ struct zpci_dev {
 	unsigned long *dma_table;
 	int tlb_refresh;
 
-	spinlock_t iommu_bitmap_lock;
-	unsigned long *iommu_bitmap;
-	unsigned long *lazy_bitmap;
-	unsigned long iommu_size;
-	unsigned long iommu_pages;
-	unsigned int next_bit;
-
 	struct iommu_device iommu_dev;  /* IOMMU core handle */
 
 	char res_name[16];
@@ -180,10 +173,6 @@ struct zpci_dev {
 	struct zpci_fmb *fmb;
 	u16 fmb_update;	/* update interval */
 	u16 fmb_length;
-	/* software counters */
-	atomic64_t allocated_pages;
-	atomic64_t mapped_pages;
-	atomic64_t unmapped_pages;
 
 	u8 version;
 	enum pci_bus_speed max_bus_speed;
......
@@ -50,6 +50,9 @@ struct clp_fh_list_entry {
 #define CLP_UTIL_STR_LEN 64
 #define CLP_PFIP_NR_SEGMENTS 4
 
+/* PCI function type numbers */
+#define PCI_FUNC_TYPE_ISM	0x5	/* ISM device */
+
 extern bool zpci_unique_uid;
 
 struct clp_rsp_slpc_pci {
......
@@ -82,117 +82,16 @@ enum zpci_ioat_dtype {
 #define ZPCI_TABLE_VALID_MASK		0x20
 #define ZPCI_TABLE_PROT_MASK		0x200
 
-static inline unsigned int calc_rtx(dma_addr_t ptr)
-{
-	return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
-}
-
-static inline unsigned int calc_sx(dma_addr_t ptr)
-{
-	return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
-}
-
-static inline unsigned int calc_px(dma_addr_t ptr)
-{
-	return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
-}
-
-static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
-{
-	*entry &= ZPCI_PTE_FLAG_MASK;
-	*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
-}
-
-static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
-{
-	*entry &= ZPCI_RTE_FLAG_MASK;
-	*entry |= (sto & ZPCI_RTE_ADDR_MASK);
-	*entry |= ZPCI_TABLE_TYPE_RTX;
-}
-
-static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
-{
-	*entry &= ZPCI_STE_FLAG_MASK;
-	*entry |= (pto & ZPCI_STE_ADDR_MASK);
-	*entry |= ZPCI_TABLE_TYPE_SX;
-}
-
-static inline void validate_rt_entry(unsigned long *entry)
-{
-	*entry &= ~ZPCI_TABLE_VALID_MASK;
-	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
-	*entry |= ZPCI_TABLE_VALID;
-	*entry |= ZPCI_TABLE_LEN_RTX;
-}
-
-static inline void validate_st_entry(unsigned long *entry)
-{
-	*entry &= ~ZPCI_TABLE_VALID_MASK;
-	*entry |= ZPCI_TABLE_VALID;
-}
-
-static inline void invalidate_pt_entry(unsigned long *entry)
-{
-	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
-	*entry &= ~ZPCI_PTE_VALID_MASK;
-	*entry |= ZPCI_PTE_INVALID;
-}
-
-static inline void validate_pt_entry(unsigned long *entry)
-{
-	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
-	*entry &= ~ZPCI_PTE_VALID_MASK;
-	*entry |= ZPCI_PTE_VALID;
-}
-
-static inline void entry_set_protected(unsigned long *entry)
-{
-	*entry &= ~ZPCI_TABLE_PROT_MASK;
-	*entry |= ZPCI_TABLE_PROTECTED;
-}
-
-static inline void entry_clr_protected(unsigned long *entry)
-{
-	*entry &= ~ZPCI_TABLE_PROT_MASK;
-	*entry |= ZPCI_TABLE_UNPROTECTED;
-}
-
-static inline int reg_entry_isvalid(unsigned long entry)
-{
-	return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
-}
-
-static inline int pt_entry_isvalid(unsigned long entry)
-{
-	return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
-}
-
-static inline unsigned long *get_rt_sto(unsigned long entry)
-{
-	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
-		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
-	else
-		return NULL;
-}
-
-static inline unsigned long *get_st_pto(unsigned long entry)
-{
-	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
-		return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
-	else
-		return NULL;
-}
-
-/* Prototypes */
-void dma_free_seg_table(unsigned long);
-unsigned long *dma_alloc_cpu_table(gfp_t gfp);
-void dma_cleanup_tables(unsigned long *);
-unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
-				  gfp_t gfp);
-void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags);
-
-extern const struct dma_map_ops s390_pci_dma_ops;
+struct zpci_iommu_ctrs {
+	atomic64_t		mapped_pages;
+	atomic64_t		unmapped_pages;
+	atomic64_t		global_rpcits;
+	atomic64_t		sync_map_rpcits;
+	atomic64_t		sync_rpcits;
+};
+
+struct zpci_dev;
+
+struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev);
 
 #endif
@@ -3,7 +3,7 @@
 # Makefile for the s390 PCI subsystem.
 #
-obj-$(CONFIG_PCI)	+= pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
+obj-$(CONFIG_PCI)	+= pci.o pci_irq.o pci_clp.o pci_sysfs.o \
 			   pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
 			   pci_bus.o pci_kvm_hook.o
 obj-$(CONFIG_PCI_IOV) += pci_iov.o
@@ -124,7 +124,11 @@ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
 	WARN_ON_ONCE(iota & 0x3fff);
 	fib.pba = base;
-	fib.pal = limit;
+	/* Work around off by one in ISM virt device */
+	if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
+		fib.pal = limit + (1 << 12);
+	else
+		fib.pal = limit;
 	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
 	fib.gd = zdev->gisa;
 	cc = zpci_mod_fc(req, &fib, status);
@@ -153,6 +157,7 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
 int zpci_fmb_enable_device(struct zpci_dev *zdev)
 {
 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
+	struct zpci_iommu_ctrs *ctrs;
 	struct zpci_fib fib = {0};
 	u8 cc, status;
@@ -165,9 +170,15 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
 	WARN_ON((u64) zdev->fmb & 0xf);
 
 	/* reset software counters */
-	atomic64_set(&zdev->allocated_pages, 0);
-	atomic64_set(&zdev->mapped_pages, 0);
-	atomic64_set(&zdev->unmapped_pages, 0);
+	ctrs = zpci_get_iommu_ctrs(zdev);
+	if (ctrs) {
+		atomic64_set(&ctrs->mapped_pages, 0);
+		atomic64_set(&ctrs->unmapped_pages, 0);
+		atomic64_set(&ctrs->global_rpcits, 0);
+		atomic64_set(&ctrs->sync_map_rpcits, 0);
+		atomic64_set(&ctrs->sync_rpcits, 0);
+	}
 
 	fib.fmb_addr = virt_to_phys(zdev->fmb);
 	fib.gd = zdev->gisa;
@@ -582,7 +593,6 @@ int pcibios_device_add(struct pci_dev *pdev)
 	pdev->no_vf_scan = 1;
 
 	pdev->dev.groups = zpci_attr_groups;
-	pdev->dev.dma_ops = &s390_pci_dma_ops;
 	zpci_map_resources(pdev);
 
 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -756,8 +766,6 @@ int zpci_hot_reset_device(struct zpci_dev *zdev)
 	if (zdev->dma_table)
 		rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 					virt_to_phys(zdev->dma_table), &status);
-	else
-		rc = zpci_dma_init_device(zdev);
 	if (rc) {
 		zpci_disable_device(zdev);
 		return rc;
@@ -865,11 +873,6 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
 	if (zdev->zbus->bus)
 		zpci_bus_remove_device(zdev, false);
 
-	if (zdev->dma_table) {
-		rc = zpci_dma_exit_device(zdev);
-		if (rc)
-			return rc;
-	}
 	if (zdev_enabled(zdev)) {
 		rc = zpci_disable_device(zdev);
 		if (rc)
@@ -918,8 +921,6 @@ void zpci_release_device(struct kref *kref)
 	if (zdev->zbus->bus)
 		zpci_bus_remove_device(zdev, false);
 
-	if (zdev->dma_table)
-		zpci_dma_exit_device(zdev);
 	if (zdev_enabled(zdev))
 		zpci_disable_device(zdev);
 
@@ -1109,10 +1110,6 @@ static int __init pci_base_init(void)
 	if (rc)
 		goto out_irq;
 
-	rc = zpci_dma_init();
-	if (rc)
-		goto out_dma;
-
 	rc = clp_scan_pci_devices();
 	if (rc)
 		goto out_find;
@@ -1122,8 +1119,6 @@ static int __init pci_base_init(void)
 	return 0;
 
 out_find:
-	zpci_dma_exit();
-out_dma:
 	zpci_irq_exit();
 out_irq:
 	zpci_mem_exit();
......
@@ -47,11 +47,6 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
 		rc = zpci_enable_device(zdev);
 		if (rc)
 			return rc;
-		rc = zpci_dma_init_device(zdev);
-		if (rc) {
-			zpci_disable_device(zdev);
-			return rc;
-		}
 	}
 
 	if (!zdev->has_resources) {
......
@@ -53,9 +53,11 @@ static char *pci_fmt3_names[] = {
 };
 
 static char *pci_sw_names[] = {
-	"Allocated pages",
 	"Mapped pages",
 	"Unmapped pages",
+	"Global RPCITs",
+	"Sync Map RPCITs",
+	"Sync RPCITs",
 };
 
 static void pci_fmb_show(struct seq_file *m, char *name[], int length,
@@ -69,10 +71,14 @@ static void pci_fmb_show(struct seq_file *m, char *name[], int length,
 
 static void pci_sw_counter_show(struct seq_file *m)
 {
-	struct zpci_dev *zdev = m->private;
-	atomic64_t *counter = &zdev->allocated_pages;
+	struct zpci_iommu_ctrs *ctrs = zpci_get_iommu_ctrs(m->private);
+	atomic64_t *counter;
 	int i;
 
+	if (!ctrs)
+		return;
+
+	counter = &ctrs->mapped_pages;
 	for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
 		seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
 			   atomic64_read(counter));
......
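For illustration, with the counter names and the seq_printf() format above, the software-counter block of the zPCI debugfs statistics output would now end roughly like this (values are hypothetical):

	              Mapped pages:	4096
	            Unmapped pages:	4080
	             Global RPCITs:	2
	           Sync Map RPCITs:	17
	               Sync RPCITs:	31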
@@ -59,9 +59,16 @@ static inline bool ers_result_indicates_abort(pci_ers_result_t ers_res)
 	}
 }
 
-static bool is_passed_through(struct zpci_dev *zdev)
+static bool is_passed_through(struct pci_dev *pdev)
 {
-	return zdev->s390_domain;
+	struct zpci_dev *zdev = to_zpci(pdev);
+	bool ret;
+
+	mutex_lock(&zdev->kzdev_lock);
+	ret = !!zdev->kzdev;
+	mutex_unlock(&zdev->kzdev_lock);
+
+	return ret;
 }
 
 static bool is_driver_supported(struct pci_driver *driver)
@@ -176,7 +183,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
 	}
 	pdev->error_state = pci_channel_io_frozen;
 
-	if (is_passed_through(to_zpci(pdev))) {
+	if (is_passed_through(pdev)) {
 		pr_info("%s: Cannot be recovered in the host because it is a pass-through device\n",
 			pci_name(pdev));
 		goto out_unlock;
@@ -239,7 +246,7 @@ static void zpci_event_io_failure(struct pci_dev *pdev, pci_channel_state_t es)
 	 * we will inject the error event and let the guest recover the device
 	 * itself.
 	 */
-	if (is_passed_through(to_zpci(pdev)))
+	if (is_passed_through(pdev))
 		goto out;
 	driver = to_pci_driver(pdev->dev.driver);
 	if (driver && driver->err_handler && driver->err_handler->error_detected)
@@ -306,8 +313,6 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
 	/* Even though the device is already gone we still
 	 * need to free zPCI resources as part of the disable.
 	 */
-	if (zdev->dma_table)
-		zpci_dma_exit_device(zdev);
 	if (zdev_enabled(zdev))
 		zpci_disable_device(zdev);
 	zdev->state = ZPCI_FN_STATE_STANDBY;
......
@@ -56,6 +56,7 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct zpci_dev *zdev = to_zpci(pdev);
 	int ret = 0;
+	u8 status;
 
 	/* Can't use device_remove_self() here as that would lead us to lock
 	 * the pci_rescan_remove_lock while holding the device' kernfs lock.
@@ -82,12 +83,6 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
 	pci_lock_rescan_remove();
 	if (pci_dev_is_added(pdev)) {
 		pci_stop_and_remove_bus_device(pdev);
-		if (zdev->dma_table) {
-			ret = zpci_dma_exit_device(zdev);
-			if (ret)
-				goto out;
-		}
-
 		if (zdev_enabled(zdev)) {
 			ret = zpci_disable_device(zdev);
 			/*
@@ -105,14 +100,16 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
 		ret = zpci_enable_device(zdev);
 		if (ret)
 			goto out;
-		ret = zpci_dma_init_device(zdev);
-		if (ret) {
-			zpci_disable_device(zdev);
-			goto out;
+
+		if (zdev->dma_table) {
+			ret = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+						 virt_to_phys(zdev->dma_table), &status);
+			if (ret)
+				zpci_disable_device(zdev);
 		}
-		pci_rescan_bus(zdev->zbus->bus);
 	}
 out:
+	pci_rescan_bus(zdev->zbus->bus);
 	pci_unlock_rescan_remove();
 	if (kn)
 		sysfs_unbreak_active_protection(kn);
......
@@ -91,7 +91,7 @@ config IOMMU_DEBUGFS
 choice
 	prompt "IOMMU default domain type"
 	depends on IOMMU_API
-	default IOMMU_DEFAULT_DMA_LAZY if X86 || IA64
+	default IOMMU_DEFAULT_DMA_LAZY if X86 || IA64 || S390
 	default IOMMU_DEFAULT_DMA_STRICT
 	help
 	  Choose the type of IOMMU domain used to manage DMA API usage by
@@ -146,7 +146,7 @@ config OF_IOMMU
 # IOMMU-agnostic DMA-mapping layer
 config IOMMU_DMA
-	def_bool ARM64 || IA64 || X86
+	def_bool ARM64 || IA64 || X86 || S390
 	select DMA_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
@@ -236,17 +236,6 @@ config SUN50I_IOMMU
 	help
 	  Support for the IOMMU introduced in the Allwinner H6 SoCs.
 
-config TEGRA_IOMMU_GART
-	bool "Tegra GART IOMMU Support"
-	depends on ARCH_TEGRA_2x_SOC
-	depends on TEGRA_MC
-	select IOMMU_API
-	help
-	  Enables support for remapping discontiguous physical memory
-	  shared with the operating system into contiguous I/O virtual
-	  space through the GART (Graphics Address Relocation Table)
-	  hardware included on Tegra SoCs.
-
 config TEGRA_IOMMU_SMMU
 	bool "NVIDIA Tegra SMMU Support"
 	depends on ARCH_TEGRA
......
@@ -20,7 +20,6 @@ obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
 obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
 obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o
-obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
 obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
......
@@ -22,15 +22,6 @@ config AMD_IOMMU
 	  your BIOS for an option to enable it or if you have an IVRS ACPI
 	  table.
 
-config AMD_IOMMU_V2
-	tristate "AMD IOMMU Version 2 driver"
-	depends on AMD_IOMMU
-	select MMU_NOTIFIER
-	help
-	  This option enables support for the AMD IOMMUv2 features of the IOMMU
-	  hardware. Select this option if you want to use devices that support
-	  the PCI PRI and PASID interface.
-
 config AMD_IOMMU_DEBUGFS
 	bool "Enable AMD IOMMU internals in DebugFS"
 	depends on AMD_IOMMU && IOMMU_DEBUGFS
......
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
-obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
@@ -38,9 +38,6 @@ extern int amd_iommu_guest_ir;
 extern enum io_pgtable_fmt amd_iommu_pgtable;
 extern int amd_iommu_gpt_level;
 
-/* IOMMUv2 specific functions */
-struct iommu_domain;
-
 bool amd_iommu_v2_supported(void);
 struct amd_iommu *get_amd_iommu(unsigned int idx);
 u8 amd_iommu_pc_get_max_banks(unsigned int idx);
@@ -51,10 +48,10 @@ int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
 			 u8 fxn, u64 *value);
 
-int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
-int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
-void amd_iommu_domain_direct_map(struct iommu_domain *dom);
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+/* Device capabilities */
+int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
+void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
+
 int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
 void amd_iommu_domain_update(struct protection_domain *domain);
@@ -87,9 +84,25 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
 		(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
 }
 
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
+static inline bool check_feature(u64 mask)
+{
+	return (amd_iommu_efr & mask);
+}
+
+static inline bool check_feature2(u64 mask)
+{
+	return (amd_iommu_efr2 & mask);
+}
+
+static inline int check_feature_gpt_level(void)
+{
+	return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
+}
+
+static inline bool amd_iommu_gt_ppr_supported(void)
 {
-	return !!(iommu->features & mask);
+	return (check_feature(FEATURE_GT) &&
+		check_feature(FEATURE_PPR));
 }
 
 static inline u64 iommu_virt_to_phys(void *vaddr)
@@ -105,7 +118,6 @@ static inline void *iommu_phys_to_virt(unsigned long paddr)
 static inline
 void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
 {
-	atomic64_set(&domain->iop.pt_root, root);
 	domain->iop.root = (u64 *)(root & PAGE_MASK);
 	domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
 }
@@ -146,8 +158,5 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
 				  u64 *root, int mode);
 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
 
-extern u64 amd_iommu_efr;
-extern u64 amd_iommu_efr2;
-
 extern bool amd_iommu_snp_en;
 
 #endif
@@ -451,6 +451,10 @@
 #define PD_IOMMUV2_MASK	BIT(3) /* domain has gcr3 table */
 #define PD_GIOV_MASK	BIT(4) /* domain enable GIOV support */
 
+/* Timeout stuff */
+#define LOOP_TIMEOUT		100000
+#define MMIO_STATUS_TIMEOUT	2000000
+
 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...) \
 	do { \
@@ -505,19 +509,6 @@ extern struct kmem_cache *amd_iommu_irq_cache;
 #define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
 #define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)
 
-/*
- * This struct is used to pass information about
- * incoming PPR faults around.
- */
-struct amd_iommu_fault {
-	u64 address;	/* IO virtual address of the fault*/
-	u32 pasid;	/* Address space identifier */
-	u32 sbdf;	/* Originating PCI device id */
-	u16 tag;	/* PPR tag */
-	u16 flags;	/* Fault flags */
-};
-
 struct amd_iommu;
 struct iommu_domain;
 
@@ -544,7 +535,6 @@ struct amd_io_pgtable {
 	struct io_pgtable iop;
 	int mode;
 	u64 *root;
-	atomic64_t pt_root;	/* pgtable root and pgtable mode */
 	u64 *pgd;		/* v2 pgtable pgd pointer */
 };
 
@@ -676,9 +666,6 @@ struct amd_iommu {
 	/* Extended features 2 */
 	u64 features2;
 
-	/* IOMMUv2 */
-	bool is_iommu_v2;
-
 	/* PCI device id of the IOMMU device */
 	u16 devid;
 
@@ -799,6 +786,14 @@ struct devid_map {
 	bool cmd_line;
 };
 
+#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP	0x1	/* ATS feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP	0x2	/* PRI feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP	0x4	/* PASID context supported */
+/* Device may request execution on memory pages */
+#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP	0x8
+/* Device may request super-user privileges */
+#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP	0x10
+
 /*
  * This struct contains device specific data for the IOMMU
  */
@@ -811,13 +806,15 @@ struct iommu_dev_data {
 	struct protection_domain *domain; /* Domain the device is bound to */
 	struct device *dev;
 	u16 devid;			  /* PCI Device ID */
-	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
-	struct {
-		bool enabled;
-		int qdep;
-	} ats;				  /* ATS state */
-	bool pri_tlp;			  /* PASID TLB required for
+
+	u32 flags;			  /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */
+	int ats_qdep;
+	u8 ats_enabled   :1;		  /* ATS state */
+	u8 pri_enabled   :1;		  /* PRI state */
+	u8 pasid_enabled :1;		  /* PASID state */
+	u8 pri_tlp       :1;		  /* PASID TLB required for
 					     PPR completions */
+	u8 ppr           :1;		  /* Enable device PPR support */
 	bool use_vapic;			  /* Enable device to use vapic mode */
 	bool defer_attach;
 
@@ -884,16 +881,15 @@ extern unsigned amd_iommu_aperture_order;
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
-/* Smallest max PASID supported by any IOMMU in the system */
-extern u32 amd_iommu_max_pasid;
-
-extern bool amd_iommu_v2_present;
-
 extern bool amd_iommu_force_isolation;
 
 /* Max levels of glxval supported */
 extern int amd_iommu_max_glx_val;
 
+/* Global EFR and EFR2 registers */
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
+
 /*
  * This function flushes all internal caches of
  * the IOMMU used by this driver.
......
@@ -83,8 +83,6 @@
 #define ACPI_DEVFLAG_LINT1	0x80
 #define ACPI_DEVFLAG_ATSDIS	0x10000000
 
-#define LOOP_TIMEOUT	2000000
-
 #define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
 					     | ((dev & 0x1f) << 3) | (fn & 0x7))
 
@@ -187,9 +185,6 @@ static int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 bool amd_iommu_iotlb_sup __read_mostly = true;
 
-u32 amd_iommu_max_pasid __read_mostly = ~0;
-bool amd_iommu_v2_present __read_mostly;
-
 static bool amd_iommu_pc_present __read_mostly;
 bool amdr_ivrs_remap_support __read_mostly;
 
@@ -272,7 +267,7 @@ int amd_iommu_get_num_iommus(void)
  * Iterate through all the IOMMUs to get common EFR
  * masks among all IOMMUs and warn if found inconsistency.
  */
-static void get_global_efr(void)
+static __init void get_global_efr(void)
 {
 	struct amd_iommu *iommu;
 
@@ -304,16 +299,6 @@ static void get_global_efr(void)
 	pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
 }
 
-static bool check_feature_on_all_iommus(u64 mask)
-{
-	return !!(amd_iommu_efr & mask);
-}
-
-static inline int check_feature_gpt_level(void)
-{
-	return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
-}
-
 /*
  * For IVHD type 0x11/0x40, EFR is also available via IVHD.
  * Default to IVHD EFR since it is available sooner
@@ -399,7 +384,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
 	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
 	u64 entry = start & PM_ADDR_MASK;
 
-	if (!check_feature_on_all_iommus(FEATURE_SNP))
+	if (!check_feature(FEATURE_SNP))
 		return;
 
 	/* Note:
@@ -869,7 +854,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
 	void *buf = (void *)__get_free_pages(gfp, order);
 
 	if (buf &&
-	    check_feature_on_all_iommus(FEATURE_SNP) &&
+	    check_feature(FEATURE_SNP) &&
 	    set_memory_4k((unsigned long)buf, (1 << order))) {
 		free_pages((unsigned long)buf, order);
 		buf = NULL;
@@ -985,14 +970,14 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
 	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
 
-	for (i = 0; i < LOOP_TIMEOUT; ++i) {
+	for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
 			break;
 		udelay(10);
 	}
 
-	if (WARN_ON(i >= LOOP_TIMEOUT))
+	if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
 		return -EINVAL;
 
 	return 0;
@@ -1048,7 +1033,7 @@ static void iommu_enable_xt(struct amd_iommu *iommu)
 static void iommu_enable_gt(struct amd_iommu *iommu)
 {
-	if (!iommu_feature(iommu, FEATURE_GT))
+	if (!check_feature(FEATURE_GT))
 		return;
 
 	iommu_feature_enable(iommu, CONTROL_GT_EN);
@@ -1987,7 +1972,7 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 	u64 val;
 	struct pci_dev *pdev = iommu->dev;
 
-	if (!iommu_feature(iommu, FEATURE_PC))
+	if (!check_feature(FEATURE_PC))
 		return;
 
 	amd_iommu_pc_present = true;
@@ -2014,8 +1999,7 @@ static ssize_t amd_iommu_show_features(struct device *dev,
 				       struct device_attribute *attr,
 				       char *buf)
 {
-	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
-	return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
+	return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
 }
 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
 
@@ -2051,9 +2035,9 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
 	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
 	features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
 
-	if (!iommu->features) {
-		iommu->features = features;
-		iommu->features2 = features2;
+	if (!amd_iommu_efr) {
+		amd_iommu_efr = features;
+		amd_iommu_efr2 = features2;
 		return;
 	}
 
@@ -2061,12 +2045,12 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
 	 * Sanity check and warn if EFR values from
 	 * IVHD and MMIO conflict.
 	 */
-	if (features != iommu->features ||
-	    features2 != iommu->features2) {
+	if (features != amd_iommu_efr ||
+	    features2 != amd_iommu_efr2) {
 		pr_warn(FW_WARN
 			"EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
-			features, iommu->features,
-			features2, iommu->features2);
+			features, amd_iommu_efr,
+			features2, amd_iommu_efr2);
 	}
 }
 
@@ -2092,20 +2076,17 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
 	late_iommu_features_init(iommu);
 
-	if (iommu_feature(iommu, FEATURE_GT)) {
+	if (check_feature(FEATURE_GT)) {
 		int glxval;
-		u32 max_pasid;
 		u64 pasmax;
 
-		pasmax = iommu->features & FEATURE_PASID_MASK;
+		pasmax = amd_iommu_efr & FEATURE_PASID_MASK;
 		pasmax >>= FEATURE_PASID_SHIFT;
-		max_pasid = (1 << (pasmax + 1)) - 1;
-		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
-		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
-		glxval = iommu->features & FEATURE_GLXVAL_MASK;
+		iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1;
+		BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK);
+		glxval = amd_iommu_efr & FEATURE_GLXVAL_MASK;
 		glxval >>= FEATURE_GLXVAL_SHIFT;
 
 		if (amd_iommu_max_glx_val == -1)
@@ -2114,13 +2095,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
 	}
 
-	if (iommu_feature(iommu, FEATURE_GT) &&
-	    iommu_feature(iommu, FEATURE_PPR)) {
-		iommu->is_iommu_v2   = true;
-		amd_iommu_v2_present = true;
-	}
-
-	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
+	if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
 		return -ENOMEM;
 
 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
@@ -2132,13 +2107,10 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 	init_iommu_perf_ctr(iommu);
 
 	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
-		if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
-		    !iommu_feature(iommu, FEATURE_GT)) {
+		if (!check_feature(FEATURE_GIOSUP) ||
+		    !check_feature(FEATURE_GT)) {
 			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
 			amd_iommu_pgtable = AMD_IOMMU_V1;
-		} else if (iommu_default_passthrough()) {
-			pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n");
-			amd_iommu_pgtable = AMD_IOMMU_V1;
 		}
 	}
 
@@ -2186,35 +2158,29 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
 static void print_iommu_info(void)
 {
+	int i;
 	static const char * const feat_str[] = {
 		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
 		"IA", "GA", "HE", "PC"
 	};
-	struct amd_iommu *iommu;
-
-	for_each_iommu(iommu) {
-		struct pci_dev *pdev = iommu->dev;
-		int i;
-
-		pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
-
-		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-			pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
-			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
-				if (iommu_feature(iommu, (1ULL << i)))
-					pr_cont(" %s", feat_str[i]);
-			}
-			if (iommu->features & FEATURE_GAM_VAPIC)
-				pr_cont(" GA_vAPIC");
-			if (iommu->features & FEATURE_SNP)
-				pr_cont(" SNP");
-			pr_cont("\n");
-		}
-	}
+
+	if (amd_iommu_efr) {
+		pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);
+		for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+			if (check_feature(1ULL << i))
+				pr_cont(" %s", feat_str[i]);
+		}
+		if (check_feature(FEATURE_GAM_VAPIC))
+			pr_cont(" GA_vAPIC");
+		if (check_feature(FEATURE_SNP))
+			pr_cont(" SNP");
+		pr_cont("\n");
+	}
 
 	if (irq_remapping_enabled) {
 		pr_info("Interrupt remapping enabled\n");
 		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
@@ -2900,19 +2866,19 @@ static void enable_iommus_vapic(void)
 		 * Need to set and poll check the GALOGRun bit to zero before
 		 * we can set/ modify GA Log registers safely.
 		 */
-		for (i = 0; i < LOOP_TIMEOUT; ++i) {
+		for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
 			status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 			if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
 				break;
 			udelay(10);
 		}
 
-		if (WARN_ON(i >= LOOP_TIMEOUT))
+		if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
 			return;
 	}
 
 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-	    !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+	    !check_feature(FEATURE_GAM_VAPIC)) {
 		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
 		return;
 	}
@@ -3698,9 +3664,8 @@ bool amd_iommu_v2_supported(void)
 	 * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without
 	 * setting up IOMMUv1 page table.
 	 */
-	return amd_iommu_v2_present && !amd_iommu_snp_en;
+	return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en;
 }
-EXPORT_SYMBOL(amd_iommu_v2_supported);
 
 struct amd_iommu *get_amd_iommu(unsigned int idx)
 {
@@ -3824,7 +3789,7 @@ int amd_iommu_snp_enable(void)
 		return -EINVAL;
 	}
 
-	amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+	amd_iommu_snp_en = check_feature(FEATURE_SNP);
 	if (!amd_iommu_snp_en)
 		return -EINVAL;
......
@@ -363,10 +363,10 @@ static void v2_free_pgtable(struct io_pgtable *iop)
 	if (!(pdom->flags & PD_IOMMUV2_MASK))
 		return;
 
-	/*
-	 * Make changes visible to IOMMUs. No need to clear gcr3 entry
-	 * as gcr3 table is already freed.
-	 */
+	/* Clear gcr3 entry */
+	amd_iommu_domain_clear_gcr3(&pdom->domain, 0);
+
+	/* Make changes visible to IOMMUs */
 	amd_iommu_domain_update(pdom);
 
 	/* Free page table */
......
@@ -196,7 +196,6 @@ struct apple_dart_hw {
  * @lock: lock for hardware operations involving this dart
  * @pgsize: pagesize supported by this DART
  * @supports_bypass: indicates if this DART supports bypass mode
- * @force_bypass: force bypass mode due to pagesize mismatch?
  * @sid2group: maps stream ids to iommu_groups
  * @iommu: iommu core device
  */
@@ -217,7 +216,6 @@ struct apple_dart {
 	u32 pgsize;
 	u32 num_streams;
 	u32 supports_bypass : 1;
-	u32 force_bypass : 1;
 
 	struct iommu_group *sid2group[DART_MAX_STREAMS];
 	struct iommu_device iommu;
@@ -506,10 +504,11 @@ static void apple_dart_iotlb_sync(struct iommu_domain *domain,
 	apple_dart_domain_flush_tlb(to_dart_domain(domain));
 }
 
-static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
-				      unsigned long iova, size_t size)
+static int apple_dart_iotlb_sync_map(struct iommu_domain *domain,
+				     unsigned long iova, size_t size)
 {
 	apple_dart_domain_flush_tlb(to_dart_domain(domain));
+	return 0;
 }
 
 static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
@@ -568,15 +567,17 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
 	stream_map->dart->hw->invalidate_tlb(stream_map);
 }
 
-static int apple_dart_finalize_domain(struct iommu_domain *domain,
+static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
 				      struct apple_dart_master_cfg *cfg)
 {
-	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
 	struct apple_dart *dart = cfg->stream_maps[0].dart;
 	struct io_pgtable_cfg pgtbl_cfg;
 	int ret = 0;
 	int i, j;
 
+	if (dart->pgsize > PAGE_SIZE)
+		return -EINVAL;
+
 	mutex_lock(&dart_domain->init_lock);
 
 	if (dart_domain->finalized)
@@ -597,17 +598,18 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
 		.iommu_dev = dart->dev,
 	};
 
-	dart_domain->pgtbl_ops =
-		alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain);
+	dart_domain->pgtbl_ops = alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg,
+						      &dart_domain->domain);
 	if (!dart_domain->pgtbl_ops) {
 		ret = -ENOMEM;
 		goto done;
 	}
 
-	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end = (dma_addr_t)DMA_BIT_MASK(dart->ias);
-	domain->geometry.force_aperture = true;
+	dart_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	dart_domain->domain.geometry.aperture_start = 0;
+	dart_domain->domain.geometry.aperture_end =
+		(dma_addr_t)DMA_BIT_MASK(dart->ias);
+	dart_domain->domain.geometry.force_aperture = true;
 
 	dart_domain->finalized = true;
 
@@ -651,47 +653,72 @@ static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
 				      true);
 }
 
-static int apple_dart_attach_dev(struct iommu_domain *domain,
-				 struct device *dev)
+static int apple_dart_attach_dev_paging(struct iommu_domain *domain,
+					struct device *dev)
 {
 	int ret, i;
 	struct apple_dart_stream_map *stream_map;
 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
 	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
 
-	if (cfg->stream_maps[0].dart->force_bypass &&
-	    domain->type != IOMMU_DOMAIN_IDENTITY)
-		return -EINVAL;
-	if (!cfg->stream_maps[0].dart->supports_bypass &&
-	    domain->type == IOMMU_DOMAIN_IDENTITY)
-		return -EINVAL;
+	ret = apple_dart_finalize_domain(dart_domain, cfg);
+	if (ret)
+		return ret;
 
-	ret = apple_dart_finalize_domain(domain, cfg);
+	ret = apple_dart_domain_add_streams(dart_domain, cfg);
 	if (ret)
 		return ret;
 
-	switch (domain->type) {
-	default:
-		ret = apple_dart_domain_add_streams(dart_domain, cfg);
-		if (ret)
-			return ret;
-
-		for_each_stream_map(i, cfg, stream_map)
-			apple_dart_setup_translation(dart_domain, stream_map);
-		break;
-	case IOMMU_DOMAIN_BLOCKED:
-		for_each_stream_map(i, cfg, stream_map)
-			apple_dart_hw_disable_dma(stream_map);
-		break;
-	case IOMMU_DOMAIN_IDENTITY:
-		for_each_stream_map(i, cfg, stream_map)
-			apple_dart_hw_enable_bypass(stream_map);
-		break;
-	}
-
-	return ret;
+	for_each_stream_map(i, cfg, stream_map)
+		apple_dart_setup_translation(dart_domain, stream_map);
+	return 0;
+}
+
+static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
+					  struct device *dev)
+{
+	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+	struct apple_dart_stream_map *stream_map;
+	int i;
+
+	if (!cfg->stream_maps[0].dart->supports_bypass)
+		return -EINVAL;
+
+	for_each_stream_map(i, cfg, stream_map)
+		apple_dart_hw_enable_bypass(stream_map);
+	return 0;
 }
 
+static const struct iommu_domain_ops apple_dart_identity_ops = {
+	.attach_dev = apple_dart_attach_dev_identity,
+};
+
+static struct iommu_domain apple_dart_identity_domain = {
+	.type = IOMMU_DOMAIN_IDENTITY,
+	.ops = &apple_dart_identity_ops,
+};
+
+static int apple_dart_attach_dev_blocked(struct iommu_domain *domain,
+					 struct device *dev)
+{
+	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+	struct apple_dart_stream_map *stream_map;
+	int i;
+
+	for_each_stream_map(i, cfg, stream_map)
+		apple_dart_hw_disable_dma(stream_map);
+	return 0;
+}
+
+static const struct iommu_domain_ops apple_dart_blocked_ops = {
+	.attach_dev = apple_dart_attach_dev_blocked,
+};
+
+static struct iommu_domain apple_dart_blocked_domain = {
+	.type = IOMMU_DOMAIN_BLOCKED,
+	.ops = &apple_dart_blocked_ops,
+};
+
 static struct iommu_device *apple_dart_probe_device(struct device *dev)
 {
 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
@@ -717,24 +744,26 @@ static void apple_dart_release_device(struct device *dev)
 	kfree(cfg);
 }
 
-static struct iommu_domain *apple_dart_domain_alloc(unsigned int type)
+static struct iommu_domain *apple_dart_domain_alloc_paging(struct device *dev)
 {
 	struct apple_dart_domain *dart_domain;
 
-	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED &&
-	    type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_BLOCKED)
-		return NULL;
-
 	dart_domain = kzalloc(sizeof(*dart_domain), GFP_KERNEL);
 	if (!dart_domain)
 		return NULL;
 
 	mutex_init(&dart_domain->init_lock);
 
-	/* no need to allocate pgtbl_ops or do any other finalization steps */
-	if (type == IOMMU_DOMAIN_IDENTITY || type == IOMMU_DOMAIN_BLOCKED)
-		dart_domain->finalized = true;
+	if (dev) {
+		struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+		int ret;
+
+		ret = apple_dart_finalize_domain(dart_domain, cfg);
+		if (ret) {
+			kfree(dart_domain);
+			return ERR_PTR(ret);
+		}
+	}
 
 	return &dart_domain->domain;
 }
@@ -770,8 +799,6 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
 	if (cfg_dart) {
 		if (cfg_dart->supports_bypass != dart->supports_bypass)
 			return -EINVAL;
-		if (cfg_dart->force_bypass != dart->force_bypass)
-			return -EINVAL;
 		if (cfg_dart->pgsize != dart->pgsize)
 			return -EINVAL;
 	}
@@ -913,7 +940,7 @@ static int apple_dart_def_domain_type(struct device *dev)
 {
 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
 
-	if (cfg->stream_maps[0].dart->force_bypass)
+	if (cfg->stream_maps[0].dart->pgsize > PAGE_SIZE)
 		return IOMMU_DOMAIN_IDENTITY;
 	if (!cfg->stream_maps[0].dart->supports_bypass)
 		return IOMMU_DOMAIN_DMA;
@@ -947,7 +974,9 @@ static void apple_dart_get_resv_regions(struct device *dev,
 }
 
 static const struct iommu_ops apple_dart_iommu_ops = {
-	.domain_alloc = apple_dart_domain_alloc,
+	.identity_domain = &apple_dart_identity_domain,
+	.blocked_domain = &apple_dart_blocked_domain,
+	.domain_alloc_paging = apple_dart_domain_alloc_paging,
 	.probe_device = apple_dart_probe_device,
 	.release_device = apple_dart_release_device,
 	.device_group = apple_dart_device_group,
@@ -957,7 +986,7 @@ static const struct iommu_ops apple_dart_iommu_ops = {
 	.pgsize_bitmap = -1UL, /* Restricted during dart probe */
 	.owner = THIS_MODULE,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
-		.attach_dev	= apple_dart_attach_dev,
+		.attach_dev	= apple_dart_attach_dev_paging,
 		.map_pages	= apple_dart_map_pages,
 		.unmap_pages	= apple_dart_unmap_pages,
 		.flush_iotlb_all = apple_dart_flush_iotlb_all,
@@ -1111,8 +1140,6 @@ static int apple_dart_probe(struct platform_device *pdev)
 		goto err_clk_disable;
 	}
 
-	dart->force_bypass = dart->pgsize > PAGE_SIZE;
-
 	ret = apple_dart_hw_reset(dart);
 	if (ret)
 		goto err_clk_disable;
@@ -1136,7 +1163,8 @@ static int apple_dart_probe(struct platform_device *pdev)
 	dev_info(
 		&pdev->dev,
 		"DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
-		dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass);
+		dart->pgsize, dart->num_streams, dart->supports_bypass,
+		dart->pgsize > PAGE_SIZE);
 
 	return 0;
 
 err_sysfs_remove:
......
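The apple-dart hunks above drop the per-type switch in ->domain_alloc() in favour of statically allocated identity and blocked domains plus a paging-only allocator. As a minimal sketch of that registration pattern (editor's illustration, not taken from this commit; the foo_* names are invented), a driver wires it up roughly like this:

#include <linux/iommu.h>

static int foo_attach_dev_identity(struct iommu_domain *domain,
				   struct device *dev)
{
	/* Point the hardware at a 1:1 (bypass) translation for this device. */
	return 0;
}

static const struct iommu_domain_ops foo_identity_ops = {
	.attach_dev = foo_attach_dev_identity,
};

static struct iommu_domain foo_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &foo_identity_ops,
};

static const struct iommu_ops foo_iommu_ops = {
	.identity_domain = &foo_identity_domain,
	/* .blocked_domain and .domain_alloc_paging are filled in the same way */
};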
@@ -25,11 +25,9 @@ struct arm_smmu_mmu_notifier {
#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)
struct arm_smmu_bond {
-struct iommu_sva sva;
struct mm_struct *mm;
struct arm_smmu_mmu_notifier *smmu_mn;
struct list_head list;
-refcount_t refs;
};
#define sva_to_bond(handle) \
@@ -37,6 +35,25 @@ struct arm_smmu_bond {
static DEFINE_MUTEX(sva_lock);
/*
* Write the CD to the CD tables for all masters that this domain is attached
* to. Note that this is only used to update existing CD entries in the target
* CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
*/
static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
int ssid,
struct arm_smmu_ctx_desc *cd)
{
struct arm_smmu_master *master;
unsigned long flags;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
arm_smmu_write_ctx_desc(master, ssid, cd);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}
/*
* Check if the CPU ASID is available on the SMMU side. If a private context
* descriptor is using it, try to replace it.
@@ -62,7 +79,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
return cd;
}
-smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
smmu = smmu_domain->smmu;
ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
@@ -80,7 +97,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
* be some overlap between use of both ASIDs, until we invalidate the
* TLB.
*/
-arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
/* Invalidate TLB entries previously associated with that context */
arm_smmu_tlb_inv_asid(smmu, asid);
@@ -247,7 +264,7 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
* but disable translation.
*/
-arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, &quiet_cd);
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
@@ -273,8 +290,10 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
struct arm_smmu_master *master;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -304,7 +323,16 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}
-ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
ret = arm_smmu_write_ctx_desc(master, mm->pasid, cd);
if (ret) {
list_for_each_entry_from_reverse(master, &smmu_domain->devices, domain_head)
arm_smmu_write_ctx_desc(master, mm->pasid, NULL);
break;
}
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
if (ret)
goto err_put_notifier;
@@ -329,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
return;
list_del(&smmu_mn->list);
-arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, NULL);
/*
* If we went through clear(), we've already invalidated, and no
@@ -345,8 +374,7 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
arm_smmu_free_shared_cd(cd);
}
-static struct iommu_sva *
-__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
@@ -355,23 +383,13 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (!master || !master->sva_enabled)
-return ERR_PTR(-ENODEV);
return -ENODEV;
-/* If bind() was already called for this {dev, mm} pair, reuse it. */
-list_for_each_entry(bond, &master->bonds, list) {
-if (bond->mm == mm) {
-refcount_inc(&bond->refs);
-return &bond->sva;
-}
-}
bond = kzalloc(sizeof(*bond), GFP_KERNEL);
if (!bond)
-return ERR_PTR(-ENOMEM);
return -ENOMEM;
bond->mm = mm;
-bond->sva.dev = dev;
-refcount_set(&bond->refs, 1);
bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
if (IS_ERR(bond->smmu_mn)) {
@@ -380,11 +398,11 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
}
list_add(&bond->list, &master->bonds);
-return &bond->sva;
return 0;
err_free_bond:
kfree(bond);
-return ERR_PTR(ret);
return ret;
}
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
@@ -550,7 +568,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
}
}
-if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
if (!WARN_ON(!bond)) {
list_del(&bond->list);
arm_smmu_mmu_notifier_put(bond->smmu_mn);
kfree(bond);
@@ -562,13 +580,10 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t id)
{
int ret = 0;
-struct iommu_sva *handle;
struct mm_struct *mm = domain->mm;
mutex_lock(&sva_lock);
-handle = __arm_smmu_sva_bind(dev, mm);
ret = __arm_smmu_sva_bind(dev, mm);
-if (IS_ERR(handle))
-ret = PTR_ERR(handle);
mutex_unlock(&sva_lock);
return ret;
......
@@ -595,13 +595,11 @@ struct arm_smmu_ctx_desc_cfg {
dma_addr_t cdtab_dma;
struct arm_smmu_l1_ctx_desc *l1_desc;
unsigned int num_l1_ents;
-};
-struct arm_smmu_s1_cfg {
-struct arm_smmu_ctx_desc_cfg cdcfg;
-struct arm_smmu_ctx_desc cd;
u8 s1fmt;
/* log2 of the maximum number of CDs supported by this table */
u8 s1cdmax;
/* Whether CD entries in this table have the stall bit set. */
u8 stall_enabled:1;
};
struct arm_smmu_s2_cfg {
@@ -697,6 +695,8 @@ struct arm_smmu_master {
struct arm_smmu_domain *domain;
struct list_head domain_head;
struct arm_smmu_stream *streams;
/* Locked by the iommu core using the group mutex */
struct arm_smmu_ctx_desc_cfg cd_table;
unsigned int num_streams;
bool ats_enabled;
bool stall_enabled;
@@ -719,13 +719,12 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
-bool stall_enabled;
atomic_t nr_ats_masters;
enum arm_smmu_domain_stage stage;
union {
-struct arm_smmu_s1_cfg s1_cfg;
struct arm_smmu_ctx_desc cd;
struct arm_smmu_s2_cfg s2_cfg;
};
struct iommu_domain domain;
@@ -745,7 +744,7 @@ extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;
extern struct arm_smmu_ctx_desc quiet_cd;
-int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid,
struct arm_smmu_ctx_desc *cd);
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
......
@@ -251,6 +251,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sc8280xp-mdss" },
{ .compatible = "qcom,sdm670-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
{ .compatible = "qcom,sm6350-mdss" },
@@ -532,6 +533,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm7150-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
......
@@ -332,12 +332,10 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
return ret;
}
-static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev)
{
struct qcom_iommu_domain *qcom_domain;
-if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
-return NULL;
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -400,6 +398,44 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
return 0;
}
static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct qcom_iommu_domain *qcom_domain;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
unsigned int i;
if (domain == identity_domain || !domain)
return 0;
qcom_domain = to_qcom_iommu_domain(domain);
if (WARN_ON(!qcom_domain->iommu))
return -EINVAL;
pm_runtime_get_sync(qcom_iommu->dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
ctx->domain = NULL;
}
pm_runtime_put_sync(qcom_iommu->dev);
return 0;
}
static struct iommu_domain_ops qcom_iommu_identity_ops = {
.attach_dev = qcom_iommu_identity_attach,
};
static struct iommu_domain qcom_iommu_identity_domain = {
.type = IOMMU_DOMAIN_IDENTITY,
.ops = &qcom_iommu_identity_ops,
};
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -565,8 +601,9 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
}
static const struct iommu_ops qcom_iommu_ops = {
.identity_domain = &qcom_iommu_identity_domain,
.capable = qcom_iommu_capable,
-.domain_alloc = qcom_iommu_domain_alloc,
.domain_alloc_paging = qcom_iommu_domain_alloc_paging,
.probe_device = qcom_iommu_probe_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
......
@@ -24,6 +24,7 @@
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
static struct iommu_domain exynos_identity_domain;
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
@@ -829,7 +830,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
-if (data->domain) {
if (&data->domain->domain != &exynos_identity_domain) {
dev_dbg(data->sysmmu, "saving state\n");
__sysmmu_disable(data);
}
@@ -847,7 +848,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
-if (data->domain) {
if (&data->domain->domain != &exynos_identity_domain) {
dev_dbg(data->sysmmu, "restoring state\n");
__sysmmu_enable(data);
}
@@ -886,7 +887,7 @@ static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
DMA_TO_DEVICE);
}
-static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
{
struct exynos_iommu_domain *domain;
dma_addr_t handle;
@@ -895,9 +896,6 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
/* Check if correct PTE offsets are initialized */
BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
-if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
-return NULL;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
@@ -980,17 +978,20 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
kfree(domain);
}
-static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
struct device *dev)
{
-struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
-phys_addr_t pagetable = virt_to_phys(domain->pgtable);
struct exynos_iommu_domain *domain;
phys_addr_t pagetable;
struct sysmmu_drvdata *data, *next;
unsigned long flags;
-if (!has_sysmmu(dev) || owner->domain != iommu_domain)
-return;
if (owner->domain == identity_domain)
return 0;
domain = to_exynos_domain(owner->domain);
pagetable = virt_to_phys(domain->pgtable);
mutex_lock(&owner->rpm_lock);
@@ -1009,15 +1010,25 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
list_del_init(&data->domain_node);
spin_unlock(&data->lock);
}
-owner->domain = NULL;
owner->domain = identity_domain;
spin_unlock_irqrestore(&domain->lock, flags);
mutex_unlock(&owner->rpm_lock);
-dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
-&pagetable);
dev_dbg(dev, "%s: Restored IOMMU to IDENTITY from pgtable %pa\n",
__func__, &pagetable);
return 0;
}
static struct iommu_domain_ops exynos_identity_ops = {
.attach_dev = exynos_iommu_identity_attach,
};
static struct iommu_domain exynos_identity_domain = {
.type = IOMMU_DOMAIN_IDENTITY,
.ops = &exynos_identity_ops,
};
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
@@ -1026,12 +1037,11 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct sysmmu_drvdata *data;
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
unsigned long flags;
int err;
-if (!has_sysmmu(dev))
-return -ENODEV;
err = exynos_iommu_identity_attach(&exynos_identity_domain, dev);
if (err)
return err;
-if (owner->domain)
-exynos_iommu_detach_device(owner->domain, dev);
mutex_lock(&owner->rpm_lock);
@@ -1219,7 +1229,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*/
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
unsigned long l_iova, phys_addr_t paddr, size_t size,
-int prot, gfp_t gfp)
size_t count, int prot, gfp_t gfp, size_t *mapped)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_pte_t *entry;
@@ -1253,6 +1263,8 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
if (ret)
pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
__func__, ret, size, iova);
else
*mapped = size;
spin_unlock_irqrestore(&domain->pgtablelock, flags);
@@ -1274,7 +1286,7 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain
}
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
-unsigned long l_iova, size_t size,
unsigned long l_iova, size_t size, size_t count,
struct iommu_iotlb_gather *gather)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
@@ -1407,26 +1419,12 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
return &data->iommu;
}
-static void exynos_iommu_set_platform_dma(struct device *dev)
-{
-struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
-if (owner->domain) {
-struct iommu_group *group = iommu_group_get(dev);
-if (group) {
-exynos_iommu_detach_device(owner->domain, dev);
-iommu_group_put(group);
-}
-}
-}
static void exynos_iommu_release_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
-exynos_iommu_set_platform_dma(dev);
WARN_ON(exynos_iommu_identity_attach(&exynos_identity_domain, dev));
list_for_each_entry(data, &owner->controllers, owner_node)
device_link_del(data->link);
@@ -1457,6 +1455,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);
owner->domain = &exynos_identity_domain;
dev_iommu_priv_set(dev, owner);
}
@@ -1471,19 +1470,17 @@ static int exynos_iommu_of_xlate(struct device *dev,
}
static const struct iommu_ops exynos_iommu_ops = {
-.domain_alloc = exynos_iommu_domain_alloc,
.identity_domain = &exynos_identity_domain,
.domain_alloc_paging = exynos_iommu_domain_alloc_paging,
.device_group = generic_device_group,
-#ifdef CONFIG_ARM
-.set_platform_dma_ops = exynos_iommu_set_platform_dma,
-#endif
.probe_device = exynos_iommu_probe_device,
.release_device = exynos_iommu_release_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
.of_xlate = exynos_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = exynos_iommu_attach_device,
-.map = exynos_iommu_map,
.map_pages = exynos_iommu_map,
-.unmap = exynos_iommu_unmap,
.unmap_pages = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
.free = exynos_iommu_domain_free,
}
......
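The exynos conversion above switches the driver from the single-page ->map()/->unmap() callbacks to ->map_pages()/->unmap_pages(), which take a page size, a page count and, for map, an output parameter reporting how much was actually mapped. A rough sketch of that contract (editor's illustration only, not code from this commit; foo_map_one() is an invented helper):

static int foo_map_pages(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped)
{
	size_t done = 0;
	int ret = 0;

	while (pgcount--) {
		ret = foo_map_one(domain, iova + done, paddr + done,
				  pgsize, prot, gfp);
		if (ret)
			break;
		done += pgsize;
	}

	/* Report how much was actually mapped so the core can continue
	 * from here, or unwind exactly this much on error. */
	*mapped = done;
	return ret;
}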
@@ -196,6 +196,13 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
struct fsl_dma_domain *dma_domain;
/*
* FIXME: This isn't creating an unmanaged domain since the
* default_domain_ops do not have any map/unmap function it doesn't meet
* the requirements for __IOMMU_DOMAIN_PAGING. The only purpose seems to
* allow drivers/soc/fsl/qbman/qman_portal.c to do
* fsl_pamu_configure_l1_stash()
*/
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
@@ -283,15 +290,33 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
return ret;
}
-static void fsl_pamu_set_platform_dma(struct device *dev)
/*
* FIXME: fsl/pamu is completely broken in terms of how it works with the iommu
* API. Immediately after probe the HW is left in an IDENTITY translation and
* the driver provides a non-working UNMANAGED domain that it can switch over
* to. However it cannot switch back to an IDENTITY translation, instead it
* switches to what looks like BLOCKING.
*/
static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
struct fsl_dma_domain *dma_domain;
const u32 *prop;
int len;
struct pci_dev *pdev = NULL;
struct pci_controller *pci_ctl;
/*
* Hack to keep things working as they always have, only leaving an
* UNMANAGED domain makes it BLOCKING.
*/
if (domain == platform_domain || !domain ||
domain->type != IOMMU_DOMAIN_UNMANAGED)
return 0;
dma_domain = to_fsl_dma_domain(domain);
/*
* Use LIODN of the PCI controller while detaching a
* PCI device.
@@ -312,8 +337,18 @@ static void fsl_pamu_set_platform_dma(struct device *dev)
detach_device(dev, dma_domain);
else
pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
return 0;
}
static struct iommu_domain_ops fsl_pamu_platform_ops = {
.attach_dev = fsl_pamu_platform_attach,
};
static struct iommu_domain fsl_pamu_platform_domain = {
.type = IOMMU_DOMAIN_PLATFORM,
.ops = &fsl_pamu_platform_ops,
};
/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
@@ -395,11 +430,11 @@ static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
}
static const struct iommu_ops fsl_pamu_ops = {
.default_domain = &fsl_pamu_platform_domain,
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
.probe_device = fsl_pamu_probe_device,
.device_group = fsl_pamu_device_group,
-.set_platform_dma_ops = fsl_pamu_set_platform_dma,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = fsl_pamu_attach_device,
.iova_to_phys = fsl_pamu_iova_to_phys,
......
@@ -716,12 +716,18 @@ struct device_domain_info {
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
struct pasid_table *pasid_table; /* pasid table */
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
struct dentry *debugfs_dentry; /* pointer to device directory dentry */
#endif
};
struct dev_pasid_info {
struct list_head link_domain; /* link to domain siblings */
struct device *dev;
ioasid_t pasid;
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
struct dentry *debugfs_dentry; /* pointer to pasid directory dentry */
#endif
};
static inline void __iommu_flush_cache(
@@ -883,8 +889,16 @@ static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
void intel_iommu_debugfs_create_dev(struct device_domain_info *info);
void intel_iommu_debugfs_remove_dev(struct device_domain_info *info);
void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid);
void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid);
#else
static inline void intel_iommu_debugfs_init(void) {}
static inline void intel_iommu_debugfs_create_dev(struct device_domain_info *info) {}
static inline void intel_iommu_debugfs_remove_dev(struct device_domain_info *info) {}
static inline void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid) {}
static inline void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
extern const struct attribute_group *intel_iommu_groups[];
......
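The intel header changes above add per-device and per-PASID debugfs hooks and pair them with empty static inlines when CONFIG_INTEL_IOMMU_DEBUGFS is off, so call sites need no #ifdefs. A hedged sketch of how such a hook is typically invoked (the foo_* call sites are invented for illustration, not code from this commit):

static void foo_device_probed(struct device_domain_info *info)
{
	/* Compiles to a no-op when CONFIG_INTEL_IOMMU_DEBUGFS is disabled. */
	intel_iommu_debugfs_create_dev(info);
}

static void foo_device_released(struct device_domain_info *info)
{
	intel_iommu_debugfs_remove_dev(info);
}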
@@ -80,7 +80,7 @@ struct omap_iommu {
u32 id;
struct iommu_device iommu;
-struct iommu_group *group;
bool has_iommu_driver;
u8 pwrst;
};
......