Commit 26ac2b6e authored by Joerg Roedel


Merge branch 'for-joerg/arm-smmu/updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu
parents 085b7755 bc580b56
@@ -1028,6 +1028,14 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
 	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
 }
 
+static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
+{
+	struct acpi_iort_root_complex *pci_rc;
+
+	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
+	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
+}
+
 /**
  * iort_iommu_configure - Set-up IOMMU configuration for a device.
  *
@@ -1063,6 +1071,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 		info.node = node;
 		err = pci_for_each_dma_alias(to_pci_dev(dev),
 					     iort_pci_iommu_init, &info);
+
+		if (!err && iort_pci_rc_supports_ats(node))
+			dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
 	} else {
 		int i = 0;
......
@@ -359,6 +359,31 @@ config ARM_SMMU
 	  Say Y here if your SoC includes an IOMMU device implementing
 	  the ARM SMMU architecture.
 
+config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
+	bool "Default to disabling bypass on ARM SMMU v1 and v2"
+	depends on ARM_SMMU
+	default y
+	help
+	  Say Y here to (by default) disable bypass streams such that
+	  incoming transactions from devices that are not attached to
+	  an iommu domain will report an abort back to the device and
+	  will not be allowed to pass through the SMMU.
+
+	  Any old kernels that existed before this KConfig was
+	  introduced would default to _allowing_ bypass (AKA the
+	  equivalent of NO for this config). However the default for
+	  this option is YES because the old behavior is insecure.
+
+	  There are few reasons to allow unmatched stream bypass, and
+	  even fewer good ones. If saying YES here breaks your board
+	  you should work on fixing your board. This KConfig option
+	  is expected to be removed in the future and we'll simply
+	  hardcode the bypass disable in the code.
+
+	  NOTE: the kernel command line parameter
+	  'arm-smmu.disable_bypass' will continue to override this
+	  config.
+
 config ARM_SMMU_V3
 	bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
 	depends on ARM64
......
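In hardware terms, this option governs how the SMMUv1/v2 global configuration treats transactions whose StreamID does not match any stream mapping entry. Below is a minimal sketch of the policy decision involved; the sCR0.USFCFG (Unidentified Stream Fault Config) field is defined by the SMMU architecture, but the helper name, structure, and the assumed bit position are illustrative rather than a copy of the driver's code.

/*
 * Illustrative only: apply a "disable bypass" policy to an SMMUv1/v2
 * global configuration register value. With USFCFG set, transactions
 * from unmatched StreamIDs abort instead of bypassing translation.
 */
#define sCR0_USFCFG		(1 << 10)	/* assumed bit position, per SMMUv2 sCR0.USFCFG */

static u32 example_apply_bypass_policy(u32 scr0, bool disable_bypass)
{
	if (disable_bypass)
		scr0 |= sCR0_USFCFG;	/* unmatched streams -> abort */
	else
		scr0 &= ~sCR0_USFCFG;	/* unmatched streams -> bypass */
	return scr0;
}

As the help text notes, the build-time default only seeds the policy; the arm-smmu.c hunk further down keeps it overridable at boot via the 'arm-smmu.disable_bypass' module parameter.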
@@ -147,6 +147,8 @@ enum arm_smmu_s2cr_privcfg {
 #define CBAR_IRPTNDX_SHIFT		24
 #define CBAR_IRPTNDX_MASK		0xff
 
+#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
+
 #define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
 #define CBA2R_RW64_32BIT		(0 << 0)
 #define CBA2R_RW64_64BIT		(1 << 0)
......
@@ -110,7 +110,8 @@ static int force_stage;
 module_param(force_stage, int, S_IRUGO);
 MODULE_PARM_DESC(force_stage,
 	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
-static bool disable_bypass;
+static bool disable_bypass =
+	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
 module_param(disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
@@ -569,12 +570,13 @@ static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
-	u32 fsr, fsynr;
+	u32 fsr, fsynr, cbfrsynra;
 	unsigned long iova;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
 	void __iomem *cb_base;
 
 	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -585,10 +587,11 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
 	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
 
 	dev_err_ratelimited(smmu->dev,
-			    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-			    fsr, iova, fsynr, cfg->cbndx);
+			    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+			    fsr, iova, fsynr, cbfrsynra, cfg->cbndx);
 
 	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
 
 	return IRQ_HANDLED;
......
@@ -392,10 +392,14 @@ struct iommu_fwspec {
 	const struct iommu_ops	*ops;
 	struct fwnode_handle	*iommu_fwnode;
 	void			*iommu_priv;
+	u32			flags;
 	unsigned int		num_ids;
 	u32			ids[1];
 };
 
+/* ATS is supported */
+#define IOMMU_FWSPEC_PCI_RC_ATS	(1 << 0)
+
 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
 		      const struct iommu_ops *ops);
 void iommu_fwspec_free(struct device *dev);
......
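The new flags field gives firmware glue (such as the IORT change above, which sets IOMMU_FWSPEC_PCI_RC_ATS when the root complex advertises ATS) a place to record per-device properties for the IOMMU driver to consult. A minimal sketch of how a driver might gate ATS enablement on that flag follows; the helper name is hypothetical and the check is illustrative, not a copy of any in-tree driver.

#include <linux/iommu.h>
#include <linux/pci.h>

/* Hypothetical helper: only consider ATS when the platform says it is usable. */
static bool example_can_enable_ats(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	return dev_is_pci(dev) &&		/* ATS is a PCIe feature */
	       !pci_ats_disabled() &&		/* not disabled on the command line */
	       fwspec &&
	       (fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS);
}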
@@ -1521,21 +1521,6 @@ static inline void pcie_ecrc_get_policy(char *str) { }
 bool pci_ats_disabled(void);
 
-#ifdef CONFIG_PCI_ATS
-/* Address Translation Service */
-void pci_ats_init(struct pci_dev *dev);
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-int pci_ats_page_aligned(struct pci_dev *dev);
-#else
-static inline void pci_ats_init(struct pci_dev *d) { }
-static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
-static inline void pci_disable_ats(struct pci_dev *d) { }
-static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
-static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
-#endif
-
 #ifdef CONFIG_PCIE_PTM
 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
 #else
@@ -1728,8 +1713,24 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
							 struct pci_dev *dev)
 { return NULL; }
+static inline bool pci_ats_disabled(void) { return true; }
 #endif /* CONFIG_PCI */
 
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+void pci_ats_init(struct pci_dev *dev);
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
+#else
+static inline void pci_ats_init(struct pci_dev *d) { }
+static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
+#endif
+
 /* Include architecture-dependent settings and functions */
 #include <asm/pci.h>
......
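Moving the ATS prototypes (and a pci_ats_disabled() stub) outside the CONFIG_PCI block lets callers use them unconditionally and rely on the inline stubs when the support is compiled out. A minimal usage sketch follows; the caller name is hypothetical and the page-shift choice and error handling are illustrative:

#include <linux/mm.h>	/* PAGE_SHIFT */
#include <linux/pci.h>

/* Hypothetical caller: try to turn on ATS for a PCI device. */
static int example_setup_ats(struct pci_dev *pdev)
{
	int ret;

	if (pci_ats_disabled())
		return -EPERM;	/* administratively disabled, e.g. via "pci=noats" */

	/* Request ATS with the CPU page size as the smallest translation unit. */
	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;	/* the stubs above return -ENODEV when ATS is compiled out */

	dev_dbg(&pdev->dev, "ATS enabled, invalidate queue depth %d\n",
		pci_ats_queue_depth(pdev));
	return 0;
}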