Commit 67a8c7d6 authored by Linus Torvalds

Merge tag 'iommu-fixes-v4.8-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:
 "Fixes from Will Deacon:

   - fix a couple of thinkos in the CMDQ error handling and
     short-descriptor page table code that have been there since day one

   - disable stalling faults, since they may result in hardware deadlock

   - fix an accidental BUG() when passing disable_bypass=1 on the
     cmdline"

* tag 'iommu-fixes-v4.8-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/arm-smmu: Don't BUG() if we find aborting STEs with disable_bypass
  iommu/arm-smmu: Disable stalling faults for all endpoints
  iommu/arm-smmu: Fix CMDQ error handling
  iommu/io-pgtable-arm-v7s: Fix attributes when splitting blocks
parents fd1ae514 4df36185
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
          * We may have concurrent producers, so we need to be careful
          * not to touch any of the shadow cmdq state.
          */
-        queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+        queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
         dev_err(smmu->dev, "skipping command in error state:\n");
         for (i = 0; i < ARRAY_SIZE(cmd); ++i)
                 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
                 return;
         }
 
-        queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+        queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
 }
 
 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
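
In the two hunks above (arm_smmu_cmdq_skip_err() in arm-smmu-v3.c), `idx` is the decoded error-reason field of the CONS register rather than a queue index, so the offending command has to be read from, and written back to, the entry at the consumer index `cons`; the write-back also passes the queue slot as the destination (first) argument. Below is a minimal standalone sketch of that read-modify-write-back pattern with a destination-first copy helper; `fake_queue`, `slot_of` and `copy_dwords` are illustrative names, not the driver's API.

/*
 * Standalone sketch (not the kernel driver): models the dword-copy
 * convention used above, where the first argument is the destination.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENT_DWORDS      2       /* one command = two 64-bit words */
#define QUEUE_ENTRIES   4

struct fake_queue {
        uint64_t ents[QUEUE_ENTRIES * ENT_DWORDS];
        unsigned int cons;      /* index of the entry that faulted */
};

/* Destination-first copy, mirroring the queue_write(dst, src, n) shape. */
static void copy_dwords(uint64_t *dst, const uint64_t *src, size_t n_dwords)
{
        memcpy(dst, src, n_dwords * sizeof(*dst));
}

static uint64_t *slot_of(struct fake_queue *q, unsigned int idx)
{
        return &q->ents[(idx % QUEUE_ENTRIES) * ENT_DWORDS];
}

int main(void)
{
        struct fake_queue q = { .cons = 2 };
        uint64_t cmd[ENT_DWORDS];

        /* Pretend entry 2 holds the command that put the queue in error state. */
        slot_of(&q, q.cons)[0] = 0xdeadbeefull;

        /* Read the offending entry into a local buffer... */
        copy_dwords(cmd, slot_of(&q, q.cons), ENT_DWORDS);

        /* ...patch it (the driver turns it into a CMD_SYNC)... */
        cmd[0] = 0x46ull;       /* stand-in for the replacement opcode */

        /* ...and write it back to the SAME slot: destination comes first. */
        copy_dwords(slot_of(&q, q.cons), cmd, ENT_DWORDS);

        printf("entry %u now 0x%llx\n", q.cons,
               (unsigned long long)slot_of(&q, q.cons)[0]);
        return 0;
}
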
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
         case STRTAB_STE_0_CFG_S2_TRANS:
                 ste_live = true;
                 break;
+        case STRTAB_STE_0_CFG_ABORT:
+                if (disable_bypass)
+                        break;
         default:
                 BUG(); /* STE corruption */
         }
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
-        int flags, ret;
-        u32 fsr, fsynr, resume;
+        u32 fsr, fsynr;
         unsigned long iova;
         struct iommu_domain *domain = dev;
         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
         if (!(fsr & FSR_FAULT))
                 return IRQ_NONE;
 
-        if (fsr & FSR_IGN)
-                dev_err_ratelimited(smmu->dev,
-                                    "Unexpected context fault (fsr 0x%x)\n",
-                                    fsr);
-
         fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
-        flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
-
         iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
-        if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
-                ret = IRQ_HANDLED;
-                resume = RESUME_RETRY;
-        } else {
-                dev_err_ratelimited(smmu->dev,
-                    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-                    iova, fsynr, cfg->cbndx);
-                ret = IRQ_NONE;
-                resume = RESUME_TERMINATE;
-        }
 
-        /* Clear the faulting FSR */
+        dev_err_ratelimited(smmu->dev,
+        "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+                            fsr, iova, fsynr, cfg->cbndx);
+
         writel(fsr, cb_base + ARM_SMMU_CB_FSR);
-
-        /* Retry or terminate any stalled transactions */
-        if (fsr & FSR_SS)
-                writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
-
-        return ret;
+        return IRQ_HANDLED;
 }
 
 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
         }
 
         /* SCTLR */
-        reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+        reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
         if (stage1)
                 reg |= SCTLR_S1_ASIDPNE;
 #ifdef __BIG_ENDIAN
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
         int prot = IOMMU_READ;
         arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
 
-        if (attr & ARM_V7S_PTE_AP_RDONLY)
+        if (!(attr & ARM_V7S_PTE_AP_RDONLY))
                 prot |= IOMMU_WRITE;
         if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
                 prot |= IOMMU_MMIO;
         else if (pte & ARM_V7S_ATTR_C)
                 prot |= IOMMU_CACHE;
+        if (pte & ARM_V7S_ATTR_XN(lvl))
+                prot |= IOMMU_NOEXEC;
 
         return prot;
 }
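
arm_v7s_pte_to_prot() (io-pgtable-arm-v7s.c) decodes a hardware PTE back into generic IOMMU_* flags, and the block-splitting path re-encodes those flags into the replacement PTEs, so the inverted AP_RDONLY test (writeable only when the read-only bit is clear) and the XN-to-IOMMU_NOEXEC decode keep that round trip faithful. A self-contained sketch of the round trip follows; the EX_* bit names and positions are made up for illustration and are not the kernel's definitions.

/*
 * Standalone sketch: why the read-only test has to be inverted when
 * decoding attributes that will be re-encoded after a block split.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_READ         (1 << 0)
#define EX_WRITE        (1 << 1)
#define EX_NOEXEC       (1 << 2)

#define EX_PTE_RDONLY   (1 << 5)        /* hardware "read-only" bit */
#define EX_PTE_XN       (1 << 6)        /* hardware "execute-never" bit */

/* Decode hardware attributes into generic prot flags. */
static int pte_to_prot(uint32_t pte)
{
        int prot = EX_READ;

        if (!(pte & EX_PTE_RDONLY))     /* writeable iff RDONLY is clear */
                prot |= EX_WRITE;
        if (pte & EX_PTE_XN)
                prot |= EX_NOEXEC;
        return prot;
}

/* Encode generic prot flags back into hardware attributes. */
static uint32_t prot_to_pte(int prot)
{
        uint32_t pte = 0;

        if (!(prot & EX_WRITE))
                pte |= EX_PTE_RDONLY;
        if (prot & EX_NOEXEC)
                pte |= EX_PTE_XN;
        return pte;
}

int main(void)
{
        /* A writeable, executable mapping: neither RDONLY nor XN set. */
        uint32_t block_pte = 0;

        /*
         * Splitting a block re-derives the prot and re-encodes it; with the
         * inverted test the round trip preserves the original attributes.
         */
        uint32_t split_pte = prot_to_pte(pte_to_prot(block_pte));

        printf("round trip %s\n", split_pte == block_pte ? "ok" : "broken");
        return 0;
}
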