Commit eb4a52bc authored by Fenghua Yu, committed by David Woodhouse

Intel IOMMU Suspend/Resume Support - Queued Invalidation

This patch supports queued invalidation suspend/resume.
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent f59c7b69
...@@ -789,6 +789,35 @@ void dmar_disable_qi(struct intel_iommu *iommu) ...@@ -789,6 +789,35 @@ void dmar_disable_qi(struct intel_iommu *iommu)
spin_unlock_irqrestore(&iommu->register_lock, flags); spin_unlock_irqrestore(&iommu->register_lock, flags);
} }
/*
 * Enable queued invalidation.
 *
 * Resets the software queue state (free list head/tail/count), programs
 * the invalidation queue base/tail registers, and sets the QIE bit in
 * the global command register, waiting for hardware to acknowledge it.
 * Caller must ensure no invalidation requests are pending (the queue is
 * restarted from index 0).
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	/* Restart the queue from a clean state. */
	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	/*
	 * Latch QIE into the cached gcmd and write it out directly;
	 * the original staged the same value in a redundant local 'cmd'.
	 */
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/* /*
* Enable Queued Invalidation interface. This is a must to support * Enable Queued Invalidation interface. This is a must to support
* interrupt-remapping. Also used by DMA-remapping, which replaces * interrupt-remapping. Also used by DMA-remapping, which replaces
...@@ -796,8 +825,6 @@ void dmar_disable_qi(struct intel_iommu *iommu) ...@@ -796,8 +825,6 @@ void dmar_disable_qi(struct intel_iommu *iommu)
*/ */
int dmar_enable_qi(struct intel_iommu *iommu) int dmar_enable_qi(struct intel_iommu *iommu)
{ {
u32 cmd, sts;
unsigned long flags;
struct q_inval *qi; struct q_inval *qi;
if (!ecap_qis(iommu->ecap)) if (!ecap_qis(iommu->ecap))
...@@ -835,19 +862,7 @@ int dmar_enable_qi(struct intel_iommu *iommu) ...@@ -835,19 +862,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
spin_lock_init(&qi->q_lock); spin_lock_init(&qi->q_lock);
spin_lock_irqsave(&iommu->register_lock, flags); __dmar_enable_qi(iommu);
/* write zero to the tail reg */
writel(0, iommu->reg + DMAR_IQT_REG);
dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
cmd = iommu->gcmd | DMA_GCMD_QIE;
iommu->gcmd |= DMA_GCMD_QIE;
writel(cmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0; return 0;
} }
...@@ -1102,3 +1117,28 @@ int __init enable_drhd_fault_handling(void) ...@@ -1102,3 +1117,28 @@ int __init enable_drhd_fault_handling(void)
return 0; return 0;
} }
/*
 * Re-enable the Queued Invalidation interface, e.g. after a
 * suspend/resume cycle.
 *
 * Returns 0 on success, -ENOENT if the hardware does not support QI
 * or the QI state was never initialized for this IOMMU.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	/* QI must be supported by hardware and already set up. */
	if (!ecap_qis(iommu->ecap) || !iommu->qi)
		return -ENOENT;

	/*
	 * Quiesce the interface first: once disabled there can be no
	 * pending invalidation requests, so it is safe to turn queued
	 * invalidation back on with a fresh queue.
	 */
	dmar_disable_qi(iommu);
	__dmar_enable_qi(iommu);

	return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment