Commit 1f5b3c3f authored by Thomas Gleixner's avatar Thomas Gleixner Committed by Ingo Molnar

locking, x86, iommu: Annotate iommu->register_lock as raw

The iommu->register_lock can be taken in atomic context and therefore
must not be preempted on -rt - annotate it.

In mainline this change merely documents the low-level nature of
the lock — otherwise there is no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 289b4e7a
...@@ -800,7 +800,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) ...@@ -800,7 +800,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
(unsigned long long)iommu->cap, (unsigned long long)iommu->cap,
(unsigned long long)iommu->ecap); (unsigned long long)iommu->ecap);
spin_lock_init(&iommu->register_lock); raw_spin_lock_init(&iommu->register_lock);
drhd->iommu = iommu; drhd->iommu = iommu;
return 0; return 0;
...@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu *iommu) ...@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
if (!ecap_qis(iommu->ecap)) if (!ecap_qis(iommu->ecap))
return; return;
spin_lock_irqsave(&iommu->register_lock, flags); raw_spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_QIES)) if (!(sts & DMA_GSTS_QIES))
...@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu *iommu) ...@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
!(sts & DMA_GSTS_QIES), sts); !(sts & DMA_GSTS_QIES), sts);
end: end:
spin_unlock_irqrestore(&iommu->register_lock, flags); raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
} }
/* /*
...@@ -1097,7 +1097,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu) ...@@ -1097,7 +1097,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
qi->free_head = qi->free_tail = 0; qi->free_head = qi->free_tail = 0;
qi->free_cnt = QI_LENGTH; qi->free_cnt = QI_LENGTH;
spin_lock_irqsave(&iommu->register_lock, flags); raw_spin_lock_irqsave(&iommu->register_lock, flags);
/* write zero to the tail reg */ /* write zero to the tail reg */
writel(0, iommu->reg + DMAR_IQT_REG); writel(0, iommu->reg + DMAR_IQT_REG);
...@@ -1110,7 +1110,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu) ...@@ -1110,7 +1110,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
/* Make sure hardware complete it */ /* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags); raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
} }
/* /*
...@@ -1225,11 +1225,11 @@ void dmar_msi_unmask(struct irq_data *data) ...@@ -1225,11 +1225,11 @@ void dmar_msi_unmask(struct irq_data *data)
unsigned long flag; unsigned long flag;
/* unmask it */ /* unmask it */
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(0, iommu->reg + DMAR_FECTL_REG); writel(0, iommu->reg + DMAR_FECTL_REG);
/* Read a reg to force flush the post write */ /* Read a reg to force flush the post write */
readl(iommu->reg + DMAR_FECTL_REG); readl(iommu->reg + DMAR_FECTL_REG);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
void dmar_msi_mask(struct irq_data *data) void dmar_msi_mask(struct irq_data *data)
...@@ -1238,11 +1238,11 @@ void dmar_msi_mask(struct irq_data *data) ...@@ -1238,11 +1238,11 @@ void dmar_msi_mask(struct irq_data *data)
struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
/* mask it */ /* mask it */
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
/* Read a reg to force flush the post write */ /* Read a reg to force flush the post write */
readl(iommu->reg + DMAR_FECTL_REG); readl(iommu->reg + DMAR_FECTL_REG);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
void dmar_msi_write(int irq, struct msi_msg *msg) void dmar_msi_write(int irq, struct msi_msg *msg)
...@@ -1250,11 +1250,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg) ...@@ -1250,11 +1250,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
struct intel_iommu *iommu = irq_get_handler_data(irq); struct intel_iommu *iommu = irq_get_handler_data(irq);
unsigned long flag; unsigned long flag;
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(msg->data, iommu->reg + DMAR_FEDATA_REG); writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
void dmar_msi_read(int irq, struct msi_msg *msg) void dmar_msi_read(int irq, struct msi_msg *msg)
...@@ -1262,11 +1262,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg) ...@@ -1262,11 +1262,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
struct intel_iommu *iommu = irq_get_handler_data(irq); struct intel_iommu *iommu = irq_get_handler_data(irq);
unsigned long flag; unsigned long flag;
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
msg->data = readl(iommu->reg + DMAR_FEDATA_REG); msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
static int dmar_fault_do_one(struct intel_iommu *iommu, int type, static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
...@@ -1303,7 +1303,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id) ...@@ -1303,7 +1303,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
u32 fault_status; u32 fault_status;
unsigned long flag; unsigned long flag;
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG); fault_status = readl(iommu->reg + DMAR_FSTS_REG);
if (fault_status) if (fault_status)
printk(KERN_ERR "DRHD: handling fault status reg %x\n", printk(KERN_ERR "DRHD: handling fault status reg %x\n",
...@@ -1342,7 +1342,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id) ...@@ -1342,7 +1342,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
writel(DMA_FRCD_F, iommu->reg + reg + writel(DMA_FRCD_F, iommu->reg + reg +
fault_index * PRIMARY_FAULT_REG_LEN + 12); fault_index * PRIMARY_FAULT_REG_LEN + 12);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
dmar_fault_do_one(iommu, type, fault_reason, dmar_fault_do_one(iommu, type, fault_reason,
source_id, guest_addr); source_id, guest_addr);
...@@ -1350,14 +1350,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id) ...@@ -1350,14 +1350,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
fault_index++; fault_index++;
if (fault_index >= cap_num_fault_regs(iommu->cap)) if (fault_index >= cap_num_fault_regs(iommu->cap))
fault_index = 0; fault_index = 0;
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
} }
clear_rest: clear_rest:
/* clear all the other faults */ /* clear all the other faults */
fault_status = readl(iommu->reg + DMAR_FSTS_REG); fault_status = readl(iommu->reg + DMAR_FSTS_REG);
writel(fault_status, iommu->reg + DMAR_FSTS_REG); writel(fault_status, iommu->reg + DMAR_FSTS_REG);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
......
...@@ -932,7 +932,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu) ...@@ -932,7 +932,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
addr = iommu->root_entry; addr = iommu->root_entry;
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
...@@ -941,7 +941,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu) ...@@ -941,7 +941,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_RTPS), sts); readl, (sts & DMA_GSTS_RTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
static void iommu_flush_write_buffer(struct intel_iommu *iommu) static void iommu_flush_write_buffer(struct intel_iommu *iommu)
...@@ -952,14 +952,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu) ...@@ -952,14 +952,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
if (!rwbf_quirk && !cap_rwbf(iommu->cap)) if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return; return;
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */ /* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(val & DMA_GSTS_WBFS)), val); readl, (!(val & DMA_GSTS_WBFS)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
/* return value determine if we need a write buffer flush */ /* return value determine if we need a write buffer flush */
...@@ -986,14 +986,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu, ...@@ -986,14 +986,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
} }
val |= DMA_CCMD_ICC; val |= DMA_CCMD_ICC;
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
/* Make sure hardware complete it */ /* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
dmar_readq, (!(val & DMA_CCMD_ICC)), val); dmar_readq, (!(val & DMA_CCMD_ICC)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
/* return value determine if we need a write buffer flush */ /* return value determine if we need a write buffer flush */
...@@ -1032,7 +1032,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, ...@@ -1032,7 +1032,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
if (cap_write_drain(iommu->cap)) if (cap_write_drain(iommu->cap))
val |= DMA_TLB_WRITE_DRAIN; val |= DMA_TLB_WRITE_DRAIN;
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
/* Note: Only uses first TLB reg currently */ /* Note: Only uses first TLB reg currently */
if (val_iva) if (val_iva)
dmar_writeq(iommu->reg + tlb_offset, val_iva); dmar_writeq(iommu->reg + tlb_offset, val_iva);
...@@ -1042,7 +1042,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, ...@@ -1042,7 +1042,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
IOMMU_WAIT_OP(iommu, tlb_offset + 8, IOMMU_WAIT_OP(iommu, tlb_offset + 8,
dmar_readq, (!(val & DMA_TLB_IVT)), val); dmar_readq, (!(val & DMA_TLB_IVT)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* check IOTLB invalidation granularity */ /* check IOTLB invalidation granularity */
if (DMA_TLB_IAIG(val) == 0) if (DMA_TLB_IAIG(val) == 0)
...@@ -1158,7 +1158,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) ...@@ -1158,7 +1158,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
u32 pmen; u32 pmen;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&iommu->register_lock, flags); raw_spin_lock_irqsave(&iommu->register_lock, flags);
pmen = readl(iommu->reg + DMAR_PMEN_REG); pmen = readl(iommu->reg + DMAR_PMEN_REG);
pmen &= ~DMA_PMEN_EPM; pmen &= ~DMA_PMEN_EPM;
writel(pmen, iommu->reg + DMAR_PMEN_REG); writel(pmen, iommu->reg + DMAR_PMEN_REG);
...@@ -1167,7 +1167,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) ...@@ -1167,7 +1167,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
readl, !(pmen & DMA_PMEN_PRS), pmen); readl, !(pmen & DMA_PMEN_PRS), pmen);
spin_unlock_irqrestore(&iommu->register_lock, flags); raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
} }
static int iommu_enable_translation(struct intel_iommu *iommu) static int iommu_enable_translation(struct intel_iommu *iommu)
...@@ -1175,7 +1175,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu) ...@@ -1175,7 +1175,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
u32 sts; u32 sts;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&iommu->register_lock, flags); raw_spin_lock_irqsave(&iommu->register_lock, flags);
iommu->gcmd |= DMA_GCMD_TE; iommu->gcmd |= DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
...@@ -1183,7 +1183,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu) ...@@ -1183,7 +1183,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_TES), sts); readl, (sts & DMA_GSTS_TES), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags); raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0; return 0;
} }
...@@ -1192,7 +1192,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu) ...@@ -1192,7 +1192,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
u32 sts; u32 sts;
unsigned long flag; unsigned long flag;
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->gcmd &= ~DMA_GCMD_TE; iommu->gcmd &= ~DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
...@@ -1200,7 +1200,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu) ...@@ -1200,7 +1200,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(sts & DMA_GSTS_TES)), sts); readl, (!(sts & DMA_GSTS_TES)), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
return 0; return 0;
} }
...@@ -3320,7 +3320,7 @@ static int iommu_suspend(void) ...@@ -3320,7 +3320,7 @@ static int iommu_suspend(void)
for_each_active_iommu(iommu, drhd) { for_each_active_iommu(iommu, drhd) {
iommu_disable_translation(iommu); iommu_disable_translation(iommu);
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->iommu_state[SR_DMAR_FECTL_REG] = iommu->iommu_state[SR_DMAR_FECTL_REG] =
readl(iommu->reg + DMAR_FECTL_REG); readl(iommu->reg + DMAR_FECTL_REG);
...@@ -3331,7 +3331,7 @@ static int iommu_suspend(void) ...@@ -3331,7 +3331,7 @@ static int iommu_suspend(void)
iommu->iommu_state[SR_DMAR_FEUADDR_REG] = iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
readl(iommu->reg + DMAR_FEUADDR_REG); readl(iommu->reg + DMAR_FEUADDR_REG);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
return 0; return 0;
...@@ -3358,7 +3358,7 @@ static void iommu_resume(void) ...@@ -3358,7 +3358,7 @@ static void iommu_resume(void)
for_each_active_iommu(iommu, drhd) { for_each_active_iommu(iommu, drhd) {
spin_lock_irqsave(&iommu->register_lock, flag); raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(iommu->iommu_state[SR_DMAR_FECTL_REG], writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
iommu->reg + DMAR_FECTL_REG); iommu->reg + DMAR_FECTL_REG);
...@@ -3369,7 +3369,7 @@ static void iommu_resume(void) ...@@ -3369,7 +3369,7 @@ static void iommu_resume(void)
writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
iommu->reg + DMAR_FEUADDR_REG); iommu->reg + DMAR_FEUADDR_REG);
spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
for_each_active_iommu(iommu, drhd) for_each_active_iommu(iommu, drhd)
......
...@@ -409,7 +409,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) ...@@ -409,7 +409,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
addr = virt_to_phys((void *)iommu->ir_table->base); addr = virt_to_phys((void *)iommu->ir_table->base);
spin_lock_irqsave(&iommu->register_lock, flags); raw_spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg + DMAR_IRTA_REG, dmar_writeq(iommu->reg + DMAR_IRTA_REG,
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
...@@ -420,7 +420,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) ...@@ -420,7 +420,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts); readl, (sts & DMA_GSTS_IRTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags); raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
/* /*
* global invalidation of interrupt entry cache before enabling * global invalidation of interrupt entry cache before enabling
...@@ -428,7 +428,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) ...@@ -428,7 +428,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
*/ */
qi_global_iec(iommu); qi_global_iec(iommu);
spin_lock_irqsave(&iommu->register_lock, flags); raw_spin_lock_irqsave(&iommu->register_lock, flags);
/* Enable interrupt-remapping */ /* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE; iommu->gcmd |= DMA_GCMD_IRE;
...@@ -437,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) ...@@ -437,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts); readl, (sts & DMA_GSTS_IRES), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags); raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
} }
...@@ -485,7 +485,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu) ...@@ -485,7 +485,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
*/ */
qi_global_iec(iommu); qi_global_iec(iommu);
spin_lock_irqsave(&iommu->register_lock, flags); raw_spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_IRES)) if (!(sts & DMA_GSTS_IRES))
...@@ -498,7 +498,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu) ...@@ -498,7 +498,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
readl, !(sts & DMA_GSTS_IRES), sts); readl, !(sts & DMA_GSTS_IRES), sts);
end: end:
spin_unlock_irqrestore(&iommu->register_lock, flags); raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
} }
int __init intr_remapping_supported(void) int __init intr_remapping_supported(void)
......
...@@ -311,7 +311,7 @@ struct intel_iommu { ...@@ -311,7 +311,7 @@ struct intel_iommu {
u64 cap; u64 cap;
u64 ecap; u64 ecap;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
spinlock_t register_lock; /* protect register handling */ raw_spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */ int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */ int agaw; /* agaw of this iommu */
int msagaw; /* max sagaw of this iommu */ int msagaw; /* max sagaw of this iommu */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment