Commit 6770ab5c authored by Linus Torvalds

Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6:
  Admit to maintaining VT-d, for my sins.
  dmar: fix uninitialised 'ret' variable in dmar_parse_dev()
  intel-iommu: use coherent_dma_mask in alloc_coherent
  amd_iommu: fix nasty bug that caused ILLEGAL_DEVICE_TABLE_ENTRY errors
  intel-iommu: IA64 support
  dmar: remove the quirk which disables dma-remapping when intr-remapping enabled
  dmar: Use queued invalidation interface for IOTLB and context invalidation
  dmar: context cache and IOTLB invalidation using queued invalidation
  dmar: use spin_lock_irqsave() in qi_submit_sync()
parents 326528a5 b876d08f
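Note on the qi_submit_sync() locking change listed above: the queue lock can also be taken from interrupt context, so the submitting CPU now takes it with spin_lock_irqsave() and keeps local interrupts disabled for the whole submit-and-wait sequence (the rationale is spelled out in the comment added in the dmar.c hunk below). A minimal sketch of that pattern follows; all names here (demo_queue, demo_submit_and_wait) are hypothetical illustrations, not the kernel's code.

/*
 * Illustrative sketch only -- hypothetical types and names.
 * Keeping IRQs disabled on this CPU while the queue lock is held (or
 * briefly dropped and re-taken) prevents an interrupt handler on the
 * same CPU from trying to queue another command and spinning forever
 * for free slots that this CPU has not reclaimed yet.
 */
#include <linux/spinlock.h>
#include <asm/processor.h>      /* cpu_relax() */

struct demo_queue {
        spinlock_t lock;
        int free_cnt;
        int done;               /* written by hardware or another CPU */
};

static void demo_submit_and_wait(struct demo_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);     /* IRQs off from here on */
        q->free_cnt--;                          /* pretend a descriptor was queued */

        while (!q->done) {
                /*
                 * Drop the lock so other CPUs can make progress, but do
                 * not re-enable interrupts on this CPU.
                 */
                spin_unlock(&q->lock);
                cpu_relax();
                spin_lock(&q->lock);
        }

        q->free_cnt++;
        spin_unlock_irqrestore(&q->lock, flags);
}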
@@ -2182,6 +2182,13 @@ M: maciej.sosnowski@intel.com
 L:      linux-kernel@vger.kernel.org
 S:      Supported

+INTEL IOMMU (VT-d)
+P:      David Woodhouse
+M:      dwmw2@infradead.org
+L:      iommu@lists.linux-foundation.org
+T:      git://git.infradead.org/iommu-2.6.git
+S:      Supported
+
 INTEL IOP-ADMA DMA DRIVER
 P:      Dan Williams
 M:      dan.j.williams@intel.com
...
@@ -212,7 +212,7 @@ static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
 /* Programs the physical address of the device table into the IOMMU hardware */
 static void __init iommu_set_device_table(struct amd_iommu *iommu)
 {
-        u32 entry;
+        u64 entry;

         BUG_ON(iommu->mmio_base == NULL);
...
@@ -9,8 +9,6 @@
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>

-static int forbid_dac __read_mostly;
-
 struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
@@ -293,17 +291,3 @@ void pci_iommu_shutdown(void)
 }
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
-
-#ifdef CONFIG_PCI
-/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
-
-static __devinit void via_no_dac(struct pci_dev *dev)
-{
-        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
-                printk(KERN_INFO "PCI: VIA PCI bridge detected."
-                                 "Disabling DAC.\n");
-                forbid_dac = 1;
-        }
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
-#endif
@@ -188,8 +188,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
         return 0;
 }

-static int __init
-dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
         struct acpi_dmar_hardware_unit *drhd;
         static int include_all;
@@ -277,14 +276,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
                 drhd = (struct acpi_dmar_hardware_unit *)header;
                 printk (KERN_INFO PREFIX
                         "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
-                        drhd->flags, drhd->address);
+                        drhd->flags, (unsigned long long)drhd->address);
                 break;
         case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                 rmrr = (struct acpi_dmar_reserved_memory *)header;
                 printk (KERN_INFO PREFIX
                         "RMRR base: 0x%016Lx end: 0x%016Lx\n",
-                        rmrr->base_address, rmrr->end_address);
+                        (unsigned long long)rmrr->base_address,
+                        (unsigned long long)rmrr->end_address);
                 break;
         }
 }
@@ -328,7 +328,7 @@ parse_dmar_table(void)
         if (!dmar)
                 return -ENODEV;

-        if (dmar->width < PAGE_SHIFT_4K - 1) {
+        if (dmar->width < PAGE_SHIFT - 1) {
                 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
                 return -EINVAL;
         }
@@ -460,8 +460,8 @@ void __init detect_intel_iommu(void)
         ret = dmar_table_detect();

-#ifdef CONFIG_DMAR
         {
+#ifdef CONFIG_INTR_REMAP
                 struct acpi_table_dmar *dmar;
                 /*
                  * for now we will disable dma-remapping when interrupt
@@ -470,29 +470,17 @@ void __init detect_intel_iommu(void)
                  * is added, we will not need this any more.
                  */
                 dmar = (struct acpi_table_dmar *) dmar_tbl;
-                if (ret && cpu_has_x2apic && dmar->flags & 0x1) {
+                if (ret && cpu_has_x2apic && dmar->flags & 0x1)
                         printk(KERN_INFO
                                "Queued invalidation will be enabled to support "
                                "x2apic and Intr-remapping.\n");
-                        printk(KERN_INFO
-                               "Disabling IOMMU detection, because of missing "
-                               "queued invalidation support for IOTLB "
-                               "invalidation\n");
-                        printk(KERN_INFO
-                               "Use \"nox2apic\", if you want to use Intel "
-                               " IOMMU for DMA-remapping and don't care about "
-                               " x2apic support\n");
-                        dmar_disabled = 1;
-                        goto end;
-                }
+#endif
+#ifdef CONFIG_DMAR
                 if (ret && !no_iommu && !iommu_detected && !swiotlb &&
                     !dmar_disabled)
                         iommu_detected = 1;
-        }
-end:
 #endif
+        }
         dmar_tbl = NULL;
 }
@@ -510,7 +498,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         iommu->seq_id = iommu_allocated++;

-        iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+        iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
         if (!iommu->reg) {
                 printk(KERN_ERR "IOMMU: can't map the region\n");
                 goto error;
@@ -521,8 +509,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         /* the registers might be more than one page */
         map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                 cap_max_fault_reg_offset(iommu->cap));
-        map_size = PAGE_ALIGN_4K(map_size);
-        if (map_size > PAGE_SIZE_4K) {
+        map_size = VTD_PAGE_ALIGN(map_size);
+        if (map_size > VTD_PAGE_SIZE) {
                 iounmap(iommu->reg);
                 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
                 if (!iommu->reg) {
@@ -533,8 +521,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         ver = readl(iommu->reg + DMAR_VER_REG);
         pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
-                drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
-                iommu->cap, iommu->ecap);
+                (unsigned long long)drhd->reg_base_addr,
+                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+                (unsigned long long)iommu->cap,
+                (unsigned long long)iommu->ecap);

         spin_lock_init(&iommu->register_lock);
@@ -587,11 +577,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
         hw = qi->desc;

-        spin_lock(&qi->q_lock);
+        spin_lock_irqsave(&qi->q_lock, flags);
         while (qi->free_cnt < 3) {
-                spin_unlock(&qi->q_lock);
+                spin_unlock_irqrestore(&qi->q_lock, flags);
                 cpu_relax();
-                spin_lock(&qi->q_lock);
+                spin_lock_irqsave(&qi->q_lock, flags);
         }

         index = qi->free_head;
@@ -612,15 +602,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
         qi->free_head = (qi->free_head + 2) % QI_LENGTH;
         qi->free_cnt -= 2;

-        spin_lock_irqsave(&iommu->register_lock, flags);
+        spin_lock(&iommu->register_lock);
         /*
          * update the HW tail register indicating the presence of
          * new descriptors.
          */
         writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        spin_unlock(&iommu->register_lock);

         while (qi->desc_status[wait_index] != QI_DONE) {
+                /*
+                 * We will leave the interrupts disabled, to prevent interrupt
+                 * context to queue another cmd while a cmd is already submitted
+                 * and waiting for completion on this cpu. This is to avoid
+                 * a deadlock where the interrupt context can wait indefinitely
+                 * for free slots in the queue.
+                 */
                 spin_unlock(&qi->q_lock);
                 cpu_relax();
                 spin_lock(&qi->q_lock);
@@ -629,7 +626,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
         qi->desc_status[index] = QI_DONE;

         reclaim_free_desc(qi);
-        spin_unlock(&qi->q_lock);
+        spin_unlock_irqrestore(&qi->q_lock, flags);
 }

 /*
@@ -645,6 +642,62 @@ void qi_global_iec(struct intel_iommu *iommu)
         qi_submit_sync(&desc, iommu);
 }

+int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+                     u64 type, int non_present_entry_flush)
+{
+        struct qi_desc desc;
+
+        if (non_present_entry_flush) {
+                if (!cap_caching_mode(iommu->cap))
+                        return 1;
+                else
+                        did = 0;
+        }
+
+        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+                        | QI_CC_GRAN(type) | QI_CC_TYPE;
+        desc.high = 0;
+
+        qi_submit_sync(&desc, iommu);
+
+        return 0;
+}
+
+int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+                   unsigned int size_order, u64 type,
+                   int non_present_entry_flush)
+{
+        u8 dw = 0, dr = 0;
+        struct qi_desc desc;
+        int ih = 0;
+
+        if (non_present_entry_flush) {
+                if (!cap_caching_mode(iommu->cap))
+                        return 1;
+                else
+                        did = 0;
+        }
+
+        if (cap_write_drain(iommu->cap))
+                dw = 1;
+
+        if (cap_read_drain(iommu->cap))
+                dr = 1;
+
+        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+                | QI_IOTLB_AM(size_order);
+
+        qi_submit_sync(&desc, iommu);
+
+        return 0;
+}
+
 /*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
...
This diff is collapsed.
@@ -43,6 +43,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);

+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+int forbid_dac __read_mostly;
+EXPORT_SYMBOL(forbid_dac);
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+                dev_info(&dev->dev,
+                        "VIA PCI bridge detected. Disabling DAC.\n");
+                forbid_dac = 1;
+        }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+
 /* Deal with broken BIOS'es that neglect to enable passive release,
    which can cause problems in combination with the 82441FX/PPro MTRRs */
 static void quirk_passive_release(struct pci_dev *dev)
...
@@ -7,9 +7,13 @@ extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int dmar_disabled;
+extern int forbid_dac;

 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);

+/* 10 seconds */
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+
 #ifdef CONFIG_GART_IOMMU
 extern int gart_iommu_aperture;
 extern int gart_iommu_aperture_allowed;
...
@@ -2,15 +2,14 @@
 #define _DMA_REMAPPING_H

 /*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
+ * VT-d hardware uses 4KiB page size regardless of host page size.
  */
-#define PAGE_SHIFT_4K           (12)
-#define PAGE_SIZE_4K            (1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K            (((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr)     (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+#define VTD_PAGE_SHIFT          (12)
+#define VTD_PAGE_SIZE           (1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK           (((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr)    (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

-#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT_4K)
+#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN           IOVA_PFN(DMA_32BIT_MASK)
 #define DMA_64BIT_PFN           IOVA_PFN(DMA_64BIT_MASK)
@@ -25,7 +24,7 @@ struct root_entry {
         u64     val;
         u64     rsvd1;
 };
-#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
 static inline bool root_present(struct root_entry *root)
 {
         return (root->val & 1);
@@ -36,7 +35,7 @@ static inline void set_root_present(struct root_entry *root)
 }
 static inline void set_root_value(struct root_entry *root, unsigned long value)
 {
-        root->val |= value & PAGE_MASK_4K;
+        root->val |= value & VTD_PAGE_MASK;
 }

 struct context_entry;
@@ -45,7 +44,7 @@ get_context_addr_from_root(struct root_entry *root)
 {
         return (struct context_entry *)
                 (root_present(root)?phys_to_virt(
-                root->val & PAGE_MASK_4K):
+                root->val & VTD_PAGE_MASK) :
                 NULL);
 }
@@ -67,7 +66,7 @@ struct context_entry {
 #define context_present(c) ((c).lo & 1)
 #define context_fault_disable(c) (((c).lo >> 1) & 1)
 #define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
+#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
 #define context_address_width(c) ((c).hi & 7)
 #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
@@ -81,7 +80,7 @@ struct context_entry {
         } while (0)
 #define CONTEXT_TT_MULTI_LEVEL 0
 #define context_set_address_root(c, val) \
-        do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
+        do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
 #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
 #define context_set_domain_id(c, val) \
         do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
@@ -107,9 +106,9 @@ struct dma_pte {
 #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
 #define dma_set_pte_prot(p, prot) \
                 do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
 #define dma_set_pte_addr(p, addr) do {\
-                (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+                (p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
 #define dma_pte_present(p) (((p).val & 3) != 0)

 struct intel_iommu;
...
@@ -29,6 +29,7 @@
 #include <linux/io.h>
 #include <linux/dma_remapping.h>
 #include <asm/cacheflush.h>
+#include <asm/iommu.h>

 /*
  * Intel IOMMU register specification per version 1.0 public spec.
@@ -127,6 +128,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)

 /* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET  60
 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
 #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
@@ -140,6 +142,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_TLB_MAX_SIZE (0x3f)

 /* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET  61
 #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
 #define DMA_ID_TLB_DSI_FLUSH    (((u64)2) << 3)
 #define DMA_ID_TLB_PSI_FLUSH    (((u64)3) << 3)
@@ -200,22 +203,21 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define dma_frcd_type(d) ((d >> 30) & 1)
 #define dma_frcd_fault_reason(c) (c & 0xff)
 #define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
-
-#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))

 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
-        cycles_t start_time = get_cycles();\
-        while (1) {\
-                sts = op (iommu->reg + offset);\
-                if (cond)\
-                        break;\
+do { \
+        cycles_t start_time = get_cycles(); \
+        while (1) { \
+                sts = op(iommu->reg + offset); \
+                if (cond) \
+                        break; \
                 if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
-                        panic("DMAR hardware is malfunctioning\n");\
-                cpu_relax();\
-        }\
-}
+                        panic("DMAR hardware is malfunctioning\n"); \
+                cpu_relax(); \
+        } \
+} while (0)

 #define QI_LENGTH       256     /* queue length */
@@ -238,6 +240,19 @@ enum {
 #define QI_IWD_STATUS_DATA(d)   (((u64)d) << 32)
 #define QI_IWD_STATUS_WRITE     (((u64)1) << 5)

+#define QI_IOTLB_DID(did)       (((u64)did) << 16)
+#define QI_IOTLB_DR(dr)         (((u64)dr) << 7)
+#define QI_IOTLB_DW(dw)         (((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran)     (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr)     (((u64)addr) & VTD_PAGE_MASK)
+#define QI_IOTLB_IH(ih)         (((u64)ih) << 6)
+#define QI_IOTLB_AM(am)         (((u8)am))
+
+#define QI_CC_FM(fm)            (((u64)fm) << 48)
+#define QI_CC_SID(sid)          (((u64)sid) << 32)
+#define QI_CC_DID(did)          (((u64)did) << 16)
+#define QI_CC_GRAN(gran)        (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
 struct qi_desc {
         u64 low, high;
 };
@@ -263,6 +278,13 @@ struct ir_table {
 };
 #endif

+struct iommu_flush {
+        int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+                u64 type, int non_present_entry_flush);
+        int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+                unsigned int size_order, u64 type, int non_present_entry_flush);
+};
+
 struct intel_iommu {
         void __iomem    *reg;   /* Pointer to hardware regs, virtual addr */
         u64             cap;
@@ -282,6 +304,7 @@ struct intel_iommu {
         unsigned char name[7];    /* Device Name */
         struct msi_msg saved_msg;
         struct sys_device sysdev;
+        struct iommu_flush flush;
 #endif
         struct q_inval  *qi;            /* Queued invalidation info */
 #ifdef CONFIG_INTR_REMAP
@@ -303,6 +326,12 @@ extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);

+extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+                            u8 fm, u64 type, int non_present_entry_flush);
+extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+                          unsigned int size_order, u64 type,
+                          int non_present_entry_flush);
+
 extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

 void intel_iommu_domain_exit(struct dmar_domain *domain);
@@ -324,4 +353,11 @@ static inline int intel_iommu_found(void)
 }
 #endif /* CONFIG_DMAR */

+extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
+extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
+extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
+extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
+
 #endif