Commit 395e51f1 authored by Joerg Roedel

Merge branches 'iommu/fixes', 'x86/amd', 'groups', 'arm/tegra' and 'api/domain-attr' into next

Conflicts:
	drivers/iommu/iommu.c
	include/linux/iommu.h
What:		/sys/kernel/iommu_groups/
Date:		May 2012
KernelVersion:	v3.5
Contact:	Alex Williamson <alex.williamson@redhat.com>
Description:	/sys/kernel/iommu_groups/ contains a number of sub-
		directories, each representing an IOMMU group. The
		name of the sub-directory matches the iommu_group_id()
		for the group, which is an integer value. Within each
		subdirectory is another directory named "devices" with
		links to the sysfs devices contained in this group.
		The group directory also optionally contains a "name"
		file if the IOMMU driver has chosen to register a more
		common name for the group.
Users:
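
For illustration only (this sketch is not part of the commit), the layout described above can be walked with ordinary directory operations. A minimal user-space C program that prints each group ID and the device links beneath it, with error handling trimmed:

	#include <dirent.h>
	#include <stdio.h>

	int main(void)
	{
		const char *base = "/sys/kernel/iommu_groups";
		struct dirent *g, *d;
		char path[512];
		DIR *groups, *devs;

		groups = opendir(base);
		if (!groups)
			return 1;	/* no IOMMU groups exposed */

		while ((g = readdir(groups))) {
			if (g->d_name[0] == '.')
				continue;
			/* directory name is the iommu_group_id() value */
			printf("group %s:\n", g->d_name);
			snprintf(path, sizeof(path), "%s/%s/devices",
				 base, g->d_name);
			devs = opendir(path);
			if (!devs)
				continue;
			while ((d = readdir(devs)))
				if (d->d_name[0] != '.')
					printf("  %s\n", d->d_name);	/* e.g. a PCI B:D.F */
			closedir(devs);
		}
		closedir(groups);
		return 0;
	}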
NVIDIA Tegra 30 IOMMU H/W, SMMU (System Memory Management Unit)

Required properties:
- compatible : "nvidia,tegra30-smmu"
- reg : Should contain 3 register banks(address and length) for each
  of the SMMU register blocks.
- interrupts : Should contain MC General interrupt.
- nvidia,#asids : # of ASIDs
- dma-window : IOVA start address and length.
- nvidia,ahb : phandle to the ahb bus connected to SMMU.

Example:
	smmu {
		compatible = "nvidia,tegra30-smmu";
		reg = <0x7000f010 0x02c
		       0x7000f1f0 0x010
		       0x7000f228 0x05c>;
		nvidia,#asids = <4>;		/* # of ASIDs */
		dma-window = <0 0x40000000>;	/* IOVA start & length */
		nvidia,ahb = <&ahb>;
	};
@@ -1134,7 +1134,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		forcesac
 		soft
 		pt		[x86, IA-64]
-		group_mf	[x86, IA-64]

 io7=		[HW] IO7 for Marvel based alpha systems
@@ -11,12 +11,10 @@ extern void no_iommu_init(void);
 extern int force_iommu, no_iommu;
 extern int iommu_pass_through;
 extern int iommu_detected;
-extern int iommu_group_mf;
 #else
 #define iommu_pass_through	(0)
 #define no_iommu		(1)
 #define iommu_detected		(0)
-#define iommu_group_mf		(0)
 #endif
 extern void iommu_dma_init(void);
 extern void machvec_init(const char *name);
@@ -32,7 +32,6 @@ int force_iommu __read_mostly;
 #endif
 int iommu_pass_through;
-int iommu_group_mf;

 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
@@ -5,7 +5,6 @@ extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;
-extern int iommu_group_mf;

 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
@@ -45,15 +45,6 @@ int iommu_detected __read_mostly = 0;
  */
 int iommu_pass_through __read_mostly;

-/*
- * Group multi-function PCI devices into a single device-group for the
- * iommu_device_group interface. This tells the iommu driver to pretend
- * it cannot distinguish between functions of a device, exposing only one
- * group for the device. Useful for disallowing use of individual PCI
- * functions from userspace drivers.
- */
-int iommu_group_mf __read_mostly;
-
 extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

 /* Dummy device used for NULL arguments (normally ISA). */
@@ -194,8 +185,6 @@ static __init int iommu_setup(char *p)
 #endif
 	if (!strncmp(p, "pt", 2))
 		iommu_pass_through = 1;
-	if (!strncmp(p, "group_mf", 8))
-		iommu_group_mf = 1;

 	gart_parse_options(p);
@@ -13,6 +13,10 @@ menuconfig IOMMU_SUPPORT

 if IOMMU_SUPPORT

+config OF_IOMMU
+	def_bool y
+	depends on OF
+
 # MSM IOMMU support
 config MSM_IOMMU
 	bool "MSM IOMMU Support"
@@ -154,7 +158,7 @@ config TEGRA_IOMMU_GART
 config TEGRA_IOMMU_SMMU
 	bool "Tegra SMMU IOMMU Support"
-	depends on ARCH_TEGRA_3x_SOC
+	depends on ARCH_TEGRA_3x_SOC && TEGRA_AHB
 	select IOMMU_API
 	help
 	  Enables support for remapping discontiguous physical memory
 obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
@@ -256,11 +256,21 @@ static bool check_device(struct device *dev)
 	return true;
 }

+static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
+{
+	pci_dev_put(*from);
+	*from = to;
+}
+
+#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
+
 static int iommu_init_device(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev);
 	struct iommu_dev_data *dev_data;
+	struct iommu_group *group;
 	u16 alias;
+	int ret;

 	if (dev->archdata.iommu)
 		return 0;
@@ -281,8 +291,43 @@ static int iommu_init_device(struct device *dev)
 			return -ENOTSUPP;
 		}
 		dev_data->alias_data = alias_data;
+
+		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+	} else
+		dma_pdev = pci_dev_get(pdev);
+
+	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+
+	if (dma_pdev->multifunction &&
+	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
+		swap_pci_ref(&dma_pdev,
+			     pci_get_slot(dma_pdev->bus,
+					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
+						    0)));
+
+	while (!pci_is_root_bus(dma_pdev->bus)) {
+		if (pci_acs_path_enabled(dma_pdev->bus->self,
+					 NULL, REQ_ACS_FLAGS))
+			break;
+
+		swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+	}
+
+	group = iommu_group_get(&dma_pdev->dev);
+	pci_dev_put(dma_pdev);
+	if (!group) {
+		group = iommu_group_alloc();
+		if (IS_ERR(group))
+			return PTR_ERR(group);
 	}
+
+	ret = iommu_group_add_device(group, dev);
+
+	iommu_group_put(group);
+
+	if (ret)
+		return ret;

 	if (pci_iommuv2_capable(pdev)) {
 		struct amd_iommu *iommu;
@@ -311,6 +356,8 @@ static void iommu_ignore_device(struct device *dev)

 static void iommu_uninit_device(struct device *dev)
 {
+	iommu_group_remove_device(dev);
+
 	/*
 	 * Nothing to do here - we keep dev_data around for unplugged devices
 	 * and reuse it when the device is re-plugged - not doing so would
@@ -384,7 +431,6 @@ DECLARE_STATS_COUNTER(invalidate_iotlb);
 DECLARE_STATS_COUNTER(invalidate_iotlb_all);
 DECLARE_STATS_COUNTER(pri_requests);
-
 static struct dentry *stats_dir;
 static struct dentry *de_fflush;
@@ -2073,7 +2119,7 @@ static int pdev_iommuv2_enable(struct pci_dev *pdev)
 /* FIXME: Move this to PCI code */
 #define PCI_PRI_TLP_OFF		(1 << 15)

-bool pci_pri_tlp_required(struct pci_dev *pdev)
+static bool pci_pri_tlp_required(struct pci_dev *pdev)
 {
 	u16 status;
 	int pos;
@@ -2254,6 +2300,18 @@ static int device_change_notifier(struct notifier_block *nb,

 		iommu_init_device(dev);
+
+		/*
+		 * dev_data is still NULL and
+		 * got initialized in iommu_init_device
+		 */
+		dev_data = get_dev_data(dev);
+
+		if (iommu_pass_through || dev_data->iommu_v2) {
+			dev_data->passthrough = true;
+			attach_device(dev, pt_domain);
+			break;
+		}

 		domain = domain_for_device(dev);

 		/* allocate a protection domain if a device is added */
@@ -2271,10 +2329,7 @@ static int device_change_notifier(struct notifier_block *nb,

 		dev_data = get_dev_data(dev);
-		if (!dev_data->passthrough)
-			dev->archdata.dma_ops = &amd_iommu_dma_ops;
-		else
-			dev->archdata.dma_ops = &nommu_dma_ops;
+		dev->archdata.dma_ops = &amd_iommu_dma_ops;

 		break;
 	case BUS_NOTIFY_DEL_DEVICE:
@@ -2972,6 +3027,11 @@ int __init amd_iommu_init_dma_ops(void)

 	amd_iommu_stats_init();

+	if (amd_iommu_unmap_flush)
+		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
+	else
+		pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
+
 	return 0;

 free_domains:
@@ -3078,6 +3138,10 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)

 	dom->priv = domain;

+	dom->geometry.aperture_start = 0;
+	dom->geometry.aperture_end   = ~0ULL;
+	dom->geometry.force_aperture = true;
+
 	return 0;

 out_free:
@@ -3236,26 +3300,6 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
 	return 0;
 }

-static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
-{
-	struct iommu_dev_data *dev_data = dev->archdata.iommu;
-	struct pci_dev *pdev = to_pci_dev(dev);
-	u16 devid;
-
-	if (!dev_data)
-		return -ENODEV;
-
-	if (pdev->is_virtfn || !iommu_group_mf)
-		devid = dev_data->devid;
-	else
-		devid = calc_devid(pdev->bus->number,
-				   PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
-
-	*groupid = amd_iommu_alias_table[devid];
-
-	return 0;
-}
-
 static struct iommu_ops amd_iommu_ops = {
 	.domain_init = amd_iommu_domain_init,
 	.domain_destroy = amd_iommu_domain_destroy,
@@ -3265,7 +3309,6 @@ static struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
-	.device_group = amd_iommu_device_group,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
@@ -26,6 +26,8 @@
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
+#include <linux/acpi.h>
+#include <acpi/acpi.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -122,7 +124,7 @@ struct ivmd_header {

 bool amd_iommu_dump;
-static int __initdata amd_iommu_detected;
+static bool amd_iommu_detected;
 static bool __initdata amd_iommu_disabled;

 u16 amd_iommu_last_bdf;			/* largest PCI device id we have
@@ -148,11 +150,6 @@ bool amd_iommu_v2_present __read_mostly;

 bool amd_iommu_force_isolation __read_mostly;

-/*
- * The ACPI table parsing functions set this variable on an error
- */
-static int __initdata amd_iommu_init_err;
-
 /*
  * List of protection domains - used during resume
  */
@@ -190,13 +187,23 @@ static u32 dev_table_size;	/* size of the device table */
 static u32 alias_table_size;	/* size of the alias table */
 static u32 rlookup_table_size;	/* size of the rlookup table */

-/*
- * This function flushes all internal caches of
- * the IOMMU used by this driver.
- */
-extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+enum iommu_init_state {
+	IOMMU_START_STATE,
+	IOMMU_IVRS_DETECTED,
+	IOMMU_ACPI_FINISHED,
+	IOMMU_ENABLED,
+	IOMMU_PCI_INIT,
+	IOMMU_INTERRUPTS_EN,
+	IOMMU_DMA_OPS,
+	IOMMU_INITIALIZED,
+	IOMMU_NOT_FOUND,
+	IOMMU_INIT_ERROR,
+};
+
+static enum iommu_init_state init_state = IOMMU_START_STATE;

 static int amd_iommu_enable_interrupts(void);
+static int __init iommu_go_to_state(enum iommu_init_state state);

 static inline void update_last_devid(u16 devid)
 {
@@ -321,23 +328,6 @@ static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
 /* Function to enable the hardware */
 static void iommu_enable(struct amd_iommu *iommu)
 {
-	static const char * const feat_str[] = {
-		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
-		"IA", "GA", "HE", "PC", NULL
-	};
-	int i;
-
-	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
-	       dev_name(&iommu->dev->dev), iommu->cap_ptr);
-
-	if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-		printk(KERN_CONT " extended features: ");
-		for (i = 0; feat_str[i]; ++i)
-			if (iommu_feature(iommu, (1ULL << i)))
-				printk(KERN_CONT " %s", feat_str[i]);
-	}
-	printk(KERN_CONT "\n");
-
 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 }
@@ -358,7 +348,7 @@ static void iommu_disable(struct amd_iommu *iommu)
  * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
  * the system has one.
  */
-static u8 * __init iommu_map_mmio_space(u64 address)
+static u8 __iomem * __init iommu_map_mmio_space(u64 address)
 {
 	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
 		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
@@ -367,7 +357,7 @@ static u8 * __init iommu_map_mmio_space(u64 address)
 		return NULL;
 	}

-	return ioremap_nocache(address, MMIO_REGION_LENGTH);
+	return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH);
 }

 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -463,11 +453,9 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
 	 */
 	for (i = 0; i < table->length; ++i)
 		checksum += p[i];
-	if (checksum != 0) {
+	if (checksum != 0)
 		/* ACPI table corrupt */
-		amd_iommu_init_err = -ENODEV;
-		return 0;
-	}
+		return -ENODEV;

 	p += IVRS_HEADER_LENGTH;
@@ -725,90 +713,6 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 	}
 }

-/*
- * This function reads some important data from the IOMMU PCI space and
- * initializes the driver data structure with it. It reads the hardware
- * capabilities and the first/last device entries
- */
-static void __init init_iommu_from_pci(struct amd_iommu *iommu)
-{
-	int cap_ptr = iommu->cap_ptr;
-	u32 range, misc, low, high;
-	int i, j;
-
-	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
-			      &iommu->cap);
-	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
-			      &range);
-	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
-			      &misc);
-
-	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
-					 MMIO_GET_FD(range));
-	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
-					MMIO_GET_LD(range));
-	iommu->evt_msi_num = MMIO_MSI_NUM(misc);
-
-	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
-		amd_iommu_iotlb_sup = false;
-
-	/* read extended feature bits */
-	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
-	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
-	iommu->features = ((u64)high << 32) | low;
-
-	if (iommu_feature(iommu, FEATURE_GT)) {
-		int glxval;
-		u32 pasids;
-		u64 shift;
-
-		shift   = iommu->features & FEATURE_PASID_MASK;
-		shift >>= FEATURE_PASID_SHIFT;
-		pasids  = (1 << shift);
-
-		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
-
-		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
-		glxval >>= FEATURE_GLXVAL_SHIFT;
-
-		if (amd_iommu_max_glx_val == -1)
-			amd_iommu_max_glx_val = glxval;
-		else
-			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
-	}
-
-	if (iommu_feature(iommu, FEATURE_GT) &&
-	    iommu_feature(iommu, FEATURE_PPR)) {
-		iommu->is_iommu_v2   = true;
-		amd_iommu_v2_present = true;
-	}
-
-	if (!is_rd890_iommu(iommu->dev))
-		return;
-
-	/*
-	 * Some rd890 systems may not be fully reconfigured by the BIOS, so
-	 * it's necessary for us to store this information so it can be
-	 * reprogrammed on resume
-	 */
-	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
-			      &iommu->stored_addr_lo);
-	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
-			      &iommu->stored_addr_hi);
-
-	/* Low bit locks writes to configuration space */
-	iommu->stored_addr_lo &= ~1;
-
-	for (i = 0; i < 6; i++)
-		for (j = 0; j < 0x12; j++)
-			iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
-
-	for (i = 0; i < 0x83; i++)
-		iommu->stored_l2[i] = iommu_read_l2(iommu, i);
-}
-
 /*
  * Takes a pointer to an AMD IOMMU entry in the ACPI table and
  * initializes the hardware and our data structures with it.
@@ -1025,13 +929,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	/*
 	 * Copy data from ACPI table entry to the iommu struct
 	 */
-	iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
-	if (!iommu->dev)
-		return 1;
-
-	iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
-						PCI_DEVFN(0, 0));
-
+	iommu->devid   = h->devid;
 	iommu->cap_ptr = h->cap_ptr;
 	iommu->pci_seg = h->pci_seg;
 	iommu->mmio_phys = h->mmio_phys;
@@ -1049,20 +947,10 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)

 	iommu->int_enabled = false;

-	init_iommu_from_pci(iommu);
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);

-	if (iommu_feature(iommu, FEATURE_PPR)) {
-		iommu->ppr_log = alloc_ppr_log(iommu);
-		if (!iommu->ppr_log)
-			return -ENOMEM;
-	}
-
-	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
-		amd_iommu_np_cache = true;
-
-	return pci_enable_device(iommu->dev);
+	return 0;
 }

 /*
@@ -1093,16 +981,12 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 			    h->mmio_phys);

 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-			if (iommu == NULL) {
-				amd_iommu_init_err = -ENOMEM;
-				return 0;
-			}
+			if (iommu == NULL)
+				return -ENOMEM;

 			ret = init_iommu_one(iommu, h);
-			if (ret) {
-				amd_iommu_init_err = ret;
-				return 0;
-			}
+			if (ret)
+				return ret;
 			break;
 		default:
 			break;
@@ -1115,6 +999,148 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	return 0;
 }
+static int iommu_init_pci(struct amd_iommu *iommu)
+{
+	int cap_ptr = iommu->cap_ptr;
+	u32 range, misc, low, high;
+
+	iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid),
+					  iommu->devid & 0xff);
+	if (!iommu->dev)
+		return -ENODEV;
+
+	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+			      &iommu->cap);
+	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
+			      &range);
+	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
+			      &misc);
+
+	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
+					 MMIO_GET_FD(range));
+	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
+					MMIO_GET_LD(range));
+
+	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
+		amd_iommu_iotlb_sup = false;
+
+	/* read extended feature bits */
+	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
+	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
+
+	iommu->features = ((u64)high << 32) | low;
+
+	if (iommu_feature(iommu, FEATURE_GT)) {
+		int glxval;
+		u32 pasids;
+		u64 shift;
+
+		shift   = iommu->features & FEATURE_PASID_MASK;
+		shift >>= FEATURE_PASID_SHIFT;
+		pasids  = (1 << shift);
+
+		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
+		glxval >>= FEATURE_GLXVAL_SHIFT;
+
+		if (amd_iommu_max_glx_val == -1)
+			amd_iommu_max_glx_val = glxval;
+		else
+			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+	}
+
+	if (iommu_feature(iommu, FEATURE_GT) &&
+	    iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->is_iommu_v2   = true;
+		amd_iommu_v2_present = true;
+	}
+
+	if (iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->ppr_log = alloc_ppr_log(iommu);
+		if (!iommu->ppr_log)
+			return -ENOMEM;
+	}
+
+	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+		amd_iommu_np_cache = true;
+
+	if (is_rd890_iommu(iommu->dev)) {
+		int i, j;
+
+		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+							PCI_DEVFN(0, 0));
+
+		/*
+		 * Some rd890 systems may not be fully reconfigured by the
+		 * BIOS, so it's necessary for us to store this information so
+		 * it can be reprogrammed on resume
+		 */
+		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
+				      &iommu->stored_addr_lo);
+		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
+				      &iommu->stored_addr_hi);
+
+		/* Low bit locks writes to configuration space */
+		iommu->stored_addr_lo &= ~1;
+
+		for (i = 0; i < 6; i++)
+			for (j = 0; j < 0x12; j++)
+				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
+
+		for (i = 0; i < 0x83; i++)
+			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
+	}
+
+	return pci_enable_device(iommu->dev);
+}
+
+static void print_iommu_info(void)
+{
+	static const char * const feat_str[] = {
+		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
+		"IA", "GA", "HE", "PC"
+	};
+	struct amd_iommu *iommu;
+
+	for_each_iommu(iommu) {
+		int i;
+
+		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
+			dev_name(&iommu->dev->dev), iommu->cap_ptr);
+
+		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
+			pr_info("AMD-Vi: Extended features: ");
+			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+				if (iommu_feature(iommu, (1ULL << i)))
+					pr_cont(" %s", feat_str[i]);
+			}
+		}
+		pr_cont("\n");
+	}
+}
+
+static int __init amd_iommu_init_pci(void)
+{
+	struct amd_iommu *iommu;
+	int ret = 0;
+
+	for_each_iommu(iommu) {
+		ret = iommu_init_pci(iommu);
+		if (ret)
+			break;
+	}
+
+	/* Make sure ACS will be enabled */
+	pci_request_acs();
+
+	ret = amd_iommu_init_devices();
+
+	print_iommu_info();
+
+	return ret;
+}

 /****************************************************************************
  *
  * The following functions initialize the MSI interrupts for all IOMMUs
@@ -1217,7 +1243,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
 /* called for unity map ACPI definition */
 static int __init init_unity_map_range(struct ivmd_header *m)
 {
-	struct unity_map_entry *e = 0;
+	struct unity_map_entry *e = NULL;
 	char *s;

 	e = kzalloc(sizeof(*e), GFP_KERNEL);
@@ -1369,7 +1395,7 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
  */
-static void enable_iommus(void)
+static void early_enable_iommus(void)
 {
 	struct amd_iommu *iommu;
@@ -1379,14 +1405,29 @@ static void enable_iommus(void)
 		iommu_set_device_table(iommu);
 		iommu_enable_command_buffer(iommu);
 		iommu_enable_event_buffer(iommu);
-		iommu_enable_ppr_log(iommu);
-		iommu_enable_gt(iommu);
 		iommu_set_exclusion_range(iommu);
 		iommu_enable(iommu);
 		iommu_flush_all_caches(iommu);
 	}
 }

+static void enable_iommus_v2(void)
+{
+	struct amd_iommu *iommu;
+
+	for_each_iommu(iommu) {
+		iommu_enable_ppr_log(iommu);
+		iommu_enable_gt(iommu);
+	}
+}
+
+static void enable_iommus(void)
+{
+	early_enable_iommus();
+
+	enable_iommus_v2();
+}
+
 static void disable_iommus(void)
 {
 	struct amd_iommu *iommu;
@@ -1481,16 +1522,23 @@ static void __init free_on_init_error(void)
  * After everything is set up the IOMMUs are enabled and the necessary
  * hotplug and suspend notifiers are registered.
  */
-int __init amd_iommu_init_hardware(void)
+static int __init early_amd_iommu_init(void)
 {
+	struct acpi_table_header *ivrs_base;
+	acpi_size ivrs_size;
+	acpi_status status;
 	int i, ret = 0;

 	if (!amd_iommu_detected)
 		return -ENODEV;

-	if (amd_iommu_dev_table != NULL) {
-		/* Hardware already initialized */
-		return 0;
+	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+	if (status == AE_NOT_FOUND)
+		return -ENODEV;
+	else if (ACPI_FAILURE(status)) {
+		const char *err = acpi_format_exception(status);
+		pr_err("AMD-Vi: IVRS table error: %s\n", err);
+		return -EINVAL;
 	}

 	/*
@@ -1498,10 +1546,7 @@ int __init amd_iommu_init_hardware(void)
 	 * we need to handle. Upon this information the shared data
 	 * structures for the IOMMUs in the system will be allocated
 	 */
-	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
-		return -ENODEV;
-
-	ret = amd_iommu_init_err;
+	ret = find_last_devid_acpi(ivrs_base);
 	if (ret)
 		goto out;
@@ -1523,20 +1568,20 @@ int __init amd_iommu_init_hardware(void)
 	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
 			get_order(alias_table_size));
 	if (amd_iommu_alias_table == NULL)
-		goto free;
+		goto out;

 	/* IOMMU rlookup table - find the IOMMU for a specific device */
 	amd_iommu_rlookup_table = (void *)__get_free_pages(
 			GFP_KERNEL | __GFP_ZERO,
 			get_order(rlookup_table_size));
 	if (amd_iommu_rlookup_table == NULL)
-		goto free;
+		goto out;

 	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
 					    GFP_KERNEL | __GFP_ZERO,
 					    get_order(MAX_DOMAIN_ID/8));
 	if (amd_iommu_pd_alloc_bitmap == NULL)
-		goto free;
+		goto out;

 	/* init the device table */
 	init_device_table();
@@ -1559,38 +1604,18 @@ int __init amd_iommu_init_hardware(void)
 	 * now the data structures are allocated and basically initialized
 	 * start the real acpi table scan
 	 */
-	ret = -ENODEV;
-	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
-		goto free;
-
-	if (amd_iommu_init_err) {
-		ret = amd_iommu_init_err;
-		goto free;
-	}
-
-	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
-		goto free;
-
-	if (amd_iommu_init_err) {
-		ret = amd_iommu_init_err;
-		goto free;
-	}
-
-	ret = amd_iommu_init_devices();
+	ret = init_iommu_all(ivrs_base);
 	if (ret)
-		goto free;
-
-	enable_iommus();
-
-	amd_iommu_init_notifier();
-
-	register_syscore_ops(&amd_iommu_syscore_ops);
+		goto out;
+
+	ret = init_memory_definitions(ivrs_base);
+	if (ret)
+		goto out;

 out:
-	return ret;
-
-free:
-	free_on_init_error();
+	/* Don't leak any ACPI memory */
+	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+	ivrs_base = NULL;

 	return ret;
 }
@@ -1610,26 +1635,29 @@ static int amd_iommu_enable_interrupts(void)
 	return ret;
 }

-/*
- * This is the core init function for AMD IOMMU hardware in the system.
- * This function is called from the generic x86 DMA layer initialization
- * code.
- *
- * The function calls amd_iommu_init_hardware() to setup and enable the
- * IOMMU hardware if this has not happened yet. After that the driver
- * registers for the DMA-API and for the IOMMU-API as necessary.
- */
-static int __init amd_iommu_init(void)
+static bool detect_ivrs(void)
 {
-	int ret = 0;
+	struct acpi_table_header *ivrs_base;
+	acpi_size ivrs_size;
+	acpi_status status;

-	ret = amd_iommu_init_hardware();
-	if (ret)
-		goto out;
+	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+	if (status == AE_NOT_FOUND)
+		return false;
+	else if (ACPI_FAILURE(status)) {
+		const char *err = acpi_format_exception(status);
+		pr_err("AMD-Vi: IVRS table error: %s\n", err);
+		return false;
+	}

-	ret = amd_iommu_enable_interrupts();
-	if (ret)
-		goto free;
+	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+
+	return true;
+}
+
+static int amd_iommu_init_dma(void)
+{
+	int ret;

 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
@@ -1637,29 +1665,108 @@ static int __init amd_iommu_init(void)
 		ret = amd_iommu_init_dma_ops();

 	if (ret)
-		goto free;
+		return ret;

 	amd_iommu_init_api();

+	amd_iommu_init_notifier();
+
+	return 0;
+}
+
+/****************************************************************************
+ *
+ * AMD IOMMU Initialization State Machine
+ *
+ ****************************************************************************/
+
+static int __init state_next(void)
+{
+	int ret = 0;
+
+	switch (init_state) {
+	case IOMMU_START_STATE:
+		if (!detect_ivrs()) {
+			init_state = IOMMU_NOT_FOUND;
+			ret = -ENODEV;
+		} else {
+			init_state = IOMMU_IVRS_DETECTED;
+		}
+		break;
+	case IOMMU_IVRS_DETECTED:
+		ret = early_amd_iommu_init();
+		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
+		break;
+	case IOMMU_ACPI_FINISHED:
+		early_enable_iommus();
+		register_syscore_ops(&amd_iommu_syscore_ops);
 		x86_platform.iommu_shutdown = disable_iommus;
+		init_state = IOMMU_ENABLED;
+		break;
+	case IOMMU_ENABLED:
+		ret = amd_iommu_init_pci();
+		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
+		enable_iommus_v2();
+		break;
+	case IOMMU_PCI_INIT:
+		ret = amd_iommu_enable_interrupts();
+		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
+		break;
+	case IOMMU_INTERRUPTS_EN:
+		ret = amd_iommu_init_dma();
+		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
+		break;
+	case IOMMU_DMA_OPS:
+		init_state = IOMMU_INITIALIZED;
+		break;
+	case IOMMU_INITIALIZED:
+		/* Nothing to do */
+		break;
+	case IOMMU_NOT_FOUND:
+	case IOMMU_INIT_ERROR:
+		/* Error states => do nothing */
+		ret = -EINVAL;
+		break;
+	default:
+		/* Unknown state */
+		BUG();
+	}

-	if (iommu_pass_through)
-		goto out;
+	return ret;
+}

-	if (amd_iommu_unmap_flush)
-		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
-	else
-		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
+static int __init iommu_go_to_state(enum iommu_init_state state)
+{
+	int ret = 0;
+
+	while (init_state != state) {
+		ret = state_next();
+		if (init_state == IOMMU_NOT_FOUND ||
+		    init_state == IOMMU_INIT_ERROR)
+			break;
+	}

-out:
 	return ret;
+}

-free:
-	disable_iommus();
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ */
+static int __init amd_iommu_init(void)
+{
+	int ret;

-	free_on_init_error();
+	ret = iommu_go_to_state(IOMMU_INITIALIZED);
+	if (ret) {
+		disable_iommus();
+		free_on_init_error();
+	}

-	goto out;
+	return ret;
 }
 /****************************************************************************
@@ -1669,29 +1776,25 @@ static int __init amd_iommu_init(void)
  * IOMMUs
  *
  ****************************************************************************/
-static int __init early_amd_iommu_detect(struct acpi_table_header *table)
-{
-	return 0;
-}
-
 int __init amd_iommu_detect(void)
 {
+	int ret;
+
 	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
 		return -ENODEV;

 	if (amd_iommu_disabled)
 		return -ENODEV;

-	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
-		iommu_detected = 1;
-		amd_iommu_detected = 1;
-		x86_init.iommu.iommu_init = amd_iommu_init;
+	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
+	if (ret)
+		return ret;

-		/* Make sure ACS will be enabled */
-		pci_request_acs();
-		return 1;
-	}
+	amd_iommu_detected = true;
+	iommu_detected = 1;
+	x86_init.iommu.iommu_init = amd_iommu_init;

-	return -ENODEV;
+	return 0;
 }
 /****************************************************************************
@@ -1727,8 +1830,8 @@ __setup("amd_iommu=", parse_amd_iommu_options);

 IOMMU_INIT_FINISH(amd_iommu_detect,
 		  gart_iommu_hole_init,
-		  0,
-		  0);
+		  NULL,
+		  NULL);

 bool amd_iommu_v2_supported(void)
 {
@@ -487,7 +487,7 @@ struct amd_iommu {
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */
-	u8 *mmio_base;
+	u8 __iomem *mmio_base;

 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
@@ -501,6 +501,9 @@ struct amd_iommu {
 	/* IOMMUv2 */
 	bool is_iommu_v2;

+	/* PCI device id of the IOMMU device */
+	u16 devid;
+
 	/*
 	 * Capability pointer. There could be more than one IOMMU per PCI
 	 * device function if there are more than one AMD IOMMU capability
@@ -530,8 +533,6 @@ struct amd_iommu {
 	u32 evt_buf_size;
 	/* event buffer virtual address */
 	u8 *evt_buf;
-	/* MSI number for event interrupt */
-	u16 evt_msi_num;

 	/* Base of the PPR log, if present */
 	u8 *ppr_log;
@@ -664,6 +665,12 @@ extern bool amd_iommu_force_isolation;
 /* Max levels of glxval supported */
 extern int amd_iommu_max_glx_val;

+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+
 /* takes bus and device/function and returns the device id
  * FIXME: should that be in generic PCI code? */
 static inline u16 calc_devid(u8 bus, u8 devfn)
@@ -81,7 +81,7 @@ struct fault {
 	u16 flags;
 };

-struct device_state **state_table;
+static struct device_state **state_table;
 static spinlock_t state_lock;

 /* List and lock for all pasid_states */
@@ -681,6 +681,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,

 	atomic_set(&pasid_state->count, 1);
 	init_waitqueue_head(&pasid_state->wq);
+	spin_lock_init(&pasid_state->lock);
+
 	pasid_state->task         = task;
 	pasid_state->mm           = get_task_mm(task);
 	pasid_state->device_state = dev_state;
@@ -732,6 +732,10 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
 	spin_lock_init(&priv->pgtablelock);
 	INIT_LIST_HEAD(&priv->clients);

+	dom->geometry.aperture_start = 0;
+	dom->geometry.aperture_end   = ~0UL;
+	dom->geometry.force_aperture = true;
+
 	domain->priv = priv;
 	return 0;
@@ -3932,6 +3932,10 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
 	domain_update_iommu_cap(dmar_domain);
 	domain->priv = dmar_domain;

+	domain->geometry.aperture_start = 0;
+	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+	domain->geometry.force_aperture = true;
+
 	return 0;
 }
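
The AMD, Exynos and Intel drivers above all publish the same three geometry fields on the domain. As a hedged illustration of how a domain user might consult them before mapping (the names "domain", "iova" and "size" are assumptions, not code from this commit):

	/* illustrative only: refuse IOVAs outside the reported aperture */
	if (domain->geometry.force_aperture &&
	    (iova < domain->geometry.aperture_start ||
	     iova + size - 1 > domain->geometry.aperture_end))
		return -EINVAL;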
@@ -4090,52 +4094,70 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
 	return 0;
 }

-/*
- * Group numbers are arbitrary. Device with the same group number
- * indicate the iommu cannot differentiate between them. To avoid
- * tracking used groups we just use the seg|bus|devfn of the lowest
- * level we're able to differentiate devices
- */
-static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct pci_dev *bridge;
-	union {
-		struct {
-			u8 devfn;
-			u8 bus;
-			u16 segment;
-		} pci;
-		u32 group;
-	} id;
+	pci_dev_put(*from);
+	*from = to;
+}

-	if (iommu_no_mapping(dev))
-		return -ENODEV;
+#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

-	id.pci.segment = pci_domain_nr(pdev->bus);
-	id.pci.bus = pdev->bus->number;
-	id.pci.devfn = pdev->devfn;
+static int intel_iommu_add_device(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *bridge, *dma_pdev;
+	struct iommu_group *group;
+	int ret;

-	if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+	if (!device_to_iommu(pci_domain_nr(pdev->bus),
+			     pdev->bus->number, pdev->devfn))
 		return -ENODEV;

 	bridge = pci_find_upstream_pcie_bridge(pdev);
 	if (bridge) {
-		if (pci_is_pcie(bridge)) {
-			id.pci.bus = bridge->subordinate->number;
-			id.pci.devfn = 0;
-		} else {
-			id.pci.bus = bridge->bus->number;
-			id.pci.devfn = bridge->devfn;
-		}
-	}
+		if (pci_is_pcie(bridge))
+			dma_pdev = pci_get_domain_bus_and_slot(
+						pci_domain_nr(pdev->bus),
+						bridge->subordinate->number, 0);
+		else
+			dma_pdev = pci_dev_get(bridge);
+	} else
+		dma_pdev = pci_dev_get(pdev);
+
+	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+
+	if (dma_pdev->multifunction &&
+	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
+		swap_pci_ref(&dma_pdev,
+			     pci_get_slot(dma_pdev->bus,
+					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
+						    0)));
+
+	while (!pci_is_root_bus(dma_pdev->bus)) {
+		if (pci_acs_path_enabled(dma_pdev->bus->self,
+					 NULL, REQ_ACS_FLAGS))
+			break;
+
+		swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+	}
+
+	group = iommu_group_get(&dma_pdev->dev);
+	pci_dev_put(dma_pdev);
+	if (!group) {
+		group = iommu_group_alloc();
+		if (IS_ERR(group))
+			return PTR_ERR(group);
+	}

-	if (!pdev->is_virtfn && iommu_group_mf)
-		id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+	ret = iommu_group_add_device(group, dev);

-	*groupid = id.group;
+	iommu_group_put(group);
+	return ret;
+}

-	return 0;
+static void intel_iommu_remove_device(struct device *dev)
+{
+	iommu_group_remove_device(dev);
 }

 static struct iommu_ops intel_iommu_ops = {
@@ -4147,7 +4169,8 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap	= intel_iommu_domain_has_cap,
-	.device_group	= intel_iommu_device_group,
+	.add_device	= intel_iommu_add_device,
+	.remove_device	= intel_iommu_remove_device,
 	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
@@ -26,60 +26,535 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/iommu.h>
+#include <linux/idr.h>
+#include <linux/notifier.h>
+#include <linux/err.h>

-static ssize_t show_iommu_group(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	unsigned int groupid;
-
-	if (iommu_device_group(dev, &groupid))
-		return 0;
-
-	return sprintf(buf, "%u", groupid);
-}
-static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+static struct kset *iommu_group_kset;
+static struct ida iommu_group_ida;
+static struct mutex iommu_group_mutex;
+
+struct iommu_group {
+	struct kobject kobj;
+	struct kobject *devices_kobj;
+	struct list_head devices;
+	struct mutex mutex;
+	struct blocking_notifier_head notifier;
+	void *iommu_data;
+	void (*iommu_data_release)(void *iommu_data);
+	char *name;
+	int id;
+};
+
+struct iommu_device {
+	struct list_head list;
+	struct device *dev;
+	char *name;
+};
+
+struct iommu_group_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct iommu_group *group, char *buf);
+	ssize_t (*store)(struct iommu_group *group,
+			 const char *buf, size_t count);
+};
+
+#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
+struct iommu_group_attribute iommu_group_attr_##_name =	\
+	__ATTR(_name, _mode, _show, _store)
+
+#define to_iommu_group_attr(_attr)	\
+	container_of(_attr, struct iommu_group_attribute, attr)
+#define to_iommu_group(_kobj)		\
+	container_of(_kobj, struct iommu_group, kobj)
+
+static ssize_t iommu_group_attr_show(struct kobject *kobj,
+				     struct attribute *__attr, char *buf)
+{
+	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+	struct iommu_group *group = to_iommu_group(kobj);
+	ssize_t ret = -EIO;
+
+	if (attr->show)
+		ret = attr->show(group, buf);
+	return ret;
+}
+
+static ssize_t iommu_group_attr_store(struct kobject *kobj,
+				      struct attribute *__attr,
+				      const char *buf, size_t count)
+{
+	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+	struct iommu_group *group = to_iommu_group(kobj);
+	ssize_t ret = -EIO;
+
+	if (attr->store)
+		ret = attr->store(group, buf, count);
+	return ret;
+}
+
+static const struct sysfs_ops iommu_group_sysfs_ops = {
+	.show = iommu_group_attr_show,
+	.store = iommu_group_attr_store,
+};
+
+static int iommu_group_create_file(struct iommu_group *group,
+				   struct iommu_group_attribute *attr)
+{
+	return sysfs_create_file(&group->kobj, &attr->attr);
+}
+
+static void iommu_group_remove_file(struct iommu_group *group,
+				    struct iommu_group_attribute *attr)
+{
+	sysfs_remove_file(&group->kobj, &attr->attr);
+}
+
+static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
+{
+	return sprintf(buf, "%s\n", group->name);
+}
+
+static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+
+static void iommu_group_release(struct kobject *kobj)
+{
+	struct iommu_group *group = to_iommu_group(kobj);
+
+	if (group->iommu_data_release)
+		group->iommu_data_release(group->iommu_data);
+
+	mutex_lock(&iommu_group_mutex);
+	ida_remove(&iommu_group_ida, group->id);
+	mutex_unlock(&iommu_group_mutex);
+
+	kfree(group->name);
+	kfree(group);
+}
+
+static struct kobj_type iommu_group_ktype = {
+	.sysfs_ops = &iommu_group_sysfs_ops,
+	.release = iommu_group_release,
+};
+
+/**
+ * iommu_group_alloc - Allocate a new group
+ * @name: Optional name to associate with group, visible in sysfs
+ *
+ * This function is called by an iommu driver to allocate a new iommu
+ * group. The iommu group represents the minimum granularity of the iommu.
+ * Upon successful return, the caller holds a reference to the supplied
+ * group in order to hold the group until devices are added. Use
+ * iommu_group_put() to release this extra reference count, allowing the
+ * group to be automatically reclaimed once it has no devices or external
+ * references.
+ */
+struct iommu_group *iommu_group_alloc(void)
+{
+	struct iommu_group *group;
+	int ret;
+
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
+	if (!group)
+		return ERR_PTR(-ENOMEM);
+
+	group->kobj.kset = iommu_group_kset;
+	mutex_init(&group->mutex);
+	INIT_LIST_HEAD(&group->devices);
+	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
+
+	mutex_lock(&iommu_group_mutex);
+
+again:
+	if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
+		kfree(group);
+		mutex_unlock(&iommu_group_mutex);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
+		goto again;
+
+	mutex_unlock(&iommu_group_mutex);
+
+	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
+				   NULL, "%d", group->id);
+	if (ret) {
+		mutex_lock(&iommu_group_mutex);
+		ida_remove(&iommu_group_ida, group->id);
+		mutex_unlock(&iommu_group_mutex);
+		kfree(group);
+		return ERR_PTR(ret);
+	}
+
+	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
+	if (!group->devices_kobj) {
+		kobject_put(&group->kobj); /* triggers .release & free */
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * The devices_kobj holds a reference on the group kobject, so
+	 * as long as that exists so will the group. We can therefore
+	 * use the devices_kobj for reference counting.
+	 */
+	kobject_put(&group->kobj);
+
+	return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_alloc);
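
/*
 * Editorial sketch, not part of this commit: together with
 * iommu_group_get()/iommu_group_put() below, the expected calling
 * pattern for a driver's add_device callback looks roughly like this
 * ("my_add_device" is a made-up name; compare the AMD and Intel
 * add_device implementations elsewhere in this merge).
 */
static int my_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	/* reuse an existing group if the device already has one */
	group = iommu_group_get(dev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	/* drop our reference; the added device keeps the group alive */
	iommu_group_put(group);

	return ret;
}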
+/**
+ * iommu_group_get_iommudata - retrieve iommu_data registered for a group
+ * @group: the group
+ *
+ * iommu drivers can store data in the group for use when doing iommu
+ * operations. This function provides a way to retrieve it. Caller
+ * should hold a group reference.
+ */
+void *iommu_group_get_iommudata(struct iommu_group *group)
+{
+	return group->iommu_data;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
+
+/**
+ * iommu_group_set_iommudata - set iommu_data for a group
+ * @group: the group
+ * @iommu_data: new data
+ * @release: release function for iommu_data
+ *
+ * iommu drivers can store data in the group for use when doing iommu
+ * operations. This function provides a way to set the data after
+ * the group has been allocated. Caller should hold a group reference.
+ */
+void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
+			       void (*release)(void *iommu_data))
+{
+	group->iommu_data = iommu_data;
+	group->iommu_data_release = release;
+}
+EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
+
+/**
+ * iommu_group_set_name - set name for a group
+ * @group: the group
+ * @name: name
+ *
+ * Allow iommu driver to set a name for a group. When set it will
+ * appear in a name attribute file under the group in sysfs.
+ */
+int iommu_group_set_name(struct iommu_group *group, const char *name)
+{
+	int ret;
+
+	if (group->name) {
+		iommu_group_remove_file(group, &iommu_group_attr_name);
+		kfree(group->name);
+		group->name = NULL;
+		if (!name)
+			return 0;
+	}
+
+	group->name = kstrdup(name, GFP_KERNEL);
+	if (!group->name)
+		return -ENOMEM;
+
+	ret = iommu_group_create_file(group, &iommu_group_attr_name);
+	if (ret) {
+		kfree(group->name);
+		group->name = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_group_set_name);
+
+/**
+ * iommu_group_add_device - add a device to an iommu group
+ * @group: the group into which to add the device (reference should be held)
+ * @dev: the device
+ *
+ * This function is called by an iommu driver to add a device into a
+ * group. Adding a device increments the group reference count.
+ */
+int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+{
+	int ret, i = 0;
+	struct iommu_device *device;
+
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device)
+		return -ENOMEM;
+
+	device->dev = dev;
+
+	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
+	if (ret) {
+		kfree(device);
+		return ret;
+	}
+
+	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
+rename:
+	if (!device->name) {
+		sysfs_remove_link(&dev->kobj, "iommu_group");
+		kfree(device);
+		return -ENOMEM;
+	}
+
+	ret = sysfs_create_link_nowarn(group->devices_kobj,
+				       &dev->kobj, device->name);
+	if (ret) {
+		kfree(device->name);
+		if (ret == -EEXIST && i >= 0) {
+			/*
+			 * Account for the slim chance of collision
+			 * and append an instance to the name.
+			 */
+			device->name = kasprintf(GFP_KERNEL, "%s.%d",
+						 kobject_name(&dev->kobj), i++);
+			goto rename;
+		}
+
+		sysfs_remove_link(&dev->kobj, "iommu_group");
+		kfree(device);
+		return ret;
+	}
+
+	kobject_get(group->devices_kobj);
+
+	dev->iommu_group = group;
+
+	mutex_lock(&group->mutex);
+	list_add_tail(&device->list, &group->devices);
+	mutex_unlock(&group->mutex);
+
+	/* Notify any listeners about change to group. */
+	blocking_notifier_call_chain(&group->notifier,
+				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_group_add_device);
/**
* iommu_group_remove_device - remove a device from it's current group
* @dev: device to be removed
*
* This function is called by an iommu driver to remove the device from
* it's current group. This decrements the iommu group reference count.
*/
void iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
struct iommu_device *tmp_device, *device = NULL;
/* Pre-notify listeners that a device is being removed. */
blocking_notifier_call_chain(&group->notifier,
IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
mutex_lock(&group->mutex);
list_for_each_entry(tmp_device, &group->devices, list) {
if (tmp_device->dev == dev) {
device = tmp_device;
list_del(&device->list);
break;
}
}
mutex_unlock(&group->mutex);
if (!device)
return;
sysfs_remove_link(group->devices_kobj, device->name);
sysfs_remove_link(&dev->kobj, "iommu_group");
kfree(device->name);
kfree(device);
dev->iommu_group = NULL;
kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
/**
* iommu_group_for_each_dev - iterate over each device in the group
* @group: the group
* @data: caller opaque data to be passed to callback function
* @fn: caller supplied callback function
*
* This function is called by group users to iterate over group devices.
* Callers should hold a reference count to the group during callback.
* The group->mutex is held across callbacks, which will block calls to
* iommu_group_add/remove_device.
*/
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
struct iommu_device *device;
int ret = 0;
mutex_lock(&group->mutex);
list_for_each_entry(device, &group->devices, list) {
ret = fn(device->dev, data);
if (ret)
break;
}
mutex_unlock(&group->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
/**
* iommu_group_get - Return the group for a device and increment reference
* @dev: get the group that this device belongs to
*
* This function is called by iommu drivers and users to get the group
* for the specified device. If found, the group is returned and the group
* reference in incremented, else NULL.
*/
struct iommu_group *iommu_group_get(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
if (group)
kobject_get(group->devices_kobj);
return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);
/**
* iommu_group_put - Decrement group reference
* @group: the group to use
*
* This function is called by iommu drivers and users to release the
* iommu group. Once the reference count is zero, the group is released.
*/
void iommu_group_put(struct iommu_group *group)
{
if (group)
kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
/**
* iommu_group_register_notifier - Register a notifier for group changes
* @group: the group to watch
* @nb: notifier block to signal
*
* This function allows iommu group users to track changes in a group.
* See include/linux/iommu.h for actions sent via this notifier. Caller
* should hold a reference to the group throughout notifier registration.
*/
int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb)
{
return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
static int remove_iommu_group(struct device *dev) /**
* iommu_group_unregister_notifier - Unregister a notifier
* @group: the group to watch
* @nb: notifier block to signal
*
* Unregister a previously registered group notifier block.
*/
int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb)
{ {
unsigned int groupid; return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
/**
* iommu_group_id - Return ID for a group
* @group: the group to ID
*
* Return the unique ID for the group matching the sysfs group number.
*/
int iommu_group_id(struct iommu_group *group)
{
return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);
static int add_iommu_group(struct device *dev, void *data)
{
struct iommu_ops *ops = data;
if (!ops->add_device)
return -ENODEV;
WARN_ON(dev->iommu_group);
if (iommu_device_group(dev, &groupid) == 0) ops->add_device(dev);
device_remove_file(dev, &dev_attr_iommu_group);
return 0; return 0;
} }
static int iommu_device_notifier(struct notifier_block *nb, static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data) unsigned long action, void *data)
{ {
struct device *dev = data; struct device *dev = data;
struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
unsigned long group_action = 0;
if (action == BUS_NOTIFY_ADD_DEVICE) /*
return add_iommu_group(dev, NULL); * ADD/DEL call into iommu driver ops if provided, which may
else if (action == BUS_NOTIFY_DEL_DEVICE) * result in ADD/DEL notifiers to group->notifier
return remove_iommu_group(dev); */
if (action == BUS_NOTIFY_ADD_DEVICE) {
if (ops->add_device)
return ops->add_device(dev);
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
if (ops->remove_device && dev->iommu_group) {
ops->remove_device(dev);
return 0;
}
}
/*
* Remaining BUS_NOTIFYs get filtered and republished to the
* group, if anyone is listening
*/
group = iommu_group_get(dev);
if (!group)
return 0;
switch (action) {
case BUS_NOTIFY_BIND_DRIVER:
group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
break;
case BUS_NOTIFY_BOUND_DRIVER:
group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
break;
case BUS_NOTIFY_UNBIND_DRIVER:
group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
break;
case BUS_NOTIFY_UNBOUND_DRIVER:
group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
break;
}
if (group_action)
blocking_notifier_call_chain(&group->notifier,
group_action, dev);
iommu_group_put(group);
return 0; return 0;
} }
static struct notifier_block iommu_device_nb = { static struct notifier_block iommu_bus_nb = {
.notifier_call = iommu_device_notifier, .notifier_call = iommu_bus_notifier,
}; };
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops) static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{ {
bus_register_notifier(bus, &iommu_device_nb); bus_register_notifier(bus, &iommu_bus_nb);
bus_for_each_dev(bus, NULL, NULL, add_iommu_group); bus_for_each_dev(bus, NULL, ops, add_iommu_group);
} }
/** /**
...@@ -192,6 +667,45 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev) ...@@ -192,6 +667,45 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
} }
EXPORT_SYMBOL_GPL(iommu_detach_device); EXPORT_SYMBOL_GPL(iommu_detach_device);
/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
* the IOMMU API works on domains and devices. Bridge that gap by
* iterating over the devices in a group. Ideally we'd have a single
* device which represents the requestor ID of the group, but we also
* allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
* wish to group them at a higher level (ex. untrusted multi-function
* PCI devices). Thus we attach each device.
*/
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
struct iommu_domain *domain = data;
return iommu_attach_device(domain, dev);
}
int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
return iommu_group_for_each_dev(group, domain,
iommu_group_do_attach_device);
}
EXPORT_SYMBOL_GPL(iommu_attach_group);
static int iommu_group_do_detach_device(struct device *dev, void *data)
{
struct iommu_domain *domain = data;
iommu_detach_device(domain, dev);
return 0;
}
void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);
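Example: sketch of moving every device in a group into a freshly allocated domain (error handling abbreviated):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

static struct iommu_domain *isolate_group(struct device *dev)
{
        struct iommu_group *group = iommu_group_get(dev);
        struct iommu_domain *domain;
        int ret;

        if (!group)
                return ERR_PTR(-ENODEV);

        domain = iommu_domain_alloc(dev->bus);
        if (!domain) {
                iommu_group_put(group);
                return ERR_PTR(-ENOMEM);
        }

        /* walks the group, attaching each member device */
        ret = iommu_attach_group(domain, group);
        iommu_group_put(group);
        if (ret) {
                iommu_domain_free(domain);
                return ERR_PTR(ret);
        }
        return domain;
}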
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova) unsigned long iova)
{ {
...@@ -336,11 +850,48 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) ...@@ -336,11 +850,48 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
} }
EXPORT_SYMBOL_GPL(iommu_unmap); EXPORT_SYMBOL_GPL(iommu_unmap);
int iommu_device_group(struct device *dev, unsigned int *groupid) static int __init iommu_init(void)
{ {
if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group) iommu_group_kset = kset_create_and_add("iommu_groups",
return dev->bus->iommu_ops->device_group(dev, groupid); NULL, kernel_kobj);
ida_init(&iommu_group_ida);
mutex_init(&iommu_group_mutex);
return -ENODEV; BUG_ON(!iommu_group_kset);
return 0;
}
subsys_initcall(iommu_init);
int iommu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
struct iommu_domain_geometry *geometry;
int ret = 0;
switch (attr) {
case DOMAIN_ATTR_GEOMETRY:
geometry = data;
*geometry = domain->geometry;
break;
default:
if (!domain->ops->domain_get_attr)
return -EINVAL;
ret = domain->ops->domain_get_attr(domain, attr, data);
}
return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
int iommu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
if (!domain->ops->domain_set_attr)
return -EINVAL;
return domain->ops->domain_set_attr(domain, attr, data);
} }
EXPORT_SYMBOL_GPL(iommu_device_group); EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
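Example: sketch of querying the new geometry attribute through iommu_domain_get_attr() (the printing is illustrative):

#include <linux/kernel.h>
#include <linux/iommu.h>

static int print_aperture(struct iommu_domain *domain)
{
        struct iommu_domain_geometry geo;
        int ret;

        ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
        if (ret)
                return ret;

        pr_info("aperture 0x%llx-0x%llx%s\n",
                (unsigned long long)geo.aperture_start,
                (unsigned long long)geo.aperture_end,
                geo.force_aperture ? " (enforced)" : "");
        return 0;
}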
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/msi.h>
#include <asm/hw_irq.h>
#include <asm/irq_remapping.h>
#include "irq_remapping.h" #include "irq_remapping.h"
......
...@@ -226,6 +226,11 @@ static int msm_iommu_domain_init(struct iommu_domain *domain) ...@@ -226,6 +226,11 @@ static int msm_iommu_domain_init(struct iommu_domain *domain)
memset(priv->pgtable, 0, SZ_16K); memset(priv->pgtable, 0, SZ_16K);
domain->priv = priv; domain->priv = priv;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end = (1ULL << 32) - 1;
domain->geometry.force_aperture = true;
return 0; return 0;
fail_nomem: fail_nomem:
......
/*
* OF helpers for IOMMU
*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/export.h>
#include <linux/limits.h>
#include <linux/of.h>
/**
 * of_get_dma_window - Parse the *dma-window property; returns 0 if found.
*
* @dn: device node
* @prefix: prefix for property name if any
* @index: index to start to parse
 * @busno: Returns the bus number if supported; otherwise pass NULL
 * @addr: Returns the address at which DMA starts
 * @size: Returns the size of the range that DMA can handle
 *
 * This supports different formats flexibly. "prefix" can be
 * supplied if needed. "busno" and "index" are optional;
 * pass 0 (or NULL) if not used.
*/
int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
unsigned long *busno, dma_addr_t *addr, size_t *size)
{
const __be32 *dma_window, *end;
int bytes, cur_index = 0;
char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];
if (!dn || !addr || !size)
return -EINVAL;
if (!prefix)
prefix = "";
snprintf(propname, sizeof(propname), "%sdma-window", prefix);
snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);
dma_window = of_get_property(dn, propname, &bytes);
if (!dma_window)
return -ENODEV;
end = dma_window + bytes / sizeof(*dma_window);
while (dma_window < end) {
u32 cells;
const void *prop;
/* busno is one cell if supported */
if (busno)
*busno = be32_to_cpup(dma_window++);
prop = of_get_property(dn, addrname, NULL);
if (!prop)
prop = of_get_property(dn, "#address-cells", NULL);
cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
if (!cells)
return -EINVAL;
*addr = of_read_number(dma_window, cells);
dma_window += cells;
prop = of_get_property(dn, sizename, NULL);
cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
if (!cells)
return -EINVAL;
*size = of_read_number(dma_window, cells);
dma_window += cells;
if (cur_index++ == index)
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
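Example: sketch of parsing the first dma-window of a node, as the Tegra SMMU probe below does (no prefix, index 0, busno unused):

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/types.h>

static int parse_window(struct device_node *dn)
{
        dma_addr_t base;
        size_t size;
        int err;

        err = of_get_dma_window(dn, NULL, 0, NULL, &base, &size);
        if (err)
                return err;

        pr_info("%s: IOVA window at 0x%llx, %zu bytes\n", dn->full_name,
                (unsigned long long)base, size);
        return 0;
}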
...@@ -1148,6 +1148,10 @@ static int omap_iommu_domain_init(struct iommu_domain *domain) ...@@ -1148,6 +1148,10 @@ static int omap_iommu_domain_init(struct iommu_domain *domain)
domain->priv = omap_domain; domain->priv = omap_domain;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end = (1ULL << 32) - 1;
domain->geometry.force_aperture = true;
return 0; return 0;
fail_nomem: fail_nomem:
......
...@@ -165,6 +165,11 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain, ...@@ -165,6 +165,11 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
return -EINVAL; return -EINVAL;
domain->priv = gart; domain->priv = gart;
domain->geometry.aperture_start = gart->iovmm_base;
domain->geometry.aperture_end = gart->iovmm_base +
gart->page_count * GART_PAGE_SIZE - 1;
domain->geometry.force_aperture = true;
client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL); client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
if (!client) if (!client)
return -ENOMEM; return -ENOMEM;
......
...@@ -30,12 +30,15 @@ ...@@ -30,12 +30,15 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/iommu.h> #include <linux/iommu.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <mach/iomap.h> #include <mach/iomap.h>
#include <mach/smmu.h> #include <mach/smmu.h>
#include <mach/tegra-ahb.h>
/* bitmap of the page sizes currently supported */ /* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES (SZ_4K) #define SMMU_IOMMU_PGSIZES (SZ_4K)
...@@ -111,12 +114,6 @@ ...@@ -111,12 +114,6 @@
#define SMMU_PDE_NEXT_SHIFT 28 #define SMMU_PDE_NEXT_SHIFT 28
/* AHB Arbiter Registers */
#define AHB_XBAR_CTRL 0xe0
#define AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE 1
#define AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT 17
#define SMMU_NUM_ASIDS 4
#define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000 #define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */ #define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000 #define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000
...@@ -136,6 +133,7 @@ ...@@ -136,6 +133,7 @@
#define SMMU_PAGE_SHIFT 12 #define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT) #define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT)
#define SMMU_PAGE_MASK ((1 << SMMU_PAGE_SHIFT) - 1)
#define SMMU_PDIR_COUNT 1024 #define SMMU_PDIR_COUNT 1024
#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT) #define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT)
...@@ -177,6 +175,8 @@ ...@@ -177,6 +175,8 @@
#define SMMU_ASID_DISABLE 0 #define SMMU_ASID_DISABLE 0
#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0)) #define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
#define NUM_SMMU_REG_BANKS 3
#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1) #define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0) #define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1) #define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
...@@ -235,14 +235,12 @@ struct smmu_as { ...@@ -235,14 +235,12 @@ struct smmu_as {
* Per SMMU device - IOMMU device * Per SMMU device - IOMMU device
*/ */
struct smmu_device { struct smmu_device {
void __iomem *regs, *regs_ahbarb; void __iomem *regs[NUM_SMMU_REG_BANKS];
unsigned long iovmm_base; /* remappable base address */ unsigned long iovmm_base; /* remappable base address */
unsigned long page_count; /* total remappable size */ unsigned long page_count; /* total remappable size */
spinlock_t lock; spinlock_t lock;
char *name; char *name;
struct device *dev; struct device *dev;
int num_as;
struct smmu_as *as; /* Run-time allocated array */
struct page *avp_vector_page; /* dummy page shared by all AS's */ struct page *avp_vector_page; /* dummy page shared by all AS's */
/* /*
...@@ -252,29 +250,50 @@ struct smmu_device { ...@@ -252,29 +250,50 @@ struct smmu_device {
unsigned long translation_enable_1; unsigned long translation_enable_1;
unsigned long translation_enable_2; unsigned long translation_enable_2;
unsigned long asid_security; unsigned long asid_security;
struct device_node *ahb;
int num_as;
struct smmu_as as[0]; /* Run-time allocated array */
}; };
static struct smmu_device *smmu_handle; /* unique for a system */ static struct smmu_device *smmu_handle; /* unique for a system */
/* /*
* SMMU/AHB register accessors * SMMU register accessors
*/ */
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs) static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{ {
return readl(smmu->regs + offs); BUG_ON(offs < 0x10);
} if (offs < 0x3c)
static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs) return readl(smmu->regs[0] + offs - 0x10);
{ BUG_ON(offs < 0x1f0);
writel(val, smmu->regs + offs); if (offs < 0x200)
return readl(smmu->regs[1] + offs - 0x1f0);
BUG_ON(offs < 0x228);
if (offs < 0x284)
return readl(smmu->regs[2] + offs - 0x228);
BUG();
} }
static inline u32 ahb_read(struct smmu_device *smmu, size_t offs) static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
return readl(smmu->regs_ahbarb + offs);
}
static inline void ahb_write(struct smmu_device *smmu, u32 val, size_t offs)
{ {
writel(val, smmu->regs_ahbarb + offs); BUG_ON(offs < 0x10);
if (offs < 0x3c) {
writel(val, smmu->regs[0] + offs - 0x10);
return;
}
BUG_ON(offs < 0x1f0);
if (offs < 0x200) {
writel(val, smmu->regs[1] + offs - 0x1f0);
return;
}
BUG_ON(offs < 0x228);
if (offs < 0x284) {
writel(val, smmu->regs[2] + offs - 0x228);
return;
}
BUG();
} }
#define VA_PAGE_TO_PA(va, page) \ #define VA_PAGE_TO_PA(va, page) \
...@@ -370,7 +389,7 @@ static void smmu_flush_regs(struct smmu_device *smmu, int enable) ...@@ -370,7 +389,7 @@ static void smmu_flush_regs(struct smmu_device *smmu, int enable)
FLUSH_SMMU_REGS(smmu); FLUSH_SMMU_REGS(smmu);
} }
static void smmu_setup_regs(struct smmu_device *smmu) static int smmu_setup_regs(struct smmu_device *smmu)
{ {
int i; int i;
u32 val; u32 val;
...@@ -398,10 +417,7 @@ static void smmu_setup_regs(struct smmu_device *smmu) ...@@ -398,10 +417,7 @@ static void smmu_setup_regs(struct smmu_device *smmu)
smmu_flush_regs(smmu, 1); smmu_flush_regs(smmu, 1);
val = ahb_read(smmu, AHB_XBAR_CTRL); return tegra_ahb_enable_smmu(smmu->ahb);
val |= AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE <<
AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT;
ahb_write(smmu, val, AHB_XBAR_CTRL);
} }
static void flush_ptc_and_tlb(struct smmu_device *smmu, static void flush_ptc_and_tlb(struct smmu_device *smmu,
...@@ -537,33 +553,42 @@ static inline void put_signature(struct smmu_as *as, ...@@ -537,33 +553,42 @@ static inline void put_signature(struct smmu_as *as,
#endif #endif
/* /*
* Caller must lock/unlock as * Caller must not hold as->lock
*/ */
static int alloc_pdir(struct smmu_as *as) static int alloc_pdir(struct smmu_as *as)
{ {
unsigned long *pdir; unsigned long *pdir, flags;
int pdn; int pdn, err = 0;
u32 val; u32 val;
struct smmu_device *smmu = as->smmu; struct smmu_device *smmu = as->smmu;
struct page *page;
unsigned int *cnt;
if (as->pdir_page) /*
return 0; * do the allocation, then grab as->lock
*/
cnt = devm_kzalloc(smmu->dev,
sizeof(cnt[0]) * SMMU_PDIR_COUNT,
GFP_KERNEL);
page = alloc_page(GFP_KERNEL | __GFP_DMA);
as->pte_count = devm_kzalloc(smmu->dev, spin_lock_irqsave(&as->lock, flags);
sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
if (!as->pte_count) { if (as->pdir_page) {
dev_err(smmu->dev, /* We raced, free the redundant */
"failed to allocate smmu_device PTE cunters\n"); err = -EAGAIN;
return -ENOMEM; goto err_out;
} }
as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
if (!as->pdir_page) { if (!page || !cnt) {
dev_err(smmu->dev, dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
"failed to allocate smmu_device page directory\n"); err = -ENOMEM;
devm_kfree(smmu->dev, as->pte_count); goto err_out;
as->pte_count = NULL;
return -ENOMEM;
} }
as->pdir_page = page;
as->pte_count = cnt;
SetPageReserved(as->pdir_page); SetPageReserved(as->pdir_page);
pdir = page_address(as->pdir_page); pdir = page_address(as->pdir_page);
...@@ -579,7 +604,17 @@ static int alloc_pdir(struct smmu_as *as) ...@@ -579,7 +604,17 @@ static int alloc_pdir(struct smmu_as *as)
smmu_write(smmu, val, SMMU_TLB_FLUSH); smmu_write(smmu, val, SMMU_TLB_FLUSH);
FLUSH_SMMU_REGS(as->smmu); FLUSH_SMMU_REGS(as->smmu);
spin_unlock_irqrestore(&as->lock, flags);
return 0; return 0;
err_out:
spin_unlock_irqrestore(&as->lock, flags);
devm_kfree(smmu->dev, cnt);
if (page)
__free_page(page);
return err;
} }
static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova) static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
...@@ -771,30 +806,28 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain, ...@@ -771,30 +806,28 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain,
static int smmu_iommu_domain_init(struct iommu_domain *domain) static int smmu_iommu_domain_init(struct iommu_domain *domain)
{ {
int i; int i, err = -ENODEV;
unsigned long flags; unsigned long flags;
struct smmu_as *as; struct smmu_as *as;
struct smmu_device *smmu = smmu_handle; struct smmu_device *smmu = smmu_handle;
/* Look for a free AS with lock held */ /* Look for a free AS with lock held */
for (i = 0; i < smmu->num_as; i++) { for (i = 0; i < smmu->num_as; i++) {
struct smmu_as *tmp = &smmu->as[i]; as = &smmu->as[i];
if (!as->pdir_page) {
spin_lock_irqsave(&tmp->lock, flags); err = alloc_pdir(as);
if (!tmp->pdir_page) { if (!err)
as = tmp;
goto found; goto found;
} }
spin_unlock_irqrestore(&tmp->lock, flags); if (err != -EAGAIN)
break;
} }
if (i == smmu->num_as)
dev_err(smmu->dev, "no free AS\n"); dev_err(smmu->dev, "no free AS\n");
return -ENODEV; return err;
found: found:
if (alloc_pdir(as) < 0) spin_lock_irqsave(&smmu->lock, flags);
goto err_alloc_pdir;
spin_lock(&smmu->lock);
/* Update PDIR register */ /* Update PDIR register */
smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
...@@ -802,17 +835,18 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain) ...@@ -802,17 +835,18 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain)
SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA); SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
FLUSH_SMMU_REGS(smmu); FLUSH_SMMU_REGS(smmu);
spin_unlock(&smmu->lock); spin_unlock_irqrestore(&smmu->lock, flags);
spin_unlock_irqrestore(&as->lock, flags);
domain->priv = as; domain->priv = as;
domain->geometry.aperture_start = smmu->iovmm_base;
domain->geometry.aperture_end = smmu->iovmm_base +
smmu->page_count * SMMU_PAGE_SIZE - 1;
domain->geometry.force_aperture = true;
dev_dbg(smmu->dev, "smmu_as@%p\n", as); dev_dbg(smmu->dev, "smmu_as@%p\n", as);
return 0;
err_alloc_pdir: return 0;
spin_unlock_irqrestore(&as->lock, flags);
return -ENODEV;
} }
static void smmu_iommu_domain_destroy(struct iommu_domain *domain) static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
...@@ -873,65 +907,73 @@ static int tegra_smmu_resume(struct device *dev) ...@@ -873,65 +907,73 @@ static int tegra_smmu_resume(struct device *dev)
{ {
struct smmu_device *smmu = dev_get_drvdata(dev); struct smmu_device *smmu = dev_get_drvdata(dev);
unsigned long flags; unsigned long flags;
int err;
spin_lock_irqsave(&smmu->lock, flags); spin_lock_irqsave(&smmu->lock, flags);
smmu_setup_regs(smmu); err = smmu_setup_regs(smmu);
spin_unlock_irqrestore(&smmu->lock, flags); spin_unlock_irqrestore(&smmu->lock, flags);
return 0; return err;
} }
static int tegra_smmu_probe(struct platform_device *pdev) static int tegra_smmu_probe(struct platform_device *pdev)
{ {
struct smmu_device *smmu; struct smmu_device *smmu;
struct resource *regs, *regs2, *window;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
int i, err = 0; int i, asids, err = 0;
dma_addr_t uninitialized_var(base);
size_t bytes, uninitialized_var(size);
if (smmu_handle) if (smmu_handle)
return -EIO; return -EIO;
BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT); BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids))
regs2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
window = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (!regs || !regs2 || !window) {
dev_err(dev, "No SMMU resources\n");
return -ENODEV; return -ENODEV;
}
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); bytes = sizeof(*smmu) + asids * sizeof(*smmu->as);
smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
if (!smmu) { if (!smmu) {
dev_err(dev, "failed to allocate smmu_device\n"); dev_err(dev, "failed to allocate smmu_device\n");
return -ENOMEM; return -ENOMEM;
} }
smmu->dev = dev; for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) {
smmu->num_as = SMMU_NUM_ASIDS; struct resource *res;
smmu->iovmm_base = (unsigned long)window->start;
smmu->page_count = resource_size(window) >> SMMU_PAGE_SHIFT; res = platform_get_resource(pdev, IORESOURCE_MEM, i);
smmu->regs = devm_ioremap(dev, regs->start, resource_size(regs)); if (!res)
smmu->regs_ahbarb = devm_ioremap(dev, regs2->start, return -ENODEV;
resource_size(regs2)); smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
if (!smmu->regs || !smmu->regs_ahbarb) { if (!smmu->regs[i])
dev_err(dev, "failed to remap SMMU registers\n"); return -EBUSY;
err = -ENXIO;
goto fail;
} }
err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
if (err)
return -ENODEV;
if (size & SMMU_PAGE_MASK)
return -EINVAL;
size >>= SMMU_PAGE_SHIFT;
if (!size)
return -EINVAL;
smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
if (!smmu->ahb)
return -ENODEV;
smmu->dev = dev;
smmu->num_as = asids;
smmu->iovmm_base = base;
smmu->page_count = size;
smmu->translation_enable_0 = ~0; smmu->translation_enable_0 = ~0;
smmu->translation_enable_1 = ~0; smmu->translation_enable_1 = ~0;
smmu->translation_enable_2 = ~0; smmu->translation_enable_2 = ~0;
smmu->asid_security = 0; smmu->asid_security = 0;
smmu->as = devm_kzalloc(dev,
sizeof(smmu->as[0]) * smmu->num_as, GFP_KERNEL);
if (!smmu->as) {
dev_err(dev, "failed to allocate smmu_as\n");
err = -ENOMEM;
goto fail;
}
for (i = 0; i < smmu->num_as; i++) { for (i = 0; i < smmu->num_as; i++) {
struct smmu_as *as = &smmu->as[i]; struct smmu_as *as = &smmu->as[i];
...@@ -945,57 +987,28 @@ static int tegra_smmu_probe(struct platform_device *pdev) ...@@ -945,57 +987,28 @@ static int tegra_smmu_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&as->client); INIT_LIST_HEAD(&as->client);
} }
spin_lock_init(&smmu->lock); spin_lock_init(&smmu->lock);
smmu_setup_regs(smmu); err = smmu_setup_regs(smmu);
if (err)
return err;
platform_set_drvdata(pdev, smmu); platform_set_drvdata(pdev, smmu);
smmu->avp_vector_page = alloc_page(GFP_KERNEL); smmu->avp_vector_page = alloc_page(GFP_KERNEL);
if (!smmu->avp_vector_page) if (!smmu->avp_vector_page)
goto fail; return -ENOMEM;
smmu_handle = smmu; smmu_handle = smmu;
return 0; return 0;
fail:
if (smmu->avp_vector_page)
__free_page(smmu->avp_vector_page);
if (smmu->regs)
devm_iounmap(dev, smmu->regs);
if (smmu->regs_ahbarb)
devm_iounmap(dev, smmu->regs_ahbarb);
if (smmu && smmu->as) {
for (i = 0; i < smmu->num_as; i++) {
if (smmu->as[i].pdir_page) {
ClearPageReserved(smmu->as[i].pdir_page);
__free_page(smmu->as[i].pdir_page);
}
}
devm_kfree(dev, smmu->as);
}
devm_kfree(dev, smmu);
return err;
} }
static int tegra_smmu_remove(struct platform_device *pdev) static int tegra_smmu_remove(struct platform_device *pdev)
{ {
struct smmu_device *smmu = platform_get_drvdata(pdev); struct smmu_device *smmu = platform_get_drvdata(pdev);
struct device *dev = smmu->dev;
smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
platform_set_drvdata(pdev, NULL);
if (smmu->as) {
int i; int i;
smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
for (i = 0; i < smmu->num_as; i++) for (i = 0; i < smmu->num_as; i++)
free_pdir(&smmu->as[i]); free_pdir(&smmu->as[i]);
devm_kfree(dev, smmu->as);
}
if (smmu->avp_vector_page)
__free_page(smmu->avp_vector_page); __free_page(smmu->avp_vector_page);
if (smmu->regs)
devm_iounmap(dev, smmu->regs);
if (smmu->regs_ahbarb)
devm_iounmap(dev, smmu->regs_ahbarb);
devm_kfree(dev, smmu);
smmu_handle = NULL; smmu_handle = NULL;
return 0; return 0;
} }
...@@ -1005,6 +1018,14 @@ const struct dev_pm_ops tegra_smmu_pm_ops = { ...@@ -1005,6 +1018,14 @@ const struct dev_pm_ops tegra_smmu_pm_ops = {
.resume = tegra_smmu_resume, .resume = tegra_smmu_resume,
}; };
#ifdef CONFIG_OF
static struct of_device_id tegra_smmu_of_match[] __devinitdata = {
{ .compatible = "nvidia,tegra30-smmu", },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
#endif
static struct platform_driver tegra_smmu_driver = { static struct platform_driver tegra_smmu_driver = {
.probe = tegra_smmu_probe, .probe = tegra_smmu_probe,
.remove = tegra_smmu_remove, .remove = tegra_smmu_remove,
...@@ -1012,6 +1033,7 @@ static struct platform_driver tegra_smmu_driver = { ...@@ -1012,6 +1033,7 @@ static struct platform_driver tegra_smmu_driver = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.name = "tegra-smmu", .name = "tegra-smmu",
.pm = &tegra_smmu_pm_ops, .pm = &tegra_smmu_pm_ops,
.of_match_table = of_match_ptr(tegra_smmu_of_match),
}, },
}; };
...@@ -1031,4 +1053,5 @@ module_exit(tegra_smmu_exit); ...@@ -1031,4 +1053,5 @@ module_exit(tegra_smmu_exit);
MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30"); MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-smmu");
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
...@@ -162,7 +162,8 @@ int pci_user_read_config_##size \ ...@@ -162,7 +162,8 @@ int pci_user_read_config_##size \
if (ret > 0) \ if (ret > 0) \
ret = -EINVAL; \ ret = -EINVAL; \
return ret; \ return ret; \
} } \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
/* Returns 0 on success, negative values indicate error. */ /* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size,type) \ #define PCI_USER_WRITE_CONFIG(size,type) \
...@@ -181,7 +182,8 @@ int pci_user_write_config_##size \ ...@@ -181,7 +182,8 @@ int pci_user_write_config_##size \
if (ret > 0) \ if (ret > 0) \
ret = -EINVAL; \ ret = -EINVAL; \
return ret; \ return ret; \
} } \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
PCI_USER_READ_CONFIG(byte, u8) PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16) PCI_USER_READ_CONFIG(word, u16)
......
...@@ -2359,6 +2359,75 @@ void pci_enable_acs(struct pci_dev *dev) ...@@ -2359,6 +2359,75 @@ void pci_enable_acs(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
} }
/**
* pci_acs_enabled - test ACS against required flags for a given device
* @pdev: device to test
* @acs_flags: required PCI ACS flags
*
* Return true if the device supports the provided flags. Automatically
* filters out flags that are not implemented on multifunction devices.
*/
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
int pos, ret;
u16 ctrl;
ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
if (ret >= 0)
return ret > 0;
if (!pci_is_pcie(pdev))
return false;
/* Filter out flags not applicable to multifunction */
if (pdev->multifunction)
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
PCI_ACS_EC | PCI_ACS_DT);
if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM ||
pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pdev->multifunction) {
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
if (!pos)
return false;
pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
if ((ctrl & acs_flags) != acs_flags)
return false;
}
return true;
}
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
* @start: starting downstream device
* @end: ending upstream device or NULL to search to the root bus
* @acs_flags: required flags
*
* Walk up a device tree from start to end testing PCI ACS support. If
* any step along the way does not support the required flags, return false.
*/
bool pci_acs_path_enabled(struct pci_dev *start,
struct pci_dev *end, u16 acs_flags)
{
struct pci_dev *pdev, *parent = start;
do {
pdev = parent;
if (!pci_acs_enabled(pdev, acs_flags))
return false;
if (pci_is_root_bus(pdev->bus))
return (end == NULL);
parent = pdev->bus->self;
} while (pdev != end);
return true;
}
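Example: sketch of asking whether DMA from a device can reach the root complex without peer-to-peer redirection anywhere along the path; the flag set is one an isolation-minded caller might require:

#include <linux/pci.h>

static bool dev_is_isolated(struct pci_dev *pdev)
{
        u16 flags = PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

        /* NULL end: walk all the way up to the root bus */
        return pci_acs_path_enabled(pdev, NULL, flags);
}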
/** /**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device * @dev: the PCI device
......
...@@ -86,13 +86,6 @@ static inline bool pci_is_bridge(struct pci_dev *pci_dev) ...@@ -86,13 +86,6 @@ static inline bool pci_is_bridge(struct pci_dev *pci_dev)
return !!(pci_dev->subordinate); return !!(pci_dev->subordinate);
} }
extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
extern int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
extern int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
extern int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
struct pci_vpd_ops { struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
......
...@@ -3179,3 +3179,87 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) ...@@ -3179,3 +3179,87 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
return -ENOTTY; return -ENOTTY;
} }
static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
{
if (!PCI_FUNC(dev->devfn))
return pci_dev_get(dev);
return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}
static const struct pci_dev_dma_source {
u16 vendor;
u16 device;
struct pci_dev *(*dma_source)(struct pci_dev *dev);
} pci_dev_dma_source[] = {
/*
* https://bugzilla.redhat.com/show_bug.cgi?id=605888
*
* Some Ricoh devices use the function 0 source ID for DMA on
 * other functions of a multifunction device. The DMA device
 * is therefore function 0, which has implications for the
 * IOMMU grouping of these devices.
*/
{ PCI_VENDOR_ID_RICOH, 0xe822, pci_func_0_dma_source },
{ PCI_VENDOR_ID_RICOH, 0xe230, pci_func_0_dma_source },
{ PCI_VENDOR_ID_RICOH, 0xe832, pci_func_0_dma_source },
{ PCI_VENDOR_ID_RICOH, 0xe476, pci_func_0_dma_source },
{ 0 }
};
/*
* IOMMUs with isolation capabilities need to be programmed with the
* correct source ID of a device. In most cases, the source ID matches
* the device doing the DMA, but sometimes hardware is broken and will
* tag the DMA as being sourced from a different device. This function
* allows that translation. Note that the reference count of the
* returned device is incremented on all paths.
*/
struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
{
const struct pci_dev_dma_source *i;
for (i = pci_dev_dma_source; i->dma_source; i++) {
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID))
return i->dma_source(dev);
}
return pci_dev_get(dev);
}
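Example: sketch of resolving the device whose requester ID the IOMMU will actually see, e.g. when composing IOMMU groups:

#include <linux/pci.h>

static void note_dma_source(struct pci_dev *pdev)
{
        struct pci_dev *dma_pdev = pci_get_dma_source(pdev);

        if (dma_pdev != pdev)
                dev_info(&pdev->dev, "DMA sourced from %s\n",
                         pci_name(dma_pdev));

        pci_dev_put(dma_pdev);  /* a reference is taken on every path */
}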
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
{ 0 }
};
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
int ret;
/*
* Allow devices that do not expose standard PCIe ACS capabilities
* or control to indicate their support here. Multi-function express
 * devices that do not allow internal peer-to-peer between functions,
 * but also do not implement PCIe ACS, may wish to return true here.
*/
for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID)) {
ret = i->acs_enabled(dev, acs_flags);
if (ret >= 0)
return ret;
}
}
return -ENOTTY;
}
...@@ -124,7 +124,7 @@ static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask, ...@@ -124,7 +124,7 @@ static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
return val; return val;
} }
static int pcibios_err_to_errno(int err) static int xen_pcibios_err_to_errno(int err)
{ {
switch (err) { switch (err) {
case PCIBIOS_SUCCESSFUL: case PCIBIOS_SUCCESSFUL:
...@@ -202,7 +202,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size, ...@@ -202,7 +202,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
pci_name(dev), size, offset, value); pci_name(dev), size, offset, value);
*ret_val = value; *ret_val = value;
return pcibios_err_to_errno(err); return xen_pcibios_err_to_errno(err);
} }
int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
...@@ -290,7 +290,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) ...@@ -290,7 +290,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
} }
} }
return pcibios_err_to_errno(err); return xen_pcibios_err_to_errno(err);
} }
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev) void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
......
...@@ -36,6 +36,7 @@ struct subsys_private; ...@@ -36,6 +36,7 @@ struct subsys_private;
struct bus_type; struct bus_type;
struct device_node; struct device_node;
struct iommu_ops; struct iommu_ops;
struct iommu_group;
struct bus_attribute { struct bus_attribute {
struct attribute attr; struct attribute attr;
...@@ -687,6 +688,7 @@ struct device { ...@@ -687,6 +688,7 @@ struct device {
const struct attribute_group **groups; /* optional groups */ const struct attribute_group **groups; /* optional groups */
void (*release)(struct device *dev); void (*release)(struct device *dev);
struct iommu_group *iommu_group;
}; };
/* Get the wakeup routines, which depend on struct device */ /* Get the wakeup routines, which depend on struct device */
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#define IOMMU_CACHE (4) /* DMA cache coherency */ #define IOMMU_CACHE (4) /* DMA cache coherency */
struct iommu_ops; struct iommu_ops;
struct iommu_group;
struct bus_type; struct bus_type;
struct device; struct device;
struct iommu_domain; struct iommu_domain;
...@@ -37,16 +38,28 @@ struct iommu_domain; ...@@ -37,16 +38,28 @@ struct iommu_domain;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *, typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *); struct device *, unsigned long, int, void *);
struct iommu_domain_geometry {
dma_addr_t aperture_start; /* First address that can be mapped */
dma_addr_t aperture_end; /* Last address that can be mapped */
bool force_aperture; /* DMA only allowed in mappable range? */
};
struct iommu_domain { struct iommu_domain {
struct iommu_ops *ops; struct iommu_ops *ops;
void *priv; void *priv;
iommu_fault_handler_t handler; iommu_fault_handler_t handler;
void *handler_token; void *handler_token;
struct iommu_domain_geometry geometry;
}; };
#define IOMMU_CAP_CACHE_COHERENCY 0x1 #define IOMMU_CAP_CACHE_COHERENCY 0x1
#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ #define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
enum iommu_attr {
DOMAIN_ATTR_GEOMETRY,
DOMAIN_ATTR_MAX,
};
#ifdef CONFIG_IOMMU_API #ifdef CONFIG_IOMMU_API
/** /**
...@@ -59,7 +72,10 @@ struct iommu_domain { ...@@ -59,7 +72,10 @@ struct iommu_domain {
* @unmap: unmap a physically contiguous memory region from an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain
* @iova_to_phys: translate iova to physical address * @iova_to_phys: translate iova to physical address
* @domain_has_cap: domain capabilities query * @domain_has_cap: domain capabilities query
* @commit: commit iommu domain * @add_device: add device to iommu grouping
* @remove_device: remove device from iommu grouping
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
* @pgsize_bitmap: bitmap of supported page sizes * @pgsize_bitmap: bitmap of supported page sizes
*/ */
struct iommu_ops { struct iommu_ops {
...@@ -75,10 +91,23 @@ struct iommu_ops { ...@@ -75,10 +91,23 @@ struct iommu_ops {
unsigned long iova); unsigned long iova);
int (*domain_has_cap)(struct iommu_domain *domain, int (*domain_has_cap)(struct iommu_domain *domain,
unsigned long cap); unsigned long cap);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
int (*device_group)(struct device *dev, unsigned int *groupid); int (*device_group)(struct device *dev, unsigned int *groupid);
int (*domain_get_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
unsigned long pgsize_bitmap; unsigned long pgsize_bitmap;
}; };
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops); extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus); extern bool iommu_present(struct bus_type *bus);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
...@@ -97,7 +126,34 @@ extern int iommu_domain_has_cap(struct iommu_domain *domain, ...@@ -97,7 +126,34 @@ extern int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap); unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain, extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token); iommu_fault_handler_t handler, void *token);
extern int iommu_device_group(struct device *dev, unsigned int *groupid);
extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
void *iommu_data,
void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
/** /**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
...@@ -142,6 +198,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain, ...@@ -142,6 +198,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
#else /* CONFIG_IOMMU_API */ #else /* CONFIG_IOMMU_API */
struct iommu_ops {}; struct iommu_ops {};
struct iommu_group {};
static inline bool iommu_present(struct bus_type *bus) static inline bool iommu_present(struct bus_type *bus)
{ {
...@@ -197,11 +254,88 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain, ...@@ -197,11 +254,88 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{ {
} }
static inline int iommu_device_group(struct device *dev, unsigned int *groupid) int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{ {
return -ENODEV; return -ENODEV;
} }
void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
}
struct iommu_group *iommu_group_alloc(void)
{
return ERR_PTR(-ENODEV);
}
void *iommu_group_get_iommudata(struct iommu_group *group)
{
return NULL;
}
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
void (*release)(void *iommu_data))
{
}
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
return -ENODEV;
}
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
return -ENODEV;
}
void iommu_group_remove_device(struct device *dev)
{
}
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
return -ENODEV;
}
struct iommu_group *iommu_group_get(struct device *dev)
{
return NULL;
}
void iommu_group_put(struct iommu_group *group)
{
}
int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb)
{
return -ENODEV;
}
int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb)
{
return 0;
}
int iommu_group_id(struct iommu_group *group)
{
return -ENODEV;
}
static inline int iommu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
return -EINVAL;
}
static inline int iommu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
return -EINVAL;
}
#endif /* CONFIG_IOMMU_API */ #endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */ #endif /* __LINUX_IOMMU_H */
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
#ifdef CONFIG_OF_IOMMU
extern int of_get_dma_window(struct device_node *dn, const char *prefix,
int index, unsigned long *busno, dma_addr_t *addr,
size_t *size);
#else
static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
int index, unsigned long *busno, dma_addr_t *addr,
size_t *size)
{
return -EINVAL;
}
#endif /* CONFIG_OF_IOMMU */
#endif /* __OF_IOMMU_H */
...@@ -474,6 +474,32 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; ...@@ -474,6 +474,32 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
#define PCIBIOS_SET_FAILED 0x88 #define PCIBIOS_SET_FAILED 0x88
#define PCIBIOS_BUFFER_TOO_SMALL 0x89 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
/*
 * Translate the above to a generic errno for passing back through non-PCI code.
*/
static inline int pcibios_err_to_errno(int err)
{
if (err <= PCIBIOS_SUCCESSFUL)
return err; /* Assume already errno */
switch (err) {
case PCIBIOS_FUNC_NOT_SUPPORTED:
return -ENOENT;
case PCIBIOS_BAD_VENDOR_ID:
return -EINVAL;
case PCIBIOS_DEVICE_NOT_FOUND:
return -ENODEV;
case PCIBIOS_BAD_REGISTER_NUMBER:
return -EFAULT;
case PCIBIOS_SET_FAILED:
return -EIO;
case PCIBIOS_BUFFER_TOO_SMALL:
return -ENOSPC;
}
return -ENOTTY;
}
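Example: sketch of propagating a PCIBIOS_* result from a config accessor as a normal errno:

#include <linux/pci.h>

static int read_vendor(struct pci_dev *dev, u16 *vendor)
{
        int err = pci_read_config_word(dev, PCI_VENDOR_ID, vendor);

        return pcibios_err_to_errno(err);
}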
/* Low-level architecture-dependent routines */ /* Low-level architecture-dependent routines */
struct pci_ops { struct pci_ops {
...@@ -777,6 +803,14 @@ static inline int pci_write_config_dword(const struct pci_dev *dev, int where, ...@@ -777,6 +803,14 @@ static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
} }
/* user-space driven config access */
int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
int __must_check pci_enable_device(struct pci_dev *dev); int __must_check pci_enable_device(struct pci_dev *dev);
int __must_check pci_enable_device_io(struct pci_dev *dev); int __must_check pci_enable_device_io(struct pci_dev *dev);
int __must_check pci_enable_device_mem(struct pci_dev *dev); int __must_check pci_enable_device_mem(struct pci_dev *dev);
...@@ -1332,6 +1366,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, ...@@ -1332,6 +1366,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
static inline int pci_domain_nr(struct pci_bus *bus) static inline int pci_domain_nr(struct pci_bus *bus)
{ return 0; } { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev)
{ return NULL; }
#define dev_is_pci(d) (false) #define dev_is_pci(d) (false)
#define dev_is_pf(d) (false) #define dev_is_pf(d) (false)
#define dev_num_vf(d) (0) #define dev_num_vf(d) (0)
...@@ -1486,9 +1523,20 @@ enum pci_fixup_pass { ...@@ -1486,9 +1523,20 @@ enum pci_fixup_pass {
#ifdef CONFIG_PCI_QUIRKS #ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
#else #else
static inline void pci_fixup_device(enum pci_fixup_pass pass, static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) {} struct pci_dev *dev) {}
static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
{
return pci_dev_get(dev);
}
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
u16 acs_flags)
{
return -ENOTTY;
}
#endif #endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
...@@ -1591,7 +1639,9 @@ static inline bool pci_is_pcie(struct pci_dev *dev) ...@@ -1591,7 +1639,9 @@ static inline bool pci_is_pcie(struct pci_dev *dev)
} }
void pci_request_acs(void); void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
struct pci_dev *end, u16 acs_flags);
#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT) #define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT)
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
* Under PCI, each device has 256 bytes of configuration address space, * Under PCI, each device has 256 bytes of configuration address space,
* of which the first 64 bytes are standardized as follows: * of which the first 64 bytes are standardized as follows:
*/ */
#define PCI_STD_HEADER_SIZEOF 64
#define PCI_VENDOR_ID 0x00 /* 16 bits */ #define PCI_VENDOR_ID 0x00 /* 16 bits */
#define PCI_DEVICE_ID 0x02 /* 16 bits */ #define PCI_DEVICE_ID 0x02 /* 16 bits */
#define PCI_COMMAND 0x04 /* 16 bits */ #define PCI_COMMAND 0x04 /* 16 bits */
...@@ -209,9 +210,12 @@ ...@@ -209,9 +210,12 @@
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */ #define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
#define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */ #define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */
#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */ #define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */
#define PCI_CAP_ID_SECDEV 0x0F /* Secure Device */
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */ #define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ #define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
#define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. */
#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */ #define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
#define PCI_CAP_ID_MAX PCI_CAP_ID_AF
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ #define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */ #define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
#define PCI_CAP_SIZEOF 4 #define PCI_CAP_SIZEOF 4
...@@ -276,6 +280,7 @@ ...@@ -276,6 +280,7 @@
#define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */ #define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */
#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */ #define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
#define PCI_VPD_DATA 4 /* 32-bits of data returned here */ #define PCI_VPD_DATA 4 /* 32-bits of data returned here */
#define PCI_CAP_VPD_SIZEOF 8
/* Slot Identification */ /* Slot Identification */
...@@ -297,8 +302,10 @@ ...@@ -297,8 +302,10 @@
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */ #define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */ #define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */ #define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ #define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */ #define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
/* MSI-X registers */ /* MSI-X registers */
#define PCI_MSIX_FLAGS 2 #define PCI_MSIX_FLAGS 2
...@@ -308,6 +315,7 @@ ...@@ -308,6 +315,7 @@
#define PCI_MSIX_TABLE 4 #define PCI_MSIX_TABLE 4
#define PCI_MSIX_PBA 8 #define PCI_MSIX_PBA 8
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0) #define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
/* MSI-X entry's format */ /* MSI-X entry's format */
#define PCI_MSIX_ENTRY_SIZE 16 #define PCI_MSIX_ENTRY_SIZE 16
...@@ -338,6 +346,7 @@ ...@@ -338,6 +346,7 @@
#define PCI_AF_CTRL_FLR 0x01 #define PCI_AF_CTRL_FLR 0x01
#define PCI_AF_STATUS 5 #define PCI_AF_STATUS 5
#define PCI_AF_STATUS_TP 0x01 #define PCI_AF_STATUS_TP 0x01
#define PCI_CAP_AF_SIZEOF 6 /* size of AF registers */
/* PCI-X registers */ /* PCI-X registers */
...@@ -374,6 +383,10 @@ ...@@ -374,6 +383,10 @@
#define PCI_X_STATUS_SPL_ERR 0x20000000 /* Rcvd Split Completion Error Msg */ #define PCI_X_STATUS_SPL_ERR 0x20000000 /* Rcvd Split Completion Error Msg */
#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */ #define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */
#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */ #define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */
#define PCI_X_ECC_CSR 8 /* ECC control and status */
#define PCI_CAP_PCIX_SIZEOF_V0 8 /* size of registers for Version 0 */
#define PCI_CAP_PCIX_SIZEOF_V1 24 /* size for Version 1 */
#define PCI_CAP_PCIX_SIZEOF_V2 PCI_CAP_PCIX_SIZEOF_V1 /* Same for v2 */
/* PCI Bridge Subsystem ID registers */ /* PCI Bridge Subsystem ID registers */
...@@ -462,6 +475,7 @@ ...@@ -462,6 +475,7 @@
#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */ #define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */ #define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */ #define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints end here */
#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ #define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */ #define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */ #define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
...@@ -521,6 +535,7 @@ ...@@ -521,6 +535,7 @@
#define PCI_EXP_OBFF_MSGA_EN 0x2000 /* OBFF enable with Message type A */ #define PCI_EXP_OBFF_MSGA_EN 0x2000 /* OBFF enable with Message type A */
#define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */ #define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */
#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */ #define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ #define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
...@@ -529,23 +544,43 @@ ...@@ -529,23 +544,43 @@
#define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf) #define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf)
#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc) #define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
#define PCI_EXT_CAP_ID_ERR 1 #define PCI_EXT_CAP_ID_ERR 0x01 /* Advanced Error Reporting */
#define PCI_EXT_CAP_ID_VC 2 #define PCI_EXT_CAP_ID_VC 0x02 /* Virtual Channel Capability */
#define PCI_EXT_CAP_ID_DSN 3 #define PCI_EXT_CAP_ID_DSN 0x03 /* Device Serial Number */
#define PCI_EXT_CAP_ID_PWR 4 #define PCI_EXT_CAP_ID_PWR 0x04 /* Power Budgeting */
#define PCI_EXT_CAP_ID_VNDR 11 #define PCI_EXT_CAP_ID_RCLD 0x05 /* Root Complex Link Declaration */
#define PCI_EXT_CAP_ID_ACS 13 #define PCI_EXT_CAP_ID_RCILC 0x06 /* Root Complex Internal Link Control */
#define PCI_EXT_CAP_ID_ARI 14 #define PCI_EXT_CAP_ID_RCEC 0x07 /* Root Complex Event Collector */
#define PCI_EXT_CAP_ID_ATS 15 #define PCI_EXT_CAP_ID_MFVC 0x08 /* Multi-Function VC Capability */
#define PCI_EXT_CAP_ID_SRIOV 16 #define PCI_EXT_CAP_ID_VC9 0x09 /* same as _VC */
#define PCI_EXT_CAP_ID_PRI 19 #define PCI_EXT_CAP_ID_RCRB 0x0A /* Root Complex RB? */
#define PCI_EXT_CAP_ID_LTR 24 #define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor Specific */
#define PCI_EXT_CAP_ID_PASID 27 #define PCI_EXT_CAP_ID_CAC 0x0C /* Config Access - obsolete */
#define PCI_EXT_CAP_ID_ACS 0x0D /* Access Control Services */
#define PCI_EXT_CAP_ID_ARI 0x0E /* Alternate Routing ID */
#define PCI_EXT_CAP_ID_ATS 0x0F /* Address Translation Services */
#define PCI_EXT_CAP_ID_SRIOV 0x10 /* Single Root I/O Virtualization */
#define PCI_EXT_CAP_ID_MRIOV 0x11 /* Multi Root I/O Virtualization */
#define PCI_EXT_CAP_ID_MCAST 0x12 /* Multicast */
#define PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */
#define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* reserved for AMD */
#define PCI_EXT_CAP_ID_REBAR 0x15 /* resizable BAR */
#define PCI_EXT_CAP_ID_DPA 0x16 /* dynamic power alloc */
#define PCI_EXT_CAP_ID_TPH 0x17 /* TPH request */
#define PCI_EXT_CAP_ID_LTR 0x18 /* latency tolerance reporting */
#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe */
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
 /* Advanced Error Reporting */
 #define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
 #define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */
 #define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
+#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
 #define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
 #define PCI_ERR_UNC_FCP 0x00002000 /* Flow Control Protocol */
 #define PCI_ERR_UNC_COMP_TIME 0x00004000 /* Completion Timeout */
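[Editor's note: the PCI_EXT_CAP_VER()/PCI_EXT_CAP_NEXT() accessors in the hunk above are all a caller needs to walk the extended capability list, which begins at offset 0x100 of config space. A minimal sketch follows, assuming a hypothetical read_config_dword() accessor standing in for whatever config-space reader the caller has; find_ext_capability and the two size constants are defined locally for illustration, not taken from this commit.]

#include <stdint.h>

#define PCI_CFG_SPACE_SIZE      256   /* extended caps start at 0x100 */
#define PCI_CFG_SPACE_EXP_SIZE  4096  /* end of PCIe config space */

extern uint32_t read_config_dword(int pos);  /* assumed accessor */

static int find_ext_capability(int cap_id)
{
        int pos = PCI_CFG_SPACE_SIZE;
        /* bound the walk: each capability occupies at least 8 bytes */
        int ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        while (ttl-- > 0) {
                uint32_t header = read_config_dword(pos);

                if (header == 0)                  /* empty list entry */
                        return 0;
                if ((header & 0xffff) == cap_id)  /* cap ID is bits 15:0 */
                        return pos;
                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)     /* next ptr must stay >= 0x100 */
                        return 0;
        }
        return 0;
}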
@@ -555,6 +590,11 @@
 #define PCI_ERR_UNC_MALF_TLP 0x00040000 /* Malformed TLP */
 #define PCI_ERR_UNC_ECRC 0x00080000 /* ECRC Error Status */
 #define PCI_ERR_UNC_UNSUP 0x00100000 /* Unsupported Request */
+#define PCI_ERR_UNC_ACSV 0x00200000 /* ACS Violation */
+#define PCI_ERR_UNC_INTN 0x00400000 /* internal error */
+#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
+#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
+#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
 #define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
 /* Same bits as above */
 #define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
@@ -565,6 +605,9 @@
 #define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
 #define PCI_ERR_COR_REP_ROLL 0x00000100 /* REPLAY_NUM Rollover */
 #define PCI_ERR_COR_REP_TIMER 0x00001000 /* Replay Timer Timeout */
+#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
+#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
+#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
 #define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
 /* Same bits as above */
 #define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
@@ -596,12 +639,18 @@
 /* Virtual Channel */
 #define PCI_VC_PORT_REG1 4
+#define PCI_VC_REG1_EVCC 0x7 /* extended vc count */
 #define PCI_VC_PORT_REG2 8
+#define PCI_VC_REG2_32_PHASE 0x2
+#define PCI_VC_REG2_64_PHASE 0x4
+#define PCI_VC_REG2_128_PHASE 0x8
 #define PCI_VC_PORT_CTRL 12
 #define PCI_VC_PORT_STATUS 14
 #define PCI_VC_RES_CAP 16
 #define PCI_VC_RES_CTRL 20
 #define PCI_VC_RES_STATUS 26
+#define PCI_CAP_VC_BASE_SIZEOF 0x10
+#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
 
 /* Power Budgeting */
 #define PCI_PWR_DSR 4 /* Data Select Register */
@@ -614,6 +663,7 @@
 #define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
 #define PCI_PWR_CAP 12 /* Capability */
 #define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
+#define PCI_EXT_CAP_PWR_SIZEOF 16
 
 /*
  * Hypertransport sub capability types
@@ -646,6 +696,8 @@
 #define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
 #define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
 #define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */
+#define HT_CAP_SIZEOF_LONG 28 /* slave & primary */
+#define HT_CAP_SIZEOF_SHORT 24 /* host & secondary */
 
 /* Alternative Routing-ID Interpretation */
 #define PCI_ARI_CAP 0x04 /* ARI Capability Register */
@@ -656,6 +708,7 @@
 #define PCI_ARI_CTRL_MFVC 0x0001 /* MFVC Function Groups Enable */
 #define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
 #define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
+#define PCI_EXT_CAP_ARI_SIZEOF 8
 
 /* Address Translation Service */
 #define PCI_ATS_CAP 0x04 /* ATS Capability Register */
@@ -665,6 +718,7 @@
 #define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
 #define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
 #define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
+#define PCI_EXT_CAP_ATS_SIZEOF 8
 
 /* Page Request Interface */
 #define PCI_PRI_CTRL 0x04 /* PRI control register */
@@ -676,6 +730,7 @@
 #define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
 #define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
 #define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
+#define PCI_EXT_CAP_PRI_SIZEOF 16
 
 /* PASID capability */
 #define PCI_PASID_CAP 0x04 /* PASID feature register */
@@ -685,6 +740,7 @@
 #define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
 #define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
 #define PCI_PASID_CTRL_PRIV 0x04 /* Priviledge Mode Enable */
+#define PCI_EXT_CAP_PASID_SIZEOF 8
 
 /* Single Root I/O Virtualization */
 #define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
...@@ -716,12 +772,14 @@ ...@@ -716,12 +772,14 @@
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */ #define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */ #define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */ #define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
#define PCI_EXT_CAP_SRIOV_SIZEOF 64
#define PCI_LTR_MAX_SNOOP_LAT 0x4 #define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6 #define PCI_LTR_MAX_NOSNOOP_LAT 0x6
#define PCI_LTR_VALUE_MASK 0x000003ff #define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00 #define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10 #define PCI_LTR_SCALE_SHIFT 10
#define PCI_EXT_CAP_LTR_SIZEOF 8
/* Access Control Service */ /* Access Control Service */
#define PCI_ACS_CAP 0x04 /* ACS Capability Register */ #define PCI_ACS_CAP 0x04 /* ACS Capability Register */
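[Editor's note: the LTR registers above pack a 10-bit latency value with a 3-bit scale, where the scale selects a multiplier of 2^(5*scale) nanoseconds. A small decode sketch using only the masks defined in this hunk; the helper name ltr_to_ns is illustrative, not from the source.]

#include <stdint.h>

static uint64_t ltr_to_ns(uint16_t reg)
{
        uint64_t value = reg & PCI_LTR_VALUE_MASK;
        int scale = (reg & PCI_LTR_SCALE_MASK) >> PCI_LTR_SCALE_SHIFT;

        /* scale steps are 1ns, 32ns, 1024ns, ... i.e. value << (5 * scale) */
        return value << (5 * scale);
}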
@@ -732,7 +790,38 @@
 #define PCI_ACS_UF 0x10 /* Upstream Forwarding */
 #define PCI_ACS_EC 0x20 /* P2P Egress Control */
 #define PCI_ACS_DT 0x40 /* Direct Translated P2P */
+#define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */
 #define PCI_ACS_CTRL 0x06 /* ACS Control Register */
 #define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
 
+#define PCI_VSEC_HDR 4 /* extended cap - vendor specific */
+#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
+
+/* sata capability */
+#define PCI_SATA_REGS 4 /* SATA REGs specifier */
+#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */
+#define PCI_SATA_REGS_INLINE 0xF /* REGS in config space */
+#define PCI_SATA_SIZEOF_SHORT 8
+#define PCI_SATA_SIZEOF_LONG 16
+
+/* resizable BARs */
+#define PCI_REBAR_CTRL 8 /* control register */
+#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */
+#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */
+
+/* dynamic power allocation */
+#define PCI_DPA_CAP 4 /* capability register */
+#define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */
+#define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */
+
+/* TPH Requester */
+#define PCI_TPH_CAP 4 /* capability register */
+#define PCI_TPH_CAP_LOC_MASK 0x600 /* location mask */
+#define PCI_TPH_LOC_NONE 0x000 /* no location */
+#define PCI_TPH_LOC_CAP 0x200 /* in capability */
+#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
+#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
+#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
+#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+
 #endif /* LINUX_PCI_REGS_H */
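[Editor's note: the ACS control bits above are what the IOMMU group work in this merge consults when deciding whether devices behind a downstream port are isolated from peer-to-peer traffic. A hedged sketch of that kind of check, assuming a hypothetical read_config_word() accessor; PCI_ACS_RR/CR sit just above PCI_ACS_UF in the full header and are restated here only because this hunk elides them, and acs_isolated is an illustrative name, not the kernel's.]

#include <stdbool.h>
#include <stdint.h>

extern uint16_t read_config_word(int pos);  /* assumed accessor */

#ifndef PCI_ACS_RR
#define PCI_ACS_RR 0x04  /* P2P Request Redirect (from the elided context) */
#define PCI_ACS_CR 0x08  /* P2P Completion Redirect (from the elided context) */
#endif

static bool acs_isolated(int acs_pos)
{
        /* require P2P requests and completions to be redirected
         * upstream and forwarded through the IOMMU, rather than
         * routed peer-to-peer below this port */
        uint16_t want = PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
        uint16_t ctrl = read_config_word(acs_pos + PCI_ACS_CTRL);

        return (ctrl & want) == want;
}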