Commit 0a72ef89 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull more arm64 fixes from Will Deacon:

 - Fix incorrect LDADD instruction encoding in our disassembly macros

 - Disable the broken ARM64_PSEUDO_NMI support for now

 - Add workaround for Cortex-A76 CPU erratum #1463225

 - Handle Cortex-A76/Neoverse-N1 erratum #1418040 w/ existing workaround

 - Fix IORT build failure if IOMMU_SUPPORT=n

 - Fix place-relative module relocation range checking and its
   interaction with KASLR

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: insn: Add BUILD_BUG_ON() for invalid masks
  arm64: insn: Fix ldadd instruction encoding
  arm64: Kconfig: Make ARM64_PSEUDO_NMI depend on BROKEN for now
  arm64: Handle erratum 1418040 as a superset of erratum 1188873
  arm64/module: deal with ambiguity in PRELxx relocation ranges
  ACPI/IORT: Fix build error when IOMMU_SUPPORT is disabled
  arm64/kernel: kaslr: reduce module randomization range to 2 GB
  arm64: errata: Add workaround for Cortex-A76 erratum #1463225
  arm64: Remove useless message during oops
parents c50bbf61 edbcf50e
@@ -58,13 +58,14 @@ stable kernels.
| ARM | Cortex-A72 | #853709 | N/A |
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | Cortex-A76 | #1188873,1418040| ARM64_ERRATUM_1418040 |
| ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
| ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
| ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
| ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
| ARM | MMU-500 | #841119,826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
| Cavium | ThunderX ITS | #22375,24313 | CAVIUM_ERRATUM_22375 |
| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
@@ -476,16 +476,15 @@ config ARM64_ERRATUM_1024718
If unsure, say Y.
config ARM64_ERRATUM_1188873
config ARM64_ERRATUM_1418040
bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
default y
depends on COMPAT
select ARM_ARCH_TIMER_OOL_WORKAROUND
help
This option adds a workaround for ARM Cortex-A76/Neoverse-N1
erratum 1188873.
errata 1188873 and 1418040.
Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could
Affected Cortex-A76/Neoverse-N1 cores (r0p0 to r3p1) could
cause register corruption when accessing the timer registers
from AArch32 userspace.
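The "r0p0 to r3p1" window above maps onto the variant and revision fields of MIDR_EL1. Below is a minimal sketch of what that window means, assuming only the architectural MIDR layout (variant in bits [23:20], revision in bits [3:0]); the kernel itself uses the MIDR_RANGE()/is_midr_in_range() helpers shown further down in this diff, and a real check also matches the implementer and part number.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustration only, not kernel code: decode the MIDR_EL1 fields to
 * show what "r0p0 to r3p1" covers.
 */
static bool revision_in_r0p0_to_r3p1(uint32_t midr)
{
	unsigned int variant  = (midr >> 20) & 0xf;	/* the "rN" in rNpM */
	unsigned int revision = midr & 0xf;		/* the "pM" in rNpM */

	return ((variant << 4) | revision) <= 0x31;	/* r0p0 (0x00) .. r3p1 (0x31) */
}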
@@ -521,6 +520,24 @@ config ARM64_ERRATUM_1286807
If unsure, say Y.
config ARM64_ERRATUM_1463225
bool "Cortex-A76: Software Step might prevent interrupt recognition"
default y
help
This option adds a workaround for Arm Cortex-A76 erratum 1463225.
On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
of a system call instruction (SVC) can prevent recognition of
subsequent interrupts when software stepping is disabled in the
exception handler of the system call and either kernel debugging
is enabled or VHE is in use.
Work around the erratum by triggering a dummy step exception
when handling a system call from a task that is being stepped
in a VHE configuration of the kernel.
If unsure, say Y.
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -1406,6 +1423,7 @@ config ARM64_MODULE_PLTS
config ARM64_PSEUDO_NMI
bool "Support for NMI-like interrupts"
depends on BROKEN # 1556553607-46531-1-git-send-email-julien.thierry@arm.com
select CONFIG_ARM_GIC_V3
help
Adds support for mimicking Non-Maskable Interrupts through the use of
@@ -53,7 +53,7 @@
#define ARM64_HAS_STAGE2_FWB 32
#define ARM64_HAS_CRC32 33
#define ARM64_SSBS 34
#define ARM64_WORKAROUND_1188873 35
#define ARM64_WORKAROUND_1418040 35
#define ARM64_HAS_SB 36
#define ARM64_WORKAROUND_1165522 37
#define ARM64_HAS_ADDRESS_AUTH_ARCH 38
@@ -62,7 +62,8 @@
#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
#define ARM64_HAS_IRQ_PRIO_MASKING 42
#define ARM64_HAS_DCPODP 43
#define ARM64_WORKAROUND_1463225 44
#define ARM64_NCAPS 44
#define ARM64_NCAPS 45
#endif /* __ASM_CPUCAPS_H */
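The renumbering above matters because each capability constant is a bit index into a fixed-size bitmap: adding ARM64_WORKAROUND_1463225 as entry 44 forces ARM64_NCAPS up to 45. A minimal sketch of that invariant follows, with an illustrative bitmap rather than the kernel's real cpufeature state (and assuming 64-bit longs).

#define ARM64_WORKAROUND_1463225	44
#define ARM64_NCAPS			45

/* New capability indices must stay below ARM64_NCAPS. */
_Static_assert(ARM64_WORKAROUND_1463225 < ARM64_NCAPS,
	       "capability index must stay below ARM64_NCAPS");

/* Stand-in for the kernel's capability bitmap, sized by ARM64_NCAPS. */
static unsigned long caps_bitmap[(ARM64_NCAPS + 63) / 64];

static void set_cap(int cap)
{
	caps_bitmap[cap / 64] |= 1UL << (cap % 64);
}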
@@ -18,6 +18,7 @@
*/
#ifndef __ASM_INSN_H
#define __ASM_INSN_H
#include <linux/build_bug.h>
#include <linux/types.h>
/* A64 instructions are always 32 bits. */
@@ -268,16 +269,21 @@ enum aarch64_insn_adr_type {
#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
{ return (code & (mask)) == (val); } \
{ \
BUILD_BUG_ON(~(mask) & (val)); \
return (code & (mask)) == (val); \
} \
static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }
{ \
return (val); \
}
__AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000)
__AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0xB8200000)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
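The new BUILD_BUG_ON(~(mask) & (val)) catches exactly the bug fixed in the ldadd line: the old value 0xB8200000 has bit 31 set, which the 0x3F20FC00 mask clears, so aarch64_insn_is_ldadd() could never match any instruction word. Here is a standalone illustration of the same compile-time check; it is not kernel code and the macro names are made up.

#include <stdint.h>

#define AARCH64_LDADD_MASK	0x3F20FC00u
#define AARCH64_LDADD_VAL	0x38200000u	/* size bits [31:30] left as don't-care */

/*
 * Every bit set in the value must also be covered by the mask,
 * otherwise (code & mask) == val can never be true for any
 * instruction word; the old 0xB8200000 value violated this because
 * bit 31 is outside the mask.
 */
_Static_assert((~AARCH64_LDADD_MASK & AARCH64_LDADD_VAL) == 0,
	       "opcode value has bits outside the opcode mask");

static inline int insn_is_ldadd(uint32_t code)
{
	return (code & AARCH64_LDADD_MASK) == AARCH64_LDADD_VAL;
}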
@@ -502,6 +502,22 @@ static const struct midr_range arm64_ssb_cpus[] = {
{},
};
#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
int scope)
{
u32 midr = read_cpuid_id();
/* Cortex-A76 r0p0 - r3p1 */
struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
@@ -682,12 +698,16 @@ static const struct midr_range workaround_clean_cache[] = {
};
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
static const struct midr_range erratum_1188873_list[] = {
/* Cortex-A76 r0p0 to r2p0 */
MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
/* Neoverse-N1 r0p0 to r2p0 */
MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
* - 1188873 affects r0p0 to r2p0
* - 1418040 affects r0p0 to r3p1
*/
static const struct midr_range erratum_1418040_list[] = {
/* Cortex-A76 r0p0 to r3p1 */
MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
/* Neoverse-N1 r0p0 to r3p1 */
MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
{},
};
#endif
@@ -809,11 +829,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_ssbd_mitigation,
.midr_range_list = arm64_ssb_cpus,
},
#ifdef CONFIG_ARM64_ERRATUM_1188873
#ifdef CONFIG_ARM64_ERRATUM_1418040
{
.desc = "ARM erratum 1188873",
.capability = ARM64_WORKAROUND_1188873,
ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
.desc = "ARM erratum 1418040",
.capability = ARM64_WORKAROUND_1418040,
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
@@ -823,6 +843,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.capability = ARM64_WORKAROUND_1165522,
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
{
.desc = "ARM erratum 1463225",
.capability = ARM64_WORKAROUND_1463225,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_cortex_a76_erratum_1463225,
},
#endif
{
}
@@ -336,8 +336,8 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
#ifdef CONFIG_ARM64_ERRATUM_1188873
alternative_if_not ARM64_WORKAROUND_1188873
#ifdef CONFIG_ARM64_ERRATUM_1418040
alternative_if_not ARM64_WORKAROUND_1418040
b 4f
alternative_else_nop_endif
/*
@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
/*
* Randomize the module region over a 4 GB window covering the
* Randomize the module region over a 2 GB window covering the
* kernel. This reduces the risk of modules leaking information
* about the address of the kernel itself, but results in
* branches between modules and the core kernel that are
* resolved via PLTs. (Branches between modules will be
* resolved normally.)
*/
module_range = SZ_4G - (u64)(_end - _stext);
module_alloc_base = max((u64)_end + offset - SZ_4G,
module_range = SZ_2G - (u64)(_end - _stext);
module_alloc_base = max((u64)_end + offset - SZ_2G,
(u64)MODULES_VADDR);
} else {
/*
@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
* can simply omit this fallback in that case.
*/
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + SZ_4G, GFP_KERNEL,
module_alloc_base + SZ_2G, GFP_KERNEL,
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
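Both SZ_2G changes (the randomization window in kaslr.c and the module_alloc() fallback range above) serve the same purpose: keeping every module allocation within 2 GB of the kernel image, the reach of a signed 32-bit place-relative reference such as the relative __ksymtab entries. The following rough sketch of the window arithmetic uses made-up addresses purely to show how module_alloc_base is clamped against MODULES_VADDR; it is not the kernel's code.

#include <stdint.h>
#include <stdio.h>

#define SZ_2G	0x80000000ULL

int main(void)
{
	/* All addresses below are hypothetical, chosen only for illustration. */
	uint64_t modules_vaddr = 0xffff000008000000ULL;
	uint64_t kimage_stext  = 0xffff000010080000ULL;
	uint64_t kimage_end    = 0xffff000011000000ULL;
	uint64_t kaslr_offset  = 0x0000000020000000ULL;

	/* Window size shrinks by the kernel image size, as in kaslr_early_init(). */
	uint64_t module_range = SZ_2G - (kimage_end - kimage_stext);

	/* Start at most 2 GB below the randomized end of the image,
	 * but never below the start of the module region. */
	uint64_t lowest = kimage_end + kaslr_offset - SZ_2G;
	uint64_t module_alloc_base =
		lowest > modules_vaddr ? lowest : modules_vaddr;

	printf("module window: [0x%llx, 0x%llx)\n",
	       (unsigned long long)module_alloc_base,
	       (unsigned long long)(module_alloc_base + module_range));
	return 0;
}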
@@ -96,15 +96,27 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
s64 sval = do_reloc(op, place, val);
/*
* The ELF psABI for AArch64 documents the 16-bit and 32-bit place
* relative relocations as having a range of [-2^15, 2^16) or
* [-2^31, 2^32), respectively. However, in order to be able to detect
* overflows reliably, we have to choose whether we interpret such
* quantities as signed or as unsigned, and stick with it.
* The way we organize our address space requires a signed
* interpretation of 32-bit relative references, so let's use that
* for all R_AARCH64_PRELxx relocations. This means our upper
* bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
*/
switch (len) {
case 16:
*(s16 *)place = sval;
if (sval < S16_MIN || sval > U16_MAX)
if (sval < S16_MIN || sval > S16_MAX)
return -ERANGE;
break;
case 32:
*(s32 *)place = sval;
if (sval < S32_MIN || sval > U32_MAX)
if (sval < S32_MIN || sval > S32_MAX)
return -ERANGE;
break;
case 64:
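A concrete instance of the ambiguity described in the comment above: the value 0x80000000 (+2 GiB) passed the old sval > U32_MAX test, yet once stored through *(s32 *)place it reads back as -2 GiB and the reference resolves 4 GiB away from its target. With the S32_MAX bound it is rejected with -ERANGE instead. A small standalone demonstration, not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t sval = INT64_C(0x80000000);	/* +2 GiB place-relative offset */
	int32_t slot = (int32_t)sval;		/* what *(s32 *)place would store;
						 * wraps to -2 GiB on the usual
						 * two's-complement targets */

	printf("requested %lld, reads back as %d\n", (long long)sval, slot);

	/* The new, signed bound rejects such values up front. */
	if (sval < INT32_MIN || sval > INT32_MAX)
		printf("-ERANGE\n");
	return 0;
}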
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);
#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
static void cortex_a76_erratum_1463225_svc_handler(void)
{
u32 reg, val;
if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
return;
if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
return;
__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
reg = read_sysreg(mdscr_el1);
val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
write_sysreg(val, mdscr_el1);
asm volatile("msr daifclr, #8");
isb();
/* We will have taken a single-step exception by this point */
write_sysreg(reg, mdscr_el1);
__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}
#else
static void cortex_a76_erratum_1463225_svc_handler(void) { }
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
regs->orig_x0 = regs->regs[0];
regs->syscallno = scno;
cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
user_exit();
@@ -168,7 +168,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
static int __die(const char *str, int err, struct pt_regs *regs)
{
struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -181,9 +180,6 @@ static int __die(const char *str, int err, struct pt_regs *regs)
return ret;
print_modules();
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
end_of_stack(tsk));
show_regs(regs);
if (!user_mode(regs))
@@ -811,6 +811,36 @@ void __init hook_debug_fault_code(int nr,
debug_fault_info[nr].name = name;
}
#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
static int __exception
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
if (user_mode(regs))
return 0;
if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
return 0;
/*
* We've taken a dummy step exception from the kernel to ensure
* that interrupts are re-enabled on the syscall path. Return back
* to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
* masked so that we can safely restore the mdscr and get on with
* handling the syscall.
*/
regs->pstate |= PSR_D_BIT;
return 1;
}
#else
static int __exception
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
unsigned int esr,
struct pt_regs *regs)
@@ -818,6 +848,9 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
const struct fault_info *inf = esr_to_debug_fault_info(esr);
unsigned long pc = instruction_pointer(regs);
if (cortex_a76_erratum_1463225_debug_handler(regs))
return;
/*
* Tell lockdep we disabled irqs in entry.S. Do nothing if they were
* already disabled to preserve the last enabled/disabled addresses.
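Taken together, the syscall.c and fault.c hunks implement the erratum 1463225 workaround as the following sequence; this is a restatement of the code above for readability, not an alternative implementation.

/*
 * Erratum 1463225 workaround, as implemented by the hunks above:
 *
 *  1. el0_svc_common() calls cortex_a76_erratum_1463225_svc_handler()
 *     before interrupts are re-enabled for the syscall.
 *  2. The handler only acts if the task is being single-stepped
 *     (TIF_SINGLESTEP) and this CPU has ARM64_WORKAROUND_1463225.
 *  3. It sets the per-CPU __in_cortex_a76_erratum_1463225_wa flag,
 *     sets the SS and KDE bits in MDSCR_EL1, unmasks debug exceptions
 *     ("msr daifclr, #8") and issues an ISB, so the CPU takes a dummy
 *     step exception from kernel context.
 *  4. do_debug_exception() hands that exception to
 *     cortex_a76_erratum_1463225_debug_handler(), which sees the
 *     per-CPU flag, sets PSR_D_BIT in the saved PSTATE so debug stays
 *     masked on return, and reports the exception as handled.
 *  5. Back in the SVC handler, MDSCR_EL1 is restored and the per-CPU
 *     flag is cleared; the syscall then proceeds with subsequent
 *     interrupts recognised normally.
 */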
@@ -753,31 +753,6 @@ static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
return 0;
}
static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
int ret = iommu_fwspec_init(dev, fwnode, ops);
if (!ret)
ret = iommu_fwspec_add_ids(dev, &streamid, 1);
return ret;
}
static inline bool iort_iommu_driver_enabled(u8 type)
{
switch (type) {
case ACPI_IORT_NODE_SMMU_V3:
return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
case ACPI_IORT_NODE_SMMU:
return IS_BUILTIN(CONFIG_ARM_SMMU);
default:
pr_warn("IORT node type %u does not describe an SMMU\n", type);
return false;
}
}
#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
@@ -878,15 +853,39 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
return (resv == its->its_count) ? resv : -ENODEV;
}
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
struct device *dev)
{ return 0; }
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
#endif
static inline bool iort_iommu_driver_enabled(u8 type)
{
switch (type) {
case ACPI_IORT_NODE_SMMU_V3:
return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
case ACPI_IORT_NODE_SMMU:
return IS_BUILTIN(CONFIG_ARM_SMMU);
default:
pr_warn("IORT node type %u does not describe an SMMU\n", type);
return false;
}
}
static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
int ret = iommu_fwspec_init(dev, fwnode, ops);
if (!ret)
ret = iommu_fwspec_add_ids(dev, &streamid, 1);
return ret;
}
static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
struct acpi_iort_root_complex *pci_rc;
pci_rc = (struct acpi_iort_root_complex *)node->node_data;
return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
u32 streamid)
@@ -933,6 +932,93 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
return iort_iommu_xlate(info->dev, parent, streamid);
}
/**
* iort_iommu_configure - Set-up IOMMU configuration for a device.
*
* @dev: device to configure
*
* Returns: iommu_ops pointer on configuration success
* NULL on configuration failure
*/
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
struct acpi_iort_node *node, *parent;
const struct iommu_ops *ops;
u32 streamid = 0;
int err = -ENODEV;
/*
* If we already translated the fwspec there
* is nothing left to do, return the iommu_ops.
*/
ops = iort_fwspec_iommu_ops(dev);
if (ops)
return ops;
if (dev_is_pci(dev)) {
struct pci_bus *bus = to_pci_dev(dev)->bus;
struct iort_pci_alias_info info = { .dev = dev };
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &bus->dev);
if (!node)
return NULL;
info.node = node;
err = pci_for_each_dma_alias(to_pci_dev(dev),
iort_pci_iommu_init, &info);
if (!err && iort_pci_rc_supports_ats(node))
dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
} else {
int i = 0;
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
if (!node)
return NULL;
do {
parent = iort_node_map_platform_id(node, &streamid,
IORT_IOMMU_TYPE,
i++);
if (parent)
err = iort_iommu_xlate(dev, parent, streamid);
} while (parent && !err);
}
/*
* If we have reason to believe the IOMMU driver missed the initial
* add_device callback for dev, replay it to get things in order.
*/
if (!err) {
ops = iort_fwspec_iommu_ops(dev);
err = iort_add_device_replay(ops, dev);
}
/* Ignore all other errors apart from EPROBE_DEFER */
if (err == -EPROBE_DEFER) {
ops = ERR_PTR(err);
} else if (err) {
dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
ops = NULL;
}
return ops;
}
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
struct device *dev)
{ return 0; }
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
#endif
static int nc_dma_get_range(struct device *dev, u64 *size)
{
struct acpi_iort_node *node;
@@ -1031,90 +1117,6 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
struct acpi_iort_root_complex *pci_rc;
pci_rc = (struct acpi_iort_root_complex *)node->node_data;
return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}
/**
* iort_iommu_configure - Set-up IOMMU configuration for a device.
*
* @dev: device to configure
*
* Returns: iommu_ops pointer on configuration success
* NULL on configuration failure
*/
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
struct acpi_iort_node *node, *parent;
const struct iommu_ops *ops;
u32 streamid = 0;
int err = -ENODEV;
/*
* If we already translated the fwspec there
* is nothing left to do, return the iommu_ops.
*/
ops = iort_fwspec_iommu_ops(dev);
if (ops)
return ops;
if (dev_is_pci(dev)) {
struct pci_bus *bus = to_pci_dev(dev)->bus;
struct iort_pci_alias_info info = { .dev = dev };
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &bus->dev);
if (!node)
return NULL;
info.node = node;
err = pci_for_each_dma_alias(to_pci_dev(dev),
iort_pci_iommu_init, &info);
if (!err && iort_pci_rc_supports_ats(node))
dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
} else {
int i = 0;
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
if (!node)
return NULL;
do {
parent = iort_node_map_platform_id(node, &streamid,
IORT_IOMMU_TYPE,
i++);
if (parent)
err = iort_iommu_xlate(dev, parent, streamid);
} while (parent && !err);
}
/*
* If we have reason to believe the IOMMU driver missed the initial
* add_device callback for dev, replay it to get things in order.
*/
if (!err) {
ops = iort_fwspec_iommu_ops(dev);
err = iort_add_device_replay(ops, dev);
}
/* Ignore all other errors apart from EPROBE_DEFER */
if (err == -EPROBE_DEFER) {
ops = ERR_PTR(err);
} else if (err) {
dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
ops = NULL;
}
return ops;
}
static void __init acpi_iort_register_irq(int hwirq, const char *name,
int trigger,
struct resource *res)
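The iort.c portion of the diff is code motion rather than new logic: iort_iommu_configure() and its helpers move inside the existing CONFIG_IOMMU_API guard, with inline stubs in the #else branch, so the IOMMU-specific code simply drops out when IOMMU_SUPPORT=n. Below is a minimal sketch of that guard-plus-stub pattern, using hypothetical CONFIG_FOO_API/foo_configure() names rather than the iort.c symbols.

#include <stddef.h>

/* Dummy types so the sketch stands alone; not the kernel's structures. */
struct device { int unused; };
struct foo_ops { int unused; };

#ifdef CONFIG_FOO_API
static const struct foo_ops real_foo_ops;

static const struct foo_ops *foo_configure(struct device *dev)
{
	(void)dev;
	return &real_foo_ops;	/* real code may use CONFIG_FOO_API-only types here */
}
#else
static inline const struct foo_ops *foo_configure(struct device *dev)
{
	(void)dev;
	return NULL;		/* stub keeps callers building when the API is disabled */
}
#endif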