Commit c74009f5 authored by Will Deacon

Merge branch 'for-next/iommu/fixes' into for-next/iommu/core

Merge in IOMMU fixes for 5.10 in order to resolve conflicts against the
queue for 5.11.

* for-next/iommu/fixes:
  iommu/amd: Set DTE[IntTabLen] to represent 512 IRTEs
  iommu/vt-d: Don't read VCCAP register unless it exists
  x86/tboot: Don't disable swiotlb when iommu is forced on
  iommu: Check return of __iommu_attach_device()
  arm-smmu-qcom: Ensure the qcom_scm driver has finished probing
  iommu/amd: Enforce 4k mapping for certain IOMMU data structures
  MAINTAINERS: Temporarily add myself to the IOMMU entry
  iommu/vt-d: Fix compile error with CONFIG_PCI_ATS not set
  iommu/vt-d: Avoid panic if iommu init fails in tboot system
  iommu/vt-d: Cure VF irqdomain hickup
  x86/platform/uv: Fix copied UV5 output archtype
  x86/platform/uv: Drop last traces of uv_flush_tlb_others
parents 113eb4ce 4165bf01
@@ -9171,6 +9171,7 @@ F: include/linux/iomap.h
 IOMMU DRIVERS
 M: Joerg Roedel <joro@8bytes.org>
+M: Will Deacon <will@kernel.org>
 L: iommu@lists.linux-foundation.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
@@ -2,14 +2,8 @@
 #ifndef _ASM_X86_UV_UV_H
 #define _ASM_X86_UV_UV_H
 
-#include <asm/tlbflush.h>
-
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC};
 
-struct cpumask;
-struct mm_struct;
-struct flush_tlb_info;
-
 #ifdef CONFIG_X86_UV
 #include <linux/efi.h>
@@ -44,10 +38,6 @@ static inline int is_uv_system(void) { return 0; }
 static inline int is_uv_hubbed(int uv) { return 0; }
 static inline void uv_cpu_init(void) { }
 static inline void uv_system_init(void) { }
-static inline const struct cpumask *
-uv_flush_tlb_others(const struct cpumask *cpumask,
-		    const struct flush_tlb_info *info)
-{ return cpumask; }
 
 #endif /* X86_UV */
@@ -33,7 +33,7 @@ static union uvh_apicid uvh_apicid;
 static int uv_node_id;
 
 /* Unpack AT/OEM/TABLE ID's to be NULL terminated strings */
-static u8 uv_archtype[UV_AT_SIZE];
+static u8 uv_archtype[UV_AT_SIZE + 1];
 static u8 oem_id[ACPI_OEM_ID_SIZE + 1];
 static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
@@ -320,7 +320,7 @@ static int __init decode_arch_type(unsigned long ptr)
 	if (n > 0 && n < sizeof(uv_ate->archtype)) {
 		pr_info("UV: UVarchtype received from BIOS\n");
-		uv_stringify(UV_AT_SIZE, uv_archtype, uv_ate->archtype);
+		uv_stringify(sizeof(uv_archtype), uv_archtype, uv_ate->archtype);
 		return 1;
 	}
 	return 0;
@@ -378,7 +378,7 @@ static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
 	if (!early_get_arch_type())
 		/* If not use OEM ID for UVarchtype */
-		uv_stringify(UV_AT_SIZE, uv_archtype, _oem_id);
+		uv_stringify(sizeof(uv_archtype), uv_archtype, oem_id);
 
 	/* Check if not hubbed */
 	if (strncmp(uv_archtype, "SGI", 3) != 0) {
@@ -514,16 +514,10 @@ int tboot_force_iommu(void)
 	if (!tboot_enabled())
 		return 0;
 
-	if (intel_iommu_tboot_noforce)
-		return 1;
-
-	if (no_iommu || swiotlb || dmar_disabled)
+	if (no_iommu || dmar_disabled)
 		pr_warn("Forcing Intel-IOMMU to enabled\n");
 
 	dmar_disabled = 0;
-#ifdef CONFIG_SWIOTLB
-	swiotlb = 0;
-#endif
 	no_iommu = 0;
 
 	return 1;
@@ -257,7 +257,7 @@
 #define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
 #define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
 #define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
-#define DTE_IRQ_TABLE_LEN (8ULL << 1)
+#define DTE_IRQ_TABLE_LEN (9ULL << 1)
 #define DTE_IRQ_REMAP_ENABLE 1ULL
 
 #define PAGE_MODE_NONE 0x00
@@ -29,6 +29,7 @@
 #include <asm/iommu_table.h>
 #include <asm/io_apic.h>
 #include <asm/irq_remapping.h>
+#include <asm/set_memory.h>
 
 #include <linux/crash_dump.h>
@@ -672,11 +673,27 @@ static void __init free_command_buffer(struct amd_iommu *iommu)
 	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 }
 
+static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+					 gfp_t gfp, size_t size)
+{
+	int order = get_order(size);
+	void *buf = (void *)__get_free_pages(gfp, order);
+
+	if (buf &&
+	    iommu_feature(iommu, FEATURE_SNP) &&
+	    set_memory_4k((unsigned long)buf, (1 << order))) {
+		free_pages((unsigned long)buf, order);
+		buf = NULL;
+	}
+
+	return buf;
+}
+
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						  get_order(EVT_BUFFER_SIZE));
+	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+					      EVT_BUFFER_SIZE);
 
 	return iommu->evt_buf ? 0 : -ENOMEM;
 }
@@ -715,8 +732,8 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_ppr_log(struct amd_iommu *iommu)
 {
-	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						  get_order(PPR_LOG_SIZE));
+	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+					      PPR_LOG_SIZE);
 
 	return iommu->ppr_log ? 0 : -ENOMEM;
 }
@@ -838,7 +855,8 @@ static int iommu_init_ga(struct amd_iommu *iommu)
 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 {
-	iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
+	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+					      1);
 
 	return iommu->cmd_sem ? 0 : -ENOMEM;
 }
@@ -309,6 +309,10 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
 {
 	struct qcom_smmu *qsmmu;
 
+	/* Check to make sure qcom_scm has finished probing */
+	if (!qcom_scm_is_available())
+		return ERR_PTR(-EPROBE_DEFER);
+
 	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
 	if (!qsmmu)
 		return ERR_PTR(-ENOMEM);
@@ -333,6 +333,13 @@ static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
 	dmar_iommu_notify_scope_dev(info);
 }
 
+static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
+{
+	struct pci_dev *physfn = pci_physfn(pdev);
+
+	dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
+}
+
 static int dmar_pci_bus_notifier(struct notifier_block *nb,
 				 unsigned long action, void *data)
 {
@@ -342,8 +349,20 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
 	/* Only care about add/remove events for physical functions.
 	 * For VFs we actually do the lookup based on the corresponding
 	 * PF in device_to_iommu() anyway. */
-	if (pdev->is_virtfn)
+	if (pdev->is_virtfn) {
+		/*
+		 * Ensure that the VF device inherits the irq domain of the
+		 * PF device. Ideally the device would inherit the domain
+		 * from the bus, but DMAR can have multiple units per bus
+		 * which makes this impossible. The VF 'bus' could inherit
+		 * from the PF device, but that's yet another x86'sism to
+		 * inflict on everybody else.
+		 */
+		if (action == BUS_NOTIFY_ADD_DEVICE)
+			vf_inherit_msi_domain(pdev);
 		return NOTIFY_DONE;
+	}
+
 	if (action != BUS_NOTIFY_ADD_DEVICE &&
 	    action != BUS_NOTIFY_REMOVED_DEVICE)
 		return NOTIFY_DONE;
@@ -967,7 +986,8 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
 		warn_invalid_dmar(phys_addr, " returns all ones");
 		goto unmap;
 	}
-	iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
+	if (ecap_vcs(iommu->ecap))
+		iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
 
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -179,7 +179,7 @@ static int rwbf_quirk;
  * (used when kernel is launched w/ TXT)
  */
 static int force_on = 0;
-int intel_iommu_tboot_noforce;
+static int intel_iommu_tboot_noforce;
 static int no_platform_optin;
 
 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
@@ -1829,7 +1829,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 		if (ecap_prs(iommu->ecap))
 			intel_svm_finish_prq(iommu);
 	}
-	if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
+	if (vccap_pasid(iommu->vccap))
 		ioasid_unregister_allocator(&iommu->pasid_allocator);
 
 #endif
@@ -3120,7 +3120,7 @@ static void register_pasid_allocator(struct intel_iommu *iommu)
 	 * is active. All vIOMMU allocators will eventually be calling the same
 	 * host allocator.
 	 */
-	if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
+	if (!vccap_pasid(iommu->vccap))
 		return;
 
 	pr_info("Register custom PASID allocator\n");
@@ -4205,7 +4205,8 @@ int __init intel_iommu_init(void)
 	 * Intel IOMMU is required for a TXT/tboot launch or platform
 	 * opt in, so enforce that.
 	 */
-	force_on = tboot_force_iommu() || platform_optin_force_iommu();
+	force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
+		    platform_optin_force_iommu();
 
 	if (iommu_init_mempool()) {
 		if (force_on)
@@ -268,16 +268,18 @@ int iommu_probe_device(struct device *dev)
 	 */
 	iommu_alloc_default_domain(group, dev);
 
-	if (group->default_domain)
+	if (group->default_domain) {
 		ret = __iommu_attach_device(group->default_domain, dev);
+		if (ret) {
+			iommu_group_put(group);
+			goto err_release;
+		}
+	}
 
 	iommu_create_device_direct_mappings(group, dev);
 
 	iommu_group_put(group);
 
-	if (ret)
-		goto err_release;
-
 	if (ops->probe_finalize)
 		ops->probe_finalize(dev);
@@ -798,7 +798,6 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
 extern int intel_iommu_enabled;
-extern int intel_iommu_tboot_noforce;
 extern int intel_iommu_gfx_mapped;
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)