Commit 6a2a235a authored by Alex Williamson

Merge branches 'v5.13/vfio/embed-vfio_device', 'v5.13/vfio/misc' and 'v5.13/vfio/nvlink' into v5.13/vfio/next

Spelling fixes merged with file deletion.

Conflicts:
	drivers/vfio/pci/vfio_pci_nvlink2.c
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Mediated device internal definitions
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <cjia@nvidia.com>
......
@@ -39,9 +39,3 @@ config VFIO_PCI_IGD
and LPC bridge config space.
To enable Intel IGD assignment through vfio-pci, say Y.
config VFIO_PCI_NVLINK2
def_bool y
depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
help
VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
@@ -2,7 +2,6 @@
vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
vfio-pci-$(CONFIG_VFIO_PCI_NVLINK2) += vfio_pci_nvlink2.o
vfio-pci-$(CONFIG_S390) += vfio_pci_zdev.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
@@ -378,7 +378,6 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
vdev->has_vga = true;
if (vfio_pci_is_vga(pdev) &&
pdev->vendor == PCI_VENDOR_ID_INTEL &&
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
@@ -389,24 +388,6 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
}
}
if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
if (ret && ret != -ENODEV) {
pci_warn(pdev, "Failed to setup NVIDIA NV2 RAM region\n");
goto disable_exit;
}
}
if (pdev->vendor == PCI_VENDOR_ID_IBM &&
IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
ret = vfio_pci_ibm_npu2_init(vdev);
if (ret && ret != -ENODEV) {
pci_warn(pdev, "Failed to setup NVIDIA NV2 ATSD region\n");
goto disable_exit;
}
}
vfio_pci_probe_mmaps(vdev);
return 0;
@@ -2434,7 +2415,7 @@ static int __init vfio_pci_init(void)
{
int ret;
/* Allocate shared config space permission data used by all devices */
ret = vfio_pci_init_perm_bits();
if (ret)
return ret;
......
@@ -101,7 +101,7 @@ static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
/*
* Read/Write Permission Bits - one bit for each bit in capability
* Any field can be read if it exists, but what is read depends on
* whether the field is 'virtualized', or just pass through to the
* hardware. Any virtualized field is also virtualized for writes.
* Writes are only permitted if they have a 1 bit here.
*/
......
@@ -21,6 +21,10 @@
#define OPREGION_SIZE (8 * 1024)
#define OPREGION_PCI_ADDR 0xfc
#define OPREGION_RVDA 0x3ba
#define OPREGION_RVDS 0x3c2
#define OPREGION_VERSION 0x16
static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
@@ -58,6 +62,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
u32 addr, size;
void *base;
int ret;
u16 version;
ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
if (ret)
@@ -83,6 +88,54 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
size *= 1024; /* In KB */
/*
* Support opregion v2.1+
* When the VBT data exceeds 6KB and cannot fit in mailbox #4, the
* Extended VBT region placed next to the opregion is used to hold it.
* RVDA (Relative Address of VBT Data from Opregion Base) and RVDS
* (Raw VBT Data Size) in the opregion structure hold the offset from
* the opregion base and the size of the VBT data. RVDA/RVDS are not
* defined before opregion 2.0.
*
* opregion 2.1+: RVDA is an unsigned, relative offset from the
* opregion base and should point to the end of the opregion;
* otherwise, exposing it to userspace would allow read access to
* everything between the OpRegion and the VBT, which is not safe.
* RVDS is defined as the size in bytes.
*
* opregion 2.0: rvda is the physical VBT address. Since rvda is an
* HPA it cannot be used directly in the guest, and it should not be
* practically available to the end user, so it is not supported.
*/
version = le16_to_cpu(*(__le16 *)(base + OPREGION_VERSION));
if (version >= 0x0200) {
u64 rvda;
u32 rvds;
rvda = le64_to_cpu(*(__le64 *)(base + OPREGION_RVDA));
rvds = le32_to_cpu(*(__le32 *)(base + OPREGION_RVDS));
if (rvda && rvds) {
/* no support for opregion v2.0 with physical VBT address */
if (version == 0x0200) {
memunmap(base);
pci_err(vdev->pdev,
"IGD assignment does not support opregion v2.0 with an extended VBT region\n");
return -EINVAL;
}
if (rvda != size) {
memunmap(base);
pci_err(vdev->pdev,
"Extended VBT does not follow opregion on version 0x%04x\n",
version);
return -EINVAL;
}
/* region size for opregion v2.0+: opregion and VBT size. */
size += rvds;
}
}
if (size != OPREGION_SIZE) {
memunmap(base);
base = memremap(addr, size, MEMREMAP_WB);
......
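For readers following the new opregion handling above, here is a minimal standalone sketch (not part of the patch) of the same size calculation, assuming a little-endian host and a buffer already holding the raw OpRegion bytes; the helper name opregion_exposed_size() is hypothetical. The offsets match the defines added by the patch.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define OPREGION_SIZE    (8 * 1024)
#define OPREGION_VERSION 0x16
#define OPREGION_RVDA    0x3ba
#define OPREGION_RVDS    0x3c2

/* Return the total size to expose (opregion plus any extended VBT), or 0 if unsupported. */
static size_t opregion_exposed_size(const uint8_t *base, uint32_t opregion_size)
{
	uint16_t version;
	uint64_t rvda;
	uint32_t rvds;

	memcpy(&version, base + OPREGION_VERSION, sizeof(version));
	if (version < 0x0200)
		return opregion_size;          /* pre-2.0: no RVDA/RVDS fields */

	memcpy(&rvda, base + OPREGION_RVDA, sizeof(rvda));
	memcpy(&rvds, base + OPREGION_RVDS, sizeof(rvds));
	if (!rvda || !rvds)
		return opregion_size;          /* VBT fits in mailbox #4 */

	if (version == 0x0200)
		return 0;                      /* 2.0: rvda is a physical address, unsupported */

	if (rvda != opregion_size)
		return 0;                      /* extended VBT must directly follow the opregion */

	return opregion_size + rvds;           /* opregion plus extended VBT */
}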
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2.
*
* Copyright (C) 2018 IBM Corp. All rights reserved.
* Author: Alexey Kardashevskiy <aik@ozlabs.ru>
*
* Register an on-GPU RAM region for cacheable access.
*
* Derived from original vfio_pci_igd.c:
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*/
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/kvm_ppc.h>
#include "vfio_pci_private.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap);
EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_npu2_mmap);
struct vfio_pci_nvgpu_data {
unsigned long gpu_hpa; /* GPU RAM physical address */
unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
unsigned long useraddr; /* GPU RAM userspace address */
unsigned long size; /* Size of the GPU RAM window (usually 128GB) */
struct mm_struct *mm;
struct mm_iommu_table_group_mem_t *mem; /* Pre-registered RAM descr. */
struct pci_dev *gpdev;
struct notifier_block group_notifier;
};
static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev,
char __user *buf, size_t count, loff_t *ppos, bool iswrite)
{
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
struct vfio_pci_nvgpu_data *data = vdev->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
loff_t posaligned = pos & PAGE_MASK, posoff = pos & ~PAGE_MASK;
size_t sizealigned;
void __iomem *ptr;
if (pos >= vdev->region[i].size)
return -EINVAL;
count = min(count, (size_t)(vdev->region[i].size - pos));
/*
* We map only a bit of GPU RAM for a short time instead of mapping it
* for the guest lifetime as:
*
* 1) we do not know GPU RAM size, only aperture which is 4-8 times
* bigger than actual RAM size (16/32GB RAM vs. 128GB aperture);
* 2) mapping GPU RAM allows CPU to prefetch and if this happens
* before NVLink bridge is reset (which fences GPU RAM),
* hardware management interrupts (HMI) might happen, this
* will freeze NVLink bridge.
*
* This is not fast path anyway.
*/
sizealigned = ALIGN(posoff + count, PAGE_SIZE);
ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned);
if (!ptr)
return -EFAULT;
if (iswrite) {
if (copy_from_user(ptr + posoff, buf, count))
count = -EFAULT;
else
*ppos += count;
} else {
if (copy_to_user(buf, ptr + posoff, count))
count = -EFAULT;
else
*ppos += count;
}
iounmap(ptr);
return count;
}
static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
struct vfio_pci_region *region)
{
struct vfio_pci_nvgpu_data *data = region->data;
long ret;
/* If there were any mappings at all... */
if (data->mm) {
if (data->mem) {
ret = mm_iommu_put(data->mm, data->mem);
WARN_ON(ret);
}
mmdrop(data->mm);
}
vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
&data->group_notifier);
pnv_npu2_unmap_lpar_dev(data->gpdev);
kfree(data);
}
static vm_fault_t vfio_pci_nvgpu_mmap_fault(struct vm_fault *vmf)
{
vm_fault_t ret;
struct vm_area_struct *vma = vmf->vma;
struct vfio_pci_region *region = vma->vm_private_data;
struct vfio_pci_nvgpu_data *data = region->data;
unsigned long vmf_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
unsigned long nv2pg = data->gpu_hpa >> PAGE_SHIFT;
unsigned long vm_pgoff = vma->vm_pgoff &
((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
unsigned long pfn = nv2pg + vm_pgoff + vmf_off;
ret = vmf_insert_pfn(vma, vmf->address, pfn);
trace_vfio_pci_nvgpu_mmap_fault(data->gpdev, pfn << PAGE_SHIFT,
vmf->address, ret);
return ret;
}
static const struct vm_operations_struct vfio_pci_nvgpu_mmap_vmops = {
.fault = vfio_pci_nvgpu_mmap_fault,
};
static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
struct vfio_pci_region *region, struct vm_area_struct *vma)
{
int ret;
struct vfio_pci_nvgpu_data *data = region->data;
if (data->useraddr)
return -EPERM;
if (vma->vm_end - vma->vm_start > data->size)
return -EINVAL;
vma->vm_private_data = region;
vma->vm_flags |= VM_PFNMAP;
vma->vm_ops = &vfio_pci_nvgpu_mmap_vmops;
/*
* Calling mm_iommu_newdev() here once as the region is not
* registered yet and therefore right initialization will happen now.
* Other places will use mm_iommu_find() which returns
* registered @mem and does not go gup().
*/
data->useraddr = vma->vm_start;
data->mm = current->mm;
mmgrab(data->mm);
ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
vma_pages(vma), data->gpu_hpa, &data->mem);
trace_vfio_pci_nvgpu_mmap(vdev->pdev, data->gpu_hpa, data->useraddr,
vma->vm_end - vma->vm_start, ret);
return ret;
}
static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
struct vfio_pci_nvgpu_data *data = region->data;
struct vfio_region_info_cap_nvlink2_ssatgt cap = {
.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
.header.version = 1,
.tgt = data->gpu_tgt
};
return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
}
static const struct vfio_pci_regops vfio_pci_nvgpu_regops = {
.rw = vfio_pci_nvgpu_rw,
.release = vfio_pci_nvgpu_release,
.mmap = vfio_pci_nvgpu_mmap,
.add_capability = vfio_pci_nvgpu_add_capability,
};
static int vfio_pci_nvgpu_group_notifier(struct notifier_block *nb,
unsigned long action, void *opaque)
{
struct kvm *kvm = opaque;
struct vfio_pci_nvgpu_data *data = container_of(nb,
struct vfio_pci_nvgpu_data,
group_notifier);
if (action == VFIO_GROUP_NOTIFY_SET_KVM && kvm &&
pnv_npu2_map_lpar_dev(data->gpdev,
kvm->arch.lpid, MSR_DR | MSR_PR))
return NOTIFY_BAD;
return NOTIFY_OK;
}
int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
{
int ret;
u64 reg[2];
u64 tgt = 0;
struct device_node *npu_node, *mem_node;
struct pci_dev *npu_dev;
struct vfio_pci_nvgpu_data *data;
uint32_t mem_phandle = 0;
unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;
/*
* PCI config space does not tell us about NVLink presense but
* platform does, use this.
*/
npu_dev = pnv_pci_get_npu_dev(vdev->pdev, 0);
if (!npu_dev)
return -ENODEV;
npu_node = pci_device_to_OF_node(npu_dev);
if (!npu_node)
return -EINVAL;
if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
return -ENODEV;
mem_node = of_find_node_by_phandle(mem_phandle);
if (!mem_node)
return -EINVAL;
if (of_property_read_variable_u64_array(mem_node, "reg", reg,
ARRAY_SIZE(reg), ARRAY_SIZE(reg)) !=
ARRAY_SIZE(reg))
return -EINVAL;
if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
return -EFAULT;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->gpu_hpa = reg[0];
data->gpu_tgt = tgt;
data->size = reg[1];
dev_dbg(&vdev->pdev->dev, "%lx..%lx\n", data->gpu_hpa,
data->gpu_hpa + data->size - 1);
data->gpdev = vdev->pdev;
data->group_notifier.notifier_call = vfio_pci_nvgpu_group_notifier;
ret = vfio_register_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
&events, &data->group_notifier);
if (ret)
goto free_exit;
/*
* We have just set KVM, we do not need the listener anymore.
* Also, keeping it registered means that if more than one GPU is
* assigned, we will get several similar notifiers notifying about
* the same device again which does not help with anything.
*/
vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
&data->group_notifier);
ret = vfio_pci_register_dev_region(vdev,
PCI_VENDOR_ID_NVIDIA | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
&vfio_pci_nvgpu_regops,
data->size,
VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE |
VFIO_REGION_INFO_FLAG_MMAP,
data);
if (ret)
goto free_exit;
return 0;
free_exit:
kfree(data);
return ret;
}
/*
* IBM NPU2 bridge
*/
struct vfio_pci_npu2_data {
void *base; /* ATSD register virtual address, for emulated access */
unsigned long mmio_atsd; /* ATSD physical address */
unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
unsigned int link_speed; /* The link speed from DT's ibm,nvlink-speed */
};
static size_t vfio_pci_npu2_rw(struct vfio_pci_device *vdev,
char __user *buf, size_t count, loff_t *ppos, bool iswrite)
{
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
struct vfio_pci_npu2_data *data = vdev->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos >= vdev->region[i].size)
return -EINVAL;
count = min(count, (size_t)(vdev->region[i].size - pos));
if (iswrite) {
if (copy_from_user(data->base + pos, buf, count))
return -EFAULT;
} else {
if (copy_to_user(buf, data->base + pos, count))
return -EFAULT;
}
*ppos += count;
return count;
}
static int vfio_pci_npu2_mmap(struct vfio_pci_device *vdev,
struct vfio_pci_region *region, struct vm_area_struct *vma)
{
int ret;
struct vfio_pci_npu2_data *data = region->data;
unsigned long req_len = vma->vm_end - vma->vm_start;
if (req_len != PAGE_SIZE)
return -EINVAL;
vma->vm_flags |= VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
req_len, vma->vm_page_prot);
trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
vma->vm_end - vma->vm_start, ret);
return ret;
}
static void vfio_pci_npu2_release(struct vfio_pci_device *vdev,
struct vfio_pci_region *region)
{
struct vfio_pci_npu2_data *data = region->data;
memunmap(data->base);
kfree(data);
}
static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
struct vfio_pci_npu2_data *data = region->data;
struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
.header.version = 1,
.tgt = data->gpu_tgt
};
struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
.header.version = 1,
.link_speed = data->link_speed
};
int ret;
ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
if (ret)
return ret;
return vfio_info_add_capability(caps, &capspd.header, sizeof(capspd));
}
static const struct vfio_pci_regops vfio_pci_npu2_regops = {
.rw = vfio_pci_npu2_rw,
.mmap = vfio_pci_npu2_mmap,
.release = vfio_pci_npu2_release,
.add_capability = vfio_pci_npu2_add_capability,
};
int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
{
int ret;
struct vfio_pci_npu2_data *data;
struct device_node *nvlink_dn;
u32 nvlink_index = 0, mem_phandle = 0;
struct pci_dev *npdev = vdev->pdev;
struct device_node *npu_node = pci_device_to_OF_node(npdev);
struct pci_controller *hose = pci_bus_to_host(npdev->bus);
u64 mmio_atsd = 0;
u64 tgt = 0;
u32 link_speed = 0xff;
/*
* PCI config space does not tell us about NVLink presense but
* platform does, use this.
*/
if (!pnv_pci_get_gpu_dev(vdev->pdev))
return -ENODEV;
if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
return -ENODEV;
/*
* NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
* so we can allocate one register per link, using nvlink index as
* a key.
* There is always at least one ATSD register so as long as at least
* NVLink bridge #0 is passed to the guest, ATSD will be available.
*/
nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
&nvlink_index)))
return -ENODEV;
if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
&mmio_atsd)) {
if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", 0,
&mmio_atsd)) {
dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
mmio_atsd = 0;
} else {
dev_warn(&vdev->pdev->dev,
"Using fallback ibm,mmio-atsd[0] for ATSD.\n");
}
}
if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
return -EFAULT;
}
if (of_property_read_u32(npu_node, "ibm,nvlink-speed", &link_speed)) {
dev_warn(&vdev->pdev->dev, "No ibm,nvlink-speed found\n");
return -EFAULT;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->mmio_atsd = mmio_atsd;
data->gpu_tgt = tgt;
data->link_speed = link_speed;
if (data->mmio_atsd) {
data->base = memremap(data->mmio_atsd, SZ_64K, MEMREMAP_WT);
if (!data->base) {
ret = -ENOMEM;
goto free_exit;
}
}
/*
* We want to expose the capability even if this specific NVLink
* did not get its own ATSD register because capabilities
* belong to VFIO regions and normally there will be ATSD register
* assigned to the NVLink bridge.
*/
ret = vfio_pci_register_dev_region(vdev,
PCI_VENDOR_ID_IBM |
VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
&vfio_pci_npu2_regops,
data->mmio_atsd ? PAGE_SIZE : 0,
VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE |
VFIO_REGION_INFO_FLAG_MMAP,
data);
if (ret)
goto free_exit;
return 0;
free_exit:
if (data->base)
memunmap(data->base);
kfree(data);
return ret;
}
@@ -200,20 +200,6 @@ static inline int vfio_pci_igd_init(struct vfio_pci_device *vdev)
return -ENODEV;
}
#endif
#ifdef CONFIG_VFIO_PCI_NVLINK2
extern int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev);
extern int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev);
#else
static inline int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
{
return -ENODEV;
}
static inline int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
{
return -ENODEV;
}
#endif
#ifdef CONFIG_S390
extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
......
@@ -26,7 +26,7 @@
#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
/* DMA Control register defines */
#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
......
@@ -16,7 +16,7 @@
* IOMMU to support the IOMMU API and have few to no restrictions around
* the IOVA range that can be mapped. The Type1 IOMMU is currently
* optimized for relatively static mappings of a userspace process with
* userspace pages pinned into memory. We also assume devices and IOMMU
* domains are PCI based as the IOMMU API is still centered around a
* device/bus interface rather than a group interface.
*/
@@ -877,7 +877,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
/*
* If iommu capable domain exist in the container then all pages are
* already pinned and accounted. Accounting should be done if there is no
* iommu capable domain in the container.
*/
do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
@@ -960,7 +960,7 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
bool do_accounting;
int i;
if (!iommu || !user_pfn || npage <= 0)
return -EINVAL;
/* Supported for v2 version only */
@@ -977,13 +977,13 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
iova = user_pfn[i] << PAGE_SHIFT;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
if (!dma)
break;
vfio_unpin_page_external(dma, iova, do_accounting);
}
unpin_exit:
mutex_unlock(&iommu->lock);
return i > 0 ? i : -EINVAL;
}
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
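The unpin hunk above now rejects npage <= 0 up front and returns how many entries were actually unpinned (or -EINVAL if none were). For context, a minimal caller-side sketch follows, assuming the mdev vendor-driver pin/unpin interface of this kernel generation (vfio_pin_pages()/vfio_unpin_pages() keyed by the mdev's struct device); the function and variable names are hypothetical and not part of the patch.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/vfio.h>

/* Pin a batch of guest PFNs, do work, then unpin; errors are propagated. */
static int demo_pin_then_unpin(struct device *mdev_dev, unsigned long *user_pfn,
			       unsigned long *phys_pfn, int npage)
{
	int pinned, unpinned;

	pinned = vfio_pin_pages(mdev_dev, user_pfn, npage,
				IOMMU_READ | IOMMU_WRITE, phys_pfn);
	if (pinned < 0)
		return pinned;
	if (pinned != npage) {
		/* Short pin: back out whatever was pinned and report failure. */
		if (pinned > 0)
			vfio_unpin_pages(mdev_dev, user_pfn, pinned);
		return -EFAULT;
	}

	/* ... DMA to/from the pages in phys_pfn[] happens here ... */

	/* Returns the number of entries unpinned, or -EINVAL if none were. */
	unpinned = vfio_unpin_pages(mdev_dev, user_pfn, npage);
	return unpinned < 0 ? unpinned : 0;
}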
@@ -2177,7 +2177,7 @@ static int vfio_iommu_resv_exclude(struct list_head *iova,
continue;
/*
* Insert a new node if current node overlaps with the
* reserve region to exclude that from valid iova range.
* Note that, new node is inserted before the current
* node and finally the current node is deleted keeping
* the list updated and sorted.
......
@@ -333,17 +333,10 @@ struct vfio_region_info_cap_type {
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
/* 10de vendor PCI sub-types */
/*
* NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
*/
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
/* subtype 1 was VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM, don't use */
/* 1014 vendor PCI sub-types */
/*
* IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
* to do TLB invalidation on a GPU.
*/
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
/* subtype 1 was VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD, don't use */
/* sub-types for VFIO_REGION_TYPE_GFX */
#define VFIO_REGION_SUBTYPE_GFX_EDID (1)
@@ -637,32 +630,9 @@ struct vfio_device_migration_info {
*/
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3
/*
* Capability with compressed real address (aka SSA - small system address)
* where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
* and by the userspace to associate a NVLink bridge with a GPU.
*/
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4
struct vfio_region_info_cap_nvlink2_ssatgt {
struct vfio_info_cap_header header;
__u64 tgt;
};
/* subtype 4 was VFIO_REGION_INFO_CAP_NVLINK2_SSATGT, don't use */
/*
* Capability with an NVLink link speed. The value is read by
* the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
* property in the device tree. The value is fixed in the hardware
* and failing to provide the correct value results in the link
* not working with no indication from the driver why.
*/
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5
struct vfio_region_info_cap_nvlink2_lnkspd {
struct vfio_info_cap_header header;
__u32 link_speed;
__u32 __pad;
};
/* subtype 5 was VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD, don't use */
/**
* VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
......