Commit 1bb15570 authored by Linus Torvalds

Merge tag 'vfio-v4.18-rc4' of git://github.com/awilliam/linux-vfio

Pull VFIO fixes from Alex Williamson:

 - Make vfio-pci IGD extensions optional via Kconfig (Alex Williamson)

 - Remove unused and soon-to-be-removed map_atomic callback from mbochs
   sample driver, add unmap callback to avoid dmabuf leaks (Gerd
   Hoffmann)

 - Fix usage of get_user_pages_longterm() (Jason Gunthorpe)

 - Fix sample mbochs driver vm_operations_struct.fault return type
   (Souptick Joarder)

* tag 'vfio-v4.18-rc4' of git://github.com/awilliam/linux-vfio:
  sample/vfio-mdev: Change return type to vm_fault_t
  vfio: Use get_user_pages_longterm correctly
  sample/mdev/mbochs: add mbochs_kunmap_dmabuf
  sample/mdev/mbochs: remove mbochs_kmap_atomic_dmabuf
  vfio/pci: Make IGD support a configurable option
parents b4d05621 d7ef4899
drivers/vfio/pci/Kconfig
@@ -28,5 +28,13 @@ config VFIO_PCI_INTX
 	def_bool y if !S390
 
 config VFIO_PCI_IGD
-	depends on VFIO_PCI
-	def_bool y if X86
+	bool "VFIO PCI extensions for Intel graphics (GVT-d)"
+	depends on VFIO_PCI && X86
+	default y
+	help
+	  Support for Intel IGD specific extensions to enable direct
+	  assignment to virtual machines.  This includes exposing an IGD
+	  specific firmware table and read-only copies of the host bridge
+	  and LPC bridge config space.
+
+	  To enable Intel IGD assignment through vfio-pci, say Y.
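
The Kconfig change turns IGD support from an unconditional def_bool into a user-visible option. To keep builds with the option disabled linking, callers of the IGD setup path need a fallback when the code is compiled out; a minimal sketch of the usual Kconfig-guard pattern follows (the stub name and return value match vfio-pci convention, but this is an illustration, not necessarily the exact hunk merged; struct vfio_pci_device is assumed declared earlier in the header):

	/* Illustrative Kconfig-guard stub in the vein of vfio_pci_private.h. */
	#ifdef CONFIG_VFIO_PCI_IGD
	extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
	#else
	static inline int vfio_pci_igd_init(struct vfio_pci_device *vdev)
	{
		return -ENODEV;	/* IGD extensions compiled out */
	}
	#endif

With CONFIG_VFIO_PCI_IGD=n, callers see -ENODEV and can skip the IGD-specific setup rather than failing the build.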
drivers/vfio/vfio_iommu_type1.c
@@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 	struct page *page[1];
 	struct vm_area_struct *vma;
 	struct vm_area_struct *vmas[1];
+	unsigned int flags = 0;
 	int ret;
 
+	if (prot & IOMMU_WRITE)
+		flags |= FOLL_WRITE;
+
+	down_read(&mm->mmap_sem);
 	if (mm == current->mm) {
-		ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
-					      page, vmas);
+		ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
 	} else {
-		unsigned int flags = 0;
-
-		if (prot & IOMMU_WRITE)
-			flags |= FOLL_WRITE;
-
-		down_read(&mm->mmap_sem);
 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
 					    vmas, NULL);
 		/*
@@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 			ret = -EOPNOTSUPP;
 			put_page(page[0]);
 		}
-		up_read(&mm->mmap_sem);
 	}
+	up_read(&mm->mmap_sem);
 
 	if (ret == 1) {
 		*pfn = page_to_pfn(page[0]);
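
get_user_pages_longterm() takes a gup_flags argument of FOLL_* bits, not a write boolean, and unlike get_user_pages_fast() it must be called with mmap_sem held. The old code passed !!(prot & IOMMU_WRITE), which only worked because FOLL_WRITE happens to be bit 0, and took the lock on the remote branch alone. The fix computes the flags once and widens the lock to cover both paths. A minimal sketch of the corrected calling convention (4.18-era API; the helper name is illustrative, not part of the patch):

	#include <linux/mm.h>
	#include <linux/sched.h>

	/* Sketch: pin one current-process page for long-term use. */
	static int pin_one_page(unsigned long vaddr, bool writable,
				struct page **page)
	{
		struct vm_area_struct *vmas[1];
		unsigned int flags = writable ? FOLL_WRITE : 0;
		int ret;

		down_read(&current->mm->mmap_sem);	/* _longterm needs mmap_sem */
		ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
		up_read(&current->mm->mmap_sem);

		if (ret < 0)
			return ret;
		return ret == 1 ? 0 : -EFAULT;
	}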
samples/vfio-mdev/mbochs.c
@@ -657,7 +657,7 @@ static void mbochs_put_pages(struct mdev_state *mdev_state)
 	dev_dbg(dev, "%s: %d pages released\n", __func__, count);
 }
 
-static int mbochs_region_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct mdev_state *mdev_state = vma->vm_private_data;
@@ -695,7 +695,7 @@ static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 	return 0;
 }
 
-static int mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct mbochs_dmabuf *dmabuf = vma->vm_private_data;
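
Switching the mbochs fault handlers from int to vm_fault_t matches the then-new typed fault-code convention: handlers return VM_FAULT_* codes rather than errnos, and the dedicated type lets the compiler and sparse catch mixups. A sketch of the idiom (the page-lookup helper is hypothetical):

	#include <linux/mm.h>

	/* Hypothetical helper: resolve vmf->pgoff to a refcounted page. */
	static struct page *example_lookup_page(struct vm_fault *vmf);

	static vm_fault_t example_vm_fault(struct vm_fault *vmf)
	{
		struct page *page = example_lookup_page(vmf);

		if (!page)
			return VM_FAULT_SIGBUS;	/* typed code, not an errno */
		vmf->page = page;		/* core MM installs the PTE */
		return 0;
	}

The remaining hunk of the same file removes the unused atomic map callback and adds the missing unmap: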
@@ -803,29 +803,26 @@ static void mbochs_release_dmabuf(struct dma_buf *buf)
 	mutex_unlock(&mdev_state->ops_lock);
 }
 
-static void *mbochs_kmap_atomic_dmabuf(struct dma_buf *buf,
-				       unsigned long page_num)
-{
-	struct mbochs_dmabuf *dmabuf = buf->priv;
-	struct page *page = dmabuf->pages[page_num];
-
-	return kmap_atomic(page);
-}
-
 static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
 {
 	struct mbochs_dmabuf *dmabuf = buf->priv;
 	struct page *page = dmabuf->pages[page_num];
 
 	return kmap(page);
 }
 
+static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num,
+				 void *vaddr)
+{
+	kunmap(vaddr);
+}
+
 static struct dma_buf_ops mbochs_dmabuf_ops = {
 	.map_dma_buf	  = mbochs_map_dmabuf,
 	.unmap_dma_buf	  = mbochs_unmap_dmabuf,
 	.release	  = mbochs_release_dmabuf,
-	.map_atomic	  = mbochs_kmap_atomic_dmabuf,
 	.map		  = mbochs_kmap_dmabuf,
+	.unmap		  = mbochs_kunmap_dmabuf,
 	.mmap		  = mbochs_mmap_dmabuf,
 };
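
On the exporter side, every .map call does a kmap() that must be undone; before this change mbochs offered no .unmap, so each importer access leaked a kernel mapping (a real cost on 32-bit HIGHMEM configurations, where kmap slots are scarce). From the importer's point of view the calls are strictly paired; a sketch against the 4.18-era dma-buf API (the buffer, page index, and output pointer are illustrative):

	#include <linux/dma-buf.h>
	#include <linux/string.h>

	/* Importer-side sketch: copy one page out of a dma-buf.
	 * dma_buf_kmap() invokes the exporter's .map callback;
	 * dma_buf_kunmap() invokes .unmap, which mbochs now provides. */
	static void copy_dmabuf_page(struct dma_buf *buf, unsigned long page_num,
				     void *out)
	{
		void *vaddr = dma_buf_kmap(buf, page_num);

		if (!vaddr)
			return;
		memcpy(out, vaddr, PAGE_SIZE);
		dma_buf_kunmap(buf, page_num, vaddr);
	}

One aside on the merged hunk: mbochs_kunmap_dmabuf() passes vaddr straight to kunmap(), whose parameter is a struct page *; this is harmless in practice only because kunmap() is a no-op unless CONFIG_HIGHMEM is set.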