Commit dcb8cfbd authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus-5.3a-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:

 - a small cleanup

 - a fix for a build error on ARM with some configs

 - a fix of a patch for the Xen gntdev driver

 - three patches fixing a potential problem in the swiotlb-xen
   driver, which Konrad was fine with me carrying through the Xen
   tree

* tag 'for-linus-5.3a-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/swiotlb: remember having called xen_create_contiguous_region()
  xen/swiotlb: simplify range_straddles_page_boundary()
  xen/swiotlb: fix condition for calling xen_destroy_contiguous_region()
  xen: avoid link error on ARM
  xen/gntdev.c: Replace vm_map_pages() with vm_map_pages_zero()
  xen/pciback: remove set but not used variable 'old_state'
parents a507f25d b877ac98
...@@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) ...@@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto out_put_map; goto out_put_map;
if (!use_ptemod) { if (!use_ptemod) {
err = vm_map_pages(vma, map->pages, map->count); err = vm_map_pages_zero(vma, map->pages, map->count);
if (err) if (err)
goto out_put_map; goto out_put_map;
} else { } else {
......
...@@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata) ...@@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0; return 0;
} }
/*
 * Walk state shared with remap_pfn_fn() while installing PTEs over a
 * range: the address space being modified, the array of pages to map,
 * the protection bits for each new PTE, and the index of the next
 * page to consume from @pages.
 */
struct remap_pfn {
	struct mm_struct *mm;	/* target address space */
	struct page **pages;	/* pages to map, consumed in order */
	pgprot_t prot;		/* protection bits for the new PTEs */
	unsigned long i;	/* index of the next entry in @pages */
};
/*
 * Callback for apply_to_page_range(): map the next page from the
 * remap_pfn walk state into the PTE slot at @addr, marking it
 * "special" via pte_mkspecial().  Always returns 0 so the walk
 * continues over the whole range.
 */
static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;
	struct page *page = r->pages[r->i];
	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));

	set_pte_at(r->mm, addr, ptep, pte);
	r->i++;	/* one page consumed per PTE installed */

	return 0;
}
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
{ {
struct privcmd_data *data = file->private_data; struct privcmd_data *data = file->private_data;
...@@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) ...@@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
goto out; goto out;
} }
if (xen_feature(XENFEAT_auto_translated_physmap)) { if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE); unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
struct page **pages; struct page **pages;
unsigned int i; unsigned int i;
...@@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) ...@@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (rc) if (rc)
goto out; goto out;
if (xen_feature(XENFEAT_auto_translated_physmap)) { if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
struct remap_pfn r = { xen_feature(XENFEAT_auto_translated_physmap)) {
.mm = vma->vm_mm, rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
.pages = vma->vm_private_data,
.prot = vma->vm_page_prot,
};
rc = apply_to_page_range(r.mm, kdata.addr,
kdata.num << PAGE_SHIFT,
remap_pfn_fn, &r);
} else { } else {
unsigned int domid = unsigned int domid =
(xdata.flags & XENMEM_rsrc_acq_caller_owned) ? (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
......
...@@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address) ...@@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
return xen_phys_to_bus(virt_to_phys(address)); return xen_phys_to_bus(virt_to_phys(address));
} }
static int check_pages_physically_contiguous(unsigned long xen_pfn, static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
unsigned int offset,
size_t length)
{ {
unsigned long next_bfn; unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
int i; unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
int nr_pages;
next_bfn = pfn_to_bfn(xen_pfn); next_bfn = pfn_to_bfn(xen_pfn);
nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;
for (i = 1; i < nr_pages; i++) { for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn) if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
return 0; return 1;
}
return 1;
}
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) return 0;
{
unsigned long xen_pfn = XEN_PFN_DOWN(p);
unsigned int offset = p & ~XEN_PAGE_MASK;
if (offset + size <= XEN_PAGE_SIZE)
return 0;
if (check_pages_physically_contiguous(xen_pfn, offset, size))
return 0;
return 1;
} }
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
...@@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, ...@@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL; return NULL;
} }
SetPageXenRemapped(virt_to_page(ret));
} }
memset(ret, 0, size); memset(ret, 0, size);
return ret; return ret;
...@@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, ...@@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* Convert the size to actually allocated. */ /* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT); size = 1UL << (order + XEN_PAGE_SHIFT);
if (((dev_addr + size - 1 <= dma_mask)) || if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
range_straddles_page_boundary(phys, size)) range_straddles_page_boundary(phys, size)) &&
TestClearPageXenRemapped(virt_to_page(vaddr)))
xen_destroy_contiguous_region(phys, order); xen_destroy_contiguous_region(phys, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
......
...@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value, ...@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
{ {
int err; int err;
u16 old_value; u16 old_value;
pci_power_t new_state, old_state; pci_power_t new_state;
err = pci_read_config_word(dev, offset, &old_value); err = pci_read_config_word(dev, offset, &old_value);
if (err) if (err)
goto out; goto out;
old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK); new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS; new_value &= PM_OK_BITS;
......
...@@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, ...@@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
/*
 * Walk state shared with remap_pfn_fn() while installing PTEs over a
 * range: the address space being modified, the array of pages to map,
 * the protection bits for each new PTE, and the index of the next
 * page to consume from @pages.
 */
struct remap_pfn {
	struct mm_struct *mm;	/* target address space */
	struct page **pages;	/* pages to map, consumed in order */
	pgprot_t prot;		/* protection bits for the new PTEs */
	unsigned long i;	/* index of the next entry in @pages */
};
/*
 * Callback for apply_to_page_range(): install a "special" PTE at @addr
 * mapping the next page from the remap_pfn walk state, then advance the
 * page index.  Always returns 0 so the walk covers the whole range.
 */
static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;
	unsigned long pfn = page_to_pfn(r->pages[r->i]);

	set_pte_at(r->mm, addr, ptep, pte_mkspecial(pfn_pte(pfn, r->prot)));
	r->i++;

	return 0;
}
/*
 * Map the pages stashed in @vma->vm_private_data into [@addr, @addr + @len)
 * of @vma's address space via apply_to_page_range()/remap_pfn_fn().
 *
 * Used by the privcmd module, but has to be built-in on ARM.
 * Returns 0 on success or a negative errno from apply_to_page_range().
 */
int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
{
	struct remap_pfn r;

	r.mm = vma->vm_mm;
	r.pages = vma->vm_private_data;
	r.prot = vma->vm_page_prot;
	r.i = 0;	/* start with the first page */

	return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
}
EXPORT_SYMBOL_GPL(xen_remap_vma_range);
...@@ -152,6 +152,8 @@ enum pageflags { ...@@ -152,6 +152,8 @@ enum pageflags {
PG_savepinned = PG_dirty, PG_savepinned = PG_dirty,
/* Has a grant mapping of another (foreign) domain's page. */ /* Has a grant mapping of another (foreign) domain's page. */
PG_foreign = PG_owner_priv_1, PG_foreign = PG_owner_priv_1,
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
/* SLOB */ /* SLOB */
PG_slob_free = PG_private, PG_slob_free = PG_private,
...@@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) ...@@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
......
...@@ -109,6 +109,9 @@ static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, ...@@ -109,6 +109,9 @@ static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
} }
#endif #endif
int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long len);
/* /*
* xen_remap_domain_gfn_array() - map an array of foreign frames by gfn * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
* @vma: VMA to map the pages into * @vma: VMA to map the pages into
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment