Commit 57398443 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus-5.15b-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Some minor cleanups and fixes of some theoretical bugs, as well as a
  fix of a bug introduced in 5.15-rc1"

* tag 'for-linus-5.15b-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/x86: fix PV trap handling on secondary processors
  xen/balloon: fix balloon kthread freezing
  swiotlb-xen: this is PV-only on x86
  xen/pci-swiotlb: reduce visibility of symbols
  PCI: only build xen-pcifront in PV-enabled environments
  swiotlb-xen: ensure to issue well-formed XENMEM_exchange requests
  Xen/gntdev: don't ignore kernel unmapping error
  xen/x86: drop redundant zeroing from cpu_initialize_context()
parents 90316e6e 0594c581
...@@ -2610,7 +2610,6 @@ config PCI_OLPC ...@@ -2610,7 +2610,6 @@ config PCI_OLPC
config PCI_XEN config PCI_XEN
def_bool y def_bool y
depends on PCI && XEN depends on PCI && XEN
select SWIOTLB_XEN
config MMCONF_FAM10H config MMCONF_FAM10H
def_bool y def_bool y
......
...@@ -3,14 +3,10 @@ ...@@ -3,14 +3,10 @@
#define _ASM_X86_SWIOTLB_XEN_H #define _ASM_X86_SWIOTLB_XEN_H
#ifdef CONFIG_SWIOTLB_XEN #ifdef CONFIG_SWIOTLB_XEN
extern int xen_swiotlb;
extern int __init pci_xen_swiotlb_detect(void); extern int __init pci_xen_swiotlb_detect(void);
extern void __init pci_xen_swiotlb_init(void);
extern int pci_xen_swiotlb_init_late(void); extern int pci_xen_swiotlb_init_late(void);
#else #else
#define xen_swiotlb (0) #define pci_xen_swiotlb_detect NULL
static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
static inline void __init pci_xen_swiotlb_init(void) { }
static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; } static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
#endif #endif
......
...@@ -755,8 +755,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) ...@@ -755,8 +755,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
preempt_enable(); preempt_enable();
} }
static void xen_convert_trap_info(const struct desc_ptr *desc, static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
struct trap_info *traps) struct trap_info *traps, bool full)
{ {
unsigned in, out, count; unsigned in, out, count;
...@@ -766,17 +766,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc, ...@@ -766,17 +766,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
for (in = out = 0; in < count; in++) { for (in = out = 0; in < count; in++) {
gate_desc *entry = (gate_desc *)(desc->address) + in; gate_desc *entry = (gate_desc *)(desc->address) + in;
if (cvt_gate_to_trap(in, entry, &traps[out])) if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
out++; out++;
} }
traps[out].address = 0;
return out;
} }
void xen_copy_trap_info(struct trap_info *traps) void xen_copy_trap_info(struct trap_info *traps)
{ {
const struct desc_ptr *desc = this_cpu_ptr(&idt_desc); const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
xen_convert_trap_info(desc, traps); xen_convert_trap_info(desc, traps, true);
} }
/* Load a new IDT into Xen. In principle this can be per-CPU, so we /* Load a new IDT into Xen. In principle this can be per-CPU, so we
...@@ -786,6 +787,7 @@ static void xen_load_idt(const struct desc_ptr *desc) ...@@ -786,6 +787,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
{ {
static DEFINE_SPINLOCK(lock); static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257]; static struct trap_info traps[257];
unsigned out;
trace_xen_cpu_load_idt(desc); trace_xen_cpu_load_idt(desc);
...@@ -793,7 +795,8 @@ static void xen_load_idt(const struct desc_ptr *desc) ...@@ -793,7 +795,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
xen_convert_trap_info(desc, traps); out = xen_convert_trap_info(desc, traps, false);
memset(&traps[out], 0, sizeof(traps[0]));
xen_mc_flush(); xen_mc_flush();
if (HYPERVISOR_set_trap_table(traps)) if (HYPERVISOR_set_trap_table(traps))
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#endif #endif
#include <linux/export.h> #include <linux/export.h>
int xen_swiotlb __read_mostly; static int xen_swiotlb __read_mostly;
/* /*
* pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
...@@ -56,7 +56,7 @@ int __init pci_xen_swiotlb_detect(void) ...@@ -56,7 +56,7 @@ int __init pci_xen_swiotlb_detect(void)
return xen_swiotlb; return xen_swiotlb;
} }
void __init pci_xen_swiotlb_init(void) static void __init pci_xen_swiotlb_init(void)
{ {
if (xen_swiotlb) { if (xen_swiotlb) {
xen_swiotlb_init_early(); xen_swiotlb_init_early();
......
...@@ -290,8 +290,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ...@@ -290,8 +290,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
gdt = get_cpu_gdt_rw(cpu); gdt = get_cpu_gdt_rw(cpu);
memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
/* /*
* Bring up the CPU in cpu_bringup_and_idle() with the stack * Bring up the CPU in cpu_bringup_and_idle() with the stack
* pointing just below where pt_regs would be if it were a normal * pointing just below where pt_regs would be if it were a normal
...@@ -308,8 +306,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ...@@ -308,8 +306,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
xen_copy_trap_info(ctxt->trap_ctxt); xen_copy_trap_info(ctxt->trap_ctxt);
ctxt->ldt_ents = 0;
BUG_ON((unsigned long)gdt & ~PAGE_MASK); BUG_ON((unsigned long)gdt & ~PAGE_MASK);
gdt_mfn = arbitrary_virt_to_mfn(gdt); gdt_mfn = arbitrary_virt_to_mfn(gdt);
......
...@@ -110,7 +110,7 @@ config PCI_PF_STUB ...@@ -110,7 +110,7 @@ config PCI_PF_STUB
config XEN_PCIDEV_FRONTEND config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend" tristate "Xen PCI Frontend"
depends on X86 && XEN depends on XEN_PV
select PCI_XEN select PCI_XEN
select XEN_XENBUS_FRONTEND select XEN_XENBUS_FRONTEND
default y default y
......
...@@ -177,6 +177,7 @@ config XEN_GRANT_DMA_ALLOC ...@@ -177,6 +177,7 @@ config XEN_GRANT_DMA_ALLOC
config SWIOTLB_XEN config SWIOTLB_XEN
def_bool y def_bool y
depends on XEN_PV || ARM || ARM64
select DMA_OPS select DMA_OPS
select SWIOTLB select SWIOTLB
......
...@@ -522,7 +522,7 @@ static int balloon_thread(void *unused) ...@@ -522,7 +522,7 @@ static int balloon_thread(void *unused)
timeout = 3600 * HZ; timeout = 3600 * HZ;
credit = current_credit(); credit = current_credit();
wait_event_interruptible_timeout(balloon_thread_wq, wait_event_freezable_timeout(balloon_thread_wq,
balloon_thread_cond(state, credit), timeout); balloon_thread_cond(state, credit), timeout);
if (kthread_should_stop()) if (kthread_should_stop())
......
...@@ -381,6 +381,14 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, ...@@ -381,6 +381,14 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
map->unmap_ops[offset+i].handle, map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status); map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
if (use_ptemod) {
if (map->kunmap_ops[offset+i].status)
err = -EINVAL;
pr_debug("kunmap handle=%u st=%d\n",
map->kunmap_ops[offset+i].handle,
map->kunmap_ops[offset+i].status);
map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
}
} }
return err; return err;
} }
......
...@@ -230,10 +230,11 @@ void __init xen_swiotlb_init_early(void) ...@@ -230,10 +230,11 @@ void __init xen_swiotlb_init_early(void)
/* /*
* Get IO TLB memory from any location. * Get IO TLB memory from any location.
*/ */
start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE); start = memblock_alloc(PAGE_ALIGN(bytes),
IO_TLB_SEGSIZE << IO_TLB_SHIFT);
if (!start) if (!start)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n", panic("%s: Failed to allocate %lu bytes\n",
__func__, PAGE_ALIGN(bytes), PAGE_SIZE); __func__, PAGE_ALIGN(bytes));
/* /*
* And replace that memory with pages under 4GB. * And replace that memory with pages under 4GB.
......
...@@ -46,19 +46,7 @@ extern unsigned long *xen_contiguous_bitmap; ...@@ -46,19 +46,7 @@ extern unsigned long *xen_contiguous_bitmap;
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits, unsigned int address_bits,
dma_addr_t *dma_handle); dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
#else
static inline int xen_create_contiguous_region(phys_addr_t pstart,
unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle)
{
return 0;
}
static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
unsigned int order) { }
#endif #endif
#if defined(CONFIG_XEN_PV) #if defined(CONFIG_XEN_PV)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment