Commit 051089a2 authored by Linus Torvalds

Merge tag 'for-linus-4.15-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:
 "Xen features and fixes for v4.15-rc1

  Apart from several small fixes it contains the following features:

   - a series by Joao Martins to add vdso support of the pv clock
     interface

   - a series by Juergen Gross to add support for Xen pv guests to be
     able to run on 5 level paging hosts

   - a series by Stefano Stabellini adding the Xen pvcalls frontend
     driver using a paravirtualized socket interface"

* tag 'for-linus-4.15-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (34 commits)
  xen/pvcalls: fix potential endless loop in pvcalls-front.c
  xen/pvcalls: Add MODULE_LICENSE()
  MAINTAINERS: xen, kvm: track pvclock-abi.h changes
  x86/xen/time: setup vcpu 0 time info page
  x86/xen/time: set pvclock flags on xen_time_init()
  x86/pvclock: add setter for pvclock_pvti_cpu0_va
  ptp_kvm: probe for kvm guest availability
  xen/privcmd: remove unused variable pageidx
  xen: select grant interface version
  xen: update arch/x86/include/asm/xen/cpuid.h
  xen: add grant interface version dependent constants to gnttab_ops
  xen: limit grant v2 interface to the v1 functionality
  xen: re-introduce support for grant v2 interface
  xen: support priv-mapping in an HVM tools domain
  xen/pvcalls: remove redundant check for irq >= 0
  xen/pvcalls: fix unsigned less than zero error check
  xen/time: Return -ENODEV from xen_get_wallclock()
  xen/pvcalls-front: mark expected switch fall-through
  xen: xenbus_probe_frontend: mark expected switch fall-throughs
  xen/time: do not decrease steal time after live migration on xen
  ...
parents 974aa563 646d944c
MAINTAINERS

@@ -7650,6 +7650,7 @@ S: Supported
 F: arch/x86/kvm/
 F: arch/x86/include/uapi/asm/kvm*
 F: arch/x86/include/asm/kvm*
+F: arch/x86/include/asm/pvclock-abi.h
 F: arch/x86/kernel/kvm.c
 F: arch/x86/kernel/kvmclock.c
@@ -14838,6 +14839,7 @@ F: arch/x86/xen/
 F: drivers/*/xen-*front.c
 F: drivers/xen/
 F: arch/x86/include/asm/xen/
+F: arch/x86/include/asm/pvclock-abi.h
 F: include/xen/
 F: include/uapi/xen/
 F: Documentation/ABI/stable/sysfs-hypervisor-xen
...
arch/arm/xen/grant-table.c

@@ -45,7 +45,14 @@ void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 	return;
 }
 
-int arch_gnttab_init(unsigned long nr_shared)
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+			   unsigned long max_nr_gframes,
+			   grant_status_t **__shared)
+{
+	return -ENOSYS;
+}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
 {
 	return 0;
 }
arch/x86/entry/vdso/vma.c

@@ -112,7 +112,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 			__pa_symbol(&__vvar_page) >> PAGE_SHIFT);
 	} else if (sym_offset == image->sym_pvclock_page) {
 		struct pvclock_vsyscall_time_info *pvti =
-			pvclock_pvti_cpu0_va();
+			pvclock_get_pvti_cpu0_va();
 		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
 			ret = vm_insert_pfn_prot(
 				vma,
...
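The payoff on the consumer side, for illustration: once VCLOCK_PVCLOCK is enabled and the fault above has mapped the registered pvti page into the process, an ordinary timestamp call such as

	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);	/* served on the vDSO fast path, no syscall */

is satisfied entirely in userspace from that page.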
arch/x86/include/asm/pvclock.h

@@ -5,15 +5,6 @@
 #include <linux/clocksource.h>
 #include <asm/pvclock-abi.h>
 
-#ifdef CONFIG_KVM_GUEST
-extern struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void);
-#else
-static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
-{
-	return NULL;
-}
-#endif
-
 /* some helper functions for xen and kvm pv clock sources */
 u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
@@ -102,4 +93,14 @@ struct pvclock_vsyscall_time_info {
 
 #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
 
+#ifdef CONFIG_PARAVIRT_CLOCK
+void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti);
+struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void);
+#else
+static inline struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
+{
+	return NULL;
+}
+#endif
+
 #endif /* _ASM_X86_PVCLOCK_H */
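The renamed getter plus the new setter decouple the producers (kvmclock, and now Xen's time code) from the single consumer, the vDSO fault handler above. A minimal producer-side sketch, assuming a hypothetical guest-allocated page `my_pvti`:

	static struct pvclock_vsyscall_time_info *my_pvti;	/* assumed already allocated */

	static void example_publish_pvti(void)
	{
		/* Must run before VCLOCK_PVCLOCK is first used;
		 * pvclock_set_pvti_cpu0_va() WARNs otherwise. */
		pvclock_set_pvti_cpu0_va(my_pvti);
	}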
arch/x86/include/asm/xen/cpuid.h

@@ -73,22 +73,44 @@
 #define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
 #define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
 
+/*
+ * Leaf 4 (0x40000x03)
+ * Sub-leaf 0: EAX: bit 0: emulated tsc
+ *                  bit 1: host tsc is known to be reliable
+ *                  bit 2: RDTSCP instruction available
+ *             EBX: tsc_mode: 0=default (emulate if necessary), 1=emulate,
+ *                            2=no emulation, 3=no emulation + TSC_AUX support
+ *             ECX: guest tsc frequency in kHz
+ *             EDX: guest tsc incarnation (migration count)
+ * Sub-leaf 1: EAX: tsc offset low part
+ *             EBX: tsc offset high part
+ *             ECX: multiplicator for tsc->ns conversion
+ *             EDX: shift amount for tsc->ns conversion
+ * Sub-leaf 2: EAX: host tsc frequency in kHz
+ */
+
 /*
  * Leaf 5 (0x40000x04)
  * HVM-specific features
- * EAX: Features
- * EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
+ * Sub-leaf 0: EAX: Features
+ * Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
  */
-
-/* Virtualized APIC registers */
-#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0)
-/* Virtualized x2APIC accesses */
-#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1)
+#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
+#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */
 /* Memory mapped from other domains has valid IOMMU entries */
 #define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
-/* vcpu id is present in EBX */
-#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3)
+#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
+
+/*
+ * Leaf 6 (0x40000x05)
+ * PV-specific parameters
+ * Sub-leaf 0: EAX: max available sub-leaf
+ * Sub-leaf 0: EBX: bits 0-7: max machine address width
+ */
+
+/* Max. address width in bits taking memory hotplug into account. */
+#define XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK (0xffu << 0)
 
-#define XEN_CPUID_MAX_NUM_LEAVES 4
+#define XEN_CPUID_MAX_NUM_LEAVES 5
 
 #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
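For illustration, a guest can probe the new leaf 6 data roughly the way the grant-table code later in this pull does (a sketch; xen_cpuid_base() locates the hypervisor CPUID base leaf):

	uint32_t base = xen_cpuid_base();

	if (cpuid_eax(base) >= 5) {		/* leaf 6 lives at base + 5 */
		uint32_t width = cpuid_ebx(base + 5) &
				 XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		/* width = max machine address width in bits */
	}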
arch/x86/include/asm/xen/page.h

@@ -27,6 +27,15 @@ typedef struct xpaddr {
 	phys_addr_t paddr;
 } xpaddr_t;
 
+#ifdef CONFIG_X86_64
+#define XEN_PHYSICAL_MASK	__sme_clr((1UL << 52) - 1)
+#else
+#define XEN_PHYSICAL_MASK	__PHYSICAL_MASK
+#endif
+
+#define XEN_PTE_MFN_MASK	((pteval_t)(((signed long)PAGE_MASK) & \
+				 XEN_PHYSICAL_MASK))
+
 #define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
 #define XPADDR(x)	((xpaddr_t) { .paddr = (x) })
@@ -278,7 +287,7 @@ static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
 static inline unsigned long pte_mfn(pte_t pte)
 {
-	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+	return (pte.pte & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
 }
 
 static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
...
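Context for this hunk, from the 5-level-paging series in this pull: a 4-level PV guest can be handed machine frame numbers wider than its own __PHYSICAL_MASK, which PTE_PFN_MASK would silently truncate. (1UL << 52) - 1 keeps the full 52 architectural physical-address bits, and __sme_clr() drops the AMD SME encryption bit so it can never be mistaken for part of an MFN.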
arch/x86/kernel/kvmclock.c

@@ -48,12 +48,6 @@ early_param("no-kvmclock", parse_no_kvmclock);
 static struct pvclock_vsyscall_time_info *hv_clock;
 static struct pvclock_wall_clock *wall_clock;
 
-struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
-{
-	return hv_clock;
-}
-EXPORT_SYMBOL_GPL(pvclock_pvti_cpu0_va);
-
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
  * have elapsed since the hypervisor wrote the data. So we try to account for
@@ -377,6 +371,7 @@ int __init kvm_setup_vsyscall_timeinfo(void)
 		return 1;
 	}
 
+	pvclock_set_pvti_cpu0_va(hv_clock);
 	put_cpu();
 
 	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
...
arch/x86/kernel/pvclock.c

@@ -25,8 +25,10 @@
 #include <asm/fixmap.h>
 #include <asm/pvclock.h>
+#include <asm/vgtod.h>
 
 static u8 valid_flags __read_mostly = 0;
+static struct pvclock_vsyscall_time_info *pvti_cpu0_va __read_mostly;
 
 void pvclock_set_flags(u8 flags)
 {
@@ -144,3 +146,15 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
 
 	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
 }
+
+void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti)
+{
+	WARN_ON(vclock_was_used(VCLOCK_PVCLOCK));
+	pvti_cpu0_va = pvti;
+}
+
+struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
+{
+	return pvti_cpu0_va;
+}
+EXPORT_SYMBOL_GPL(pvclock_get_pvti_cpu0_va);
arch/x86/xen/grant-table.c

@@ -49,7 +49,7 @@
 static struct gnttab_vm_area {
 	struct vm_struct *area;
 	pte_t **ptes;
-} gnttab_shared_vm_area;
+} gnttab_shared_vm_area, gnttab_status_vm_area;
 
 int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
@@ -73,16 +73,43 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
 	return 0;
 }
 
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+			   unsigned long max_nr_gframes,
+			   grant_status_t **__shared)
+{
+	grant_status_t *shared = *__shared;
+	unsigned long addr;
+	unsigned long i;
+
+	if (shared == NULL)
+		*__shared = shared = gnttab_status_vm_area.area->addr;
+
+	addr = (unsigned long)shared;
+
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
+			   mfn_pte(frames[i], PAGE_KERNEL));
+		addr += PAGE_SIZE;
+	}
+
+	return 0;
+}
+
 void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 {
+	pte_t **ptes;
 	unsigned long addr;
 	unsigned long i;
 
+	if (shared == gnttab_status_vm_area.area->addr)
+		ptes = gnttab_status_vm_area.ptes;
+	else
+		ptes = gnttab_shared_vm_area.ptes;
+
 	addr = (unsigned long)shared;
 
 	for (i = 0; i < nr_gframes; i++) {
-		set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
-			   __pte(0));
+		set_pte_at(&init_mm, addr, ptes[i], __pte(0));
 		addr += PAGE_SIZE;
 	}
 }
@@ -102,12 +129,35 @@ static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
 	return 0;
 }
 
-int arch_gnttab_init(unsigned long nr_shared)
+static void arch_gnttab_vfree(struct gnttab_vm_area *area)
 {
+	free_vm_area(area->area);
+	kfree(area->ptes);
+}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+	int ret;
+
 	if (!xen_pv_domain())
 		return 0;
 
-	return arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
+	ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Always allocate the space for the status frames in case
+	 * we're migrated to a host with V2 support.
+	 */
+	ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	arch_gnttab_vfree(&gnttab_shared_vm_area);
+	return -ENOMEM;
 }
 
 #ifdef CONFIG_XEN_PVH
...
arch/x86/xen/mmu.c

@@ -172,6 +172,9 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
 			       pgprot_t prot, unsigned domid,
 			       struct page **pages)
 {
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return -EOPNOTSUPP;
+
 	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
@@ -182,6 +185,10 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 			       int *err_ptr, pgprot_t prot,
 			       unsigned domid, struct page **pages)
 {
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
+						 prot, domid, pages);
+
 	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
 	 * and the consequences later is quite hard to detect what the actual
 	 * cause of "wrong memory was mapped in".
@@ -193,9 +200,12 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
 
 /* Returns: 0 success */
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
-			       int numpgs, struct page **pages)
+			       int nr, struct page **pages)
 {
-	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return xen_xlate_unmap_gfn_range(vma, nr, pages);
+
+	if (!pages)
 		return 0;
 
 	return -EINVAL;
...
arch/x86/xen/mmu_pv.c

@@ -315,7 +315,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
 	if (val & _PAGE_PRESENT) {
-		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+		unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
 		unsigned long pfn = mfn_to_pfn(mfn);
 
 		pteval_t flags = val & PTE_FLAGS_MASK;
@@ -1721,7 +1721,7 @@ static unsigned long __init m2p(phys_addr_t maddr)
 {
 	phys_addr_t paddr;
 
-	maddr &= PTE_PFN_MASK;
+	maddr &= XEN_PTE_MFN_MASK;
 	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
 
 	return paddr;
...
arch/x86/xen/suspend.c

@@ -17,6 +17,8 @@
 
 void xen_arch_pre_suspend(void)
 {
+	xen_save_time_memory_area();
+
 	if (xen_pv_domain())
 		xen_pv_pre_suspend();
 }
@@ -27,6 +29,8 @@ void xen_arch_post_suspend(int cancelled)
 		xen_pv_post_suspend(cancelled);
 	else
 		xen_hvm_post_suspend(cancelled);
+
+	xen_restore_time_memory_area();
 }
 
 static void xen_vcpu_notify_restore(void *data)
...
arch/x86/xen/time.c

@@ -75,7 +75,7 @@ static void xen_get_wallclock(struct timespec *now)
 
 static int xen_set_wallclock(const struct timespec *now)
 {
-	return -1;
+	return -ENODEV;
 }
 
 static int xen_pvclock_gtod_notify(struct notifier_block *nb,
@@ -371,8 +371,95 @@ static const struct pv_time_ops xen_time_ops __initconst = {
 	.steal_clock = xen_steal_clock,
 };
 
+static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
+
+void xen_save_time_memory_area(void)
+{
+	struct vcpu_register_time_memory_area t;
+	int ret;
+
+	if (!xen_clock)
+		return;
+
+	t.addr.v = NULL;
+
+	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
+	if (ret != 0)
+		pr_notice("Cannot save secondary vcpu_time_info (err %d)",
+			  ret);
+	else
+		clear_page(xen_clock);
+}
+
+void xen_restore_time_memory_area(void)
+{
+	struct vcpu_register_time_memory_area t;
+	int ret;
+
+	if (!xen_clock)
+		return;
+
+	t.addr.v = &xen_clock->pvti;
+
+	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
+
+	/*
+	 * We don't disable VCLOCK_PVCLOCK entirely if it fails to register
+	 * the secondary time info with Xen or if we migrated to a host
+	 * without the necessary flags. In either case processes see either
+	 * a zeroed-out pvti or a pvti without PVCLOCK_TSC_STABLE_BIT set.
+	 * Userspace checks that bit and, if it is 0, discards the data in
+	 * pvti and falls back to a system call for a reliable timestamp.
+	 */
+	if (ret != 0)
+		pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
+			  ret);
+}
+
+static void xen_setup_vsyscall_time_info(void)
+{
+	struct vcpu_register_time_memory_area t;
+	struct pvclock_vsyscall_time_info *ti;
+	int ret;
+
+	ti = (struct pvclock_vsyscall_time_info *)get_zeroed_page(GFP_KERNEL);
+	if (!ti)
+		return;
+
+	t.addr.v = &ti->pvti;
+
+	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
+	if (ret) {
+		pr_notice("xen: VCLOCK_PVCLOCK not supported (err %d)\n", ret);
+		free_page((unsigned long)ti);
+		return;
+	}
+
+	/*
+	 * If the primary time info had this bit set, the secondary should
+	 * too, since it is the same data in both, just in different memory
+	 * regions. But we still check it in case the hypervisor is buggy.
+	 */
+	if (!(ti->pvti.flags & PVCLOCK_TSC_STABLE_BIT)) {
+		t.addr.v = NULL;
+		ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area,
+					 0, &t);
+		if (!ret)
+			free_page((unsigned long)ti);
+		pr_notice("xen: VCLOCK_PVCLOCK not supported (tsc unstable)\n");
+		return;
+	}
+
+	xen_clock = ti;
+	pvclock_set_pvti_cpu0_va(xen_clock);
+
+	xen_clocksource.archdata.vclock_mode = VCLOCK_PVCLOCK;
+}
+
 static void __init xen_time_init(void)
 {
+	struct pvclock_vcpu_time_info *pvti;
 	int cpu = smp_processor_id();
 	struct timespec tp;
 
@@ -396,6 +483,16 @@ static void __init xen_time_init(void)
 		setup_force_cpu_cap(X86_FEATURE_TSC);
 
+	/*
+	 * Check the primary time info up front for PVCLOCK_TSC_STABLE_BIT;
+	 * having it lets us enable the faster vsyscall-based Xen clocksource.
+	 */
+	pvti = &__this_cpu_read(xen_vcpu)->time;
+	if (pvti->flags & PVCLOCK_TSC_STABLE_BIT) {
+		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+		xen_setup_vsyscall_time_info();
+	}
+
 	xen_setup_runstate_info(cpu);
 	xen_setup_timer(cpu);
 	xen_setup_cpu_clockevents();
...
arch/x86/xen/xen-ops.h

@@ -70,6 +70,8 @@ void xen_setup_runstate_info(int cpu);
 void xen_teardown_timer(int cpu);
 u64 xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
+void xen_save_time_memory_area(void);
+void xen_restore_time_memory_area(void);
 void __init xen_init_time_ops(void);
 void __init xen_hvm_init_time_ops(void);
...
drivers/ptp/ptp_kvm.c

@@ -178,8 +178,11 @@ static int __init ptp_kvm_init(void)
 {
 	long ret;
 
+	if (!kvm_para_available())
+		return -ENODEV;
+
 	clock_pair_gpa = slow_virt_to_phys(&clock_pair);
-	hv_clock = pvclock_pvti_cpu0_va();
+	hv_clock = pvclock_get_pvti_cpu0_va();
 
 	if (!hv_clock)
 		return -ENODEV;
...
drivers/xen/Kconfig

@@ -196,6 +196,17 @@ config XEN_PCIDEV_BACKEND
 
 	  If in doubt, say m.
 
+config XEN_PVCALLS_FRONTEND
+	tristate "XEN PV Calls frontend driver"
+	depends on INET && XEN
+	default n
+	select XEN_XENBUS_FRONTEND
+	help
+	  Experimental frontend for the Xen PV Calls protocol
+	  (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
+	  sends a small set of POSIX calls to the backend, which
+	  implements them.
+
 config XEN_PVCALLS_BACKEND
 	bool "XEN PV Calls backend driver"
 	depends on INET && XEN && XEN_BACKEND
...
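Usage note: the new symbol is a tristate, so the frontend can be built either into the kernel or as a module, e.g. in .config:

	CONFIG_XEN_PVCALLS_FRONTEND=m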
drivers/xen/Makefile

@@ -37,6 +37,7 @@ obj-$(CONFIG_XEN_EFI) += efi.o
 obj-$(CONFIG_XEN_SCSI_BACKEND)		+= xen-scsiback.o
 obj-$(CONFIG_XEN_AUTO_XLATE)		+= xlate_mmu.o
 obj-$(CONFIG_XEN_PVCALLS_BACKEND)	+= pvcalls-back.o
+obj-$(CONFIG_XEN_PVCALLS_FRONTEND)	+= pvcalls-front.o
 xen-evtchn-y				:= evtchn.o
 xen-gntdev-y				:= gntdev.o
 xen-gntalloc-y				:= gntalloc.o
...
drivers/xen/grant-table.c

@@ -33,6 +33,7 @@
 
 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 
+#include <linux/bootmem.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
@@ -43,6 +44,7 @@
 #include <linux/hardirq.h>
 #include <linux/workqueue.h>
 #include <linux/ratelimit.h>
+#include <linux/moduleparam.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -52,6 +54,9 @@
 #include <xen/hvc-console.h>
 #include <xen/swiotlb-xen.h>
 #include <xen/balloon.h>
+#ifdef CONFIG_X86
+#include <asm/xen/cpuid.h>
+#endif
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
@@ -68,14 +73,25 @@ static int gnttab_free_count;
 static grant_ref_t gnttab_free_head;
 static DEFINE_SPINLOCK(gnttab_list_lock);
 struct grant_frames xen_auto_xlat_grant_frames;
+static unsigned int xen_gnttab_version;
+module_param_named(version, xen_gnttab_version, uint, 0);
 
 static union {
 	struct grant_entry_v1 *v1;
+	union grant_entry_v2 *v2;
 	void *addr;
 } gnttab_shared;
 
 /*This is a structure of function pointers for grant table*/
 struct gnttab_ops {
+	/*
+	 * Version of the grant interface.
+	 */
+	unsigned int version;
+	/*
+	 * Grant refs per grant frame.
+	 */
+	unsigned int grefs_per_grant_frame;
 	/*
 	 * Mapping a list of frames for storing grant entries. Frames parameter
 	 * is used to store grant table address when grant table being setup,
@@ -130,14 +146,15 @@ struct unmap_refs_callback_data {
 
 static const struct gnttab_ops *gnttab_interface;
 
-static int grant_table_version;
-static int grefs_per_grant_frame;
+/* This reflects the status of grant entries, so it acts as a global value. */
+static grant_status_t *grstatus;
 
 static struct gnttab_free_callback *gnttab_free_callback_list;
 
 static int gnttab_expand(unsigned int req_entries);
 
 #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+#define SPP (PAGE_SIZE / sizeof(grant_status_t))
 
 static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
 {
@@ -210,7 +227,7 @@ static void put_free_entry(grant_ref_t ref)
 }
 
 /*
- * Following applies to gnttab_update_entry_v1.
+ * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
  * Introducing a valid entry into the grant table:
  *  1. Write ent->domid.
  *  2. Write ent->frame:
@@ -229,6 +246,15 @@ static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
 	gnttab_shared.v1[ref].flags = flags;
 }
 
+static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
+				   unsigned long frame, unsigned int flags)
+{
+	gnttab_shared.v2[ref].hdr.domid = domid;
+	gnttab_shared.v2[ref].full_page.frame = frame;
+	wmb();	/* Hypervisor concurrent accesses. */
+	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
+}
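Both update_entry variants are reached through the public API declared just below; a minimal caller-side sketch (mirroring what pvcalls-front does later in this commit, with otherend_id and page as assumed inputs), where the v1/v2 choice stays hidden behind gnttab_interface:

	int ref = gnttab_grant_foreign_access(otherend_id,
					      pfn_to_gfn(virt_to_pfn(page)),
					      0 /* read-write */);
	if (ref < 0)
		return ref;
	/* ... the peer domain maps the page via ref ... */
	gnttab_end_foreign_access(ref, 0, 0 /* no page to free */);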
 /*
  * Public grant-issuing interface functions
  */
@@ -260,6 +286,11 @@ static int gnttab_query_foreign_access_v1(grant_ref_t ref)
 	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
 }
 
+static int gnttab_query_foreign_access_v2(grant_ref_t ref)
+{
+	return grstatus[ref] & (GTF_reading|GTF_writing);
+}
+
 int gnttab_query_foreign_access(grant_ref_t ref)
 {
 	return gnttab_interface->query_foreign_access(ref);
@@ -282,6 +313,29 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 	return 1;
 }
 
+static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
+{
+	gnttab_shared.v2[ref].hdr.flags = 0;
+	mb();	/* Concurrent access by hypervisor. */
+	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
+		return 0;
+	} else {
+		/*
+		 * The read of grstatus needs to have acquire semantics.
+		 * On x86, reads already have that, and we just need to
+		 * protect against compiler reorderings.
+		 * On other architectures we may need a full barrier.
+		 */
+#ifdef CONFIG_X86
+		barrier();
+#else
+		mb();
+#endif
+	}
+
+	return 1;
+}
+
 static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 {
 	return gnttab_interface->end_foreign_access_ref(ref, readonly);
@@ -442,6 +496,37 @@ static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
 	return frame;
 }
 
+static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
+{
+	unsigned long frame;
+	u16 flags;
+	u16 *pflags;
+
+	pflags = &gnttab_shared.v2[ref].hdr.flags;
+
+	/*
+	 * If a transfer is not even yet started, try to reclaim the grant
+	 * reference and return failure (== 0).
+	 */
+	while (!((flags = *pflags) & GTF_transfer_committed)) {
+		if (sync_cmpxchg(pflags, flags, 0) == flags)
+			return 0;
+		cpu_relax();
+	}
+
+	/* If a transfer is in progress then wait until it is completed. */
+	while (!(flags & GTF_transfer_completed)) {
+		flags = *pflags;
+		cpu_relax();
+	}
+
+	rmb();	/* Read the frame number /after/ reading completion status. */
+	frame = gnttab_shared.v2[ref].full_page.frame;
+	BUG_ON(frame == 0);
+
+	return frame;
+}
+
 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
 {
 	return gnttab_interface->end_foreign_transfer_ref(ref);
@@ -563,19 +648,26 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
 }
 EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
 
+static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
+{
+	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
+	       align;
+}
+
 static int grow_gnttab_list(unsigned int more_frames)
 {
 	unsigned int new_nr_grant_frames, extra_entries, i;
 	unsigned int nr_glist_frames, new_nr_glist_frames;
+	unsigned int grefs_per_frame;
 
-	BUG_ON(grefs_per_grant_frame == 0);
+	BUG_ON(gnttab_interface == NULL);
+	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
 
 	new_nr_grant_frames = nr_grant_frames + more_frames;
-	extra_entries = more_frames * grefs_per_grant_frame;
+	extra_entries = more_frames * grefs_per_frame;
 
-	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
-	new_nr_glist_frames =
-		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
+	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
 	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
 		if (!gnttab_list[i])
@@ -583,12 +675,12 @@ static int grow_gnttab_list(unsigned int more_frames)
 	}
 
-	for (i = grefs_per_grant_frame * nr_grant_frames;
-	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
+	for (i = grefs_per_frame * nr_grant_frames;
+	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
 		gnttab_entry(i) = i + 1;
 
 	gnttab_entry(i) = gnttab_free_head;
-	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
+	gnttab_free_head = grefs_per_frame * nr_grant_frames;
 	gnttab_free_count += extra_entries;
 
 	nr_grant_frames = new_nr_grant_frames;
@@ -938,6 +1030,12 @@ int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
 
+static unsigned int nr_status_frames(unsigned int nr_grant_frames)
+{
+	BUG_ON(gnttab_interface == NULL);
+	return gnttab_frames(nr_grant_frames, SPP);
+}
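Worked out with XEN_PAGE_SIZE = 4096: a struct grant_entry_v1 is 8 bytes, so v1 packs 512 grants per frame; a union grant_entry_v2 is 16 bytes, giving 256. grant_status_t is 2 bytes, so SPP = 2048 status entries per page, and nr_status_frames(n) = (n * 256 + 2047) / 2048, i.e. one status frame covers up to 8 v2 grant frames.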
 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
 {
 	int rc;
@@ -955,6 +1053,55 @@ static void gnttab_unmap_frames_v1(void)
 	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
 }
 
+static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
+{
+	uint64_t *sframes;
+	unsigned int nr_sframes;
+	struct gnttab_get_status_frames getframes;
+	int rc;
+
+	nr_sframes = nr_status_frames(nr_gframes);
+
+	/* No need for kzalloc as it is initialized by the following hypercall
+	 * GNTTABOP_get_status_frames.
+	 */
+	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
+	if (!sframes)
+		return -ENOMEM;
+
+	getframes.dom = DOMID_SELF;
+	getframes.nr_frames = nr_sframes;
+	set_xen_guest_handle(getframes.frame_list, sframes);
+
+	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
+				       &getframes, 1);
+	if (rc == -ENOSYS) {
+		kfree(sframes);
+		return -ENOSYS;
+	}
+
+	BUG_ON(rc || getframes.status);
+
+	rc = arch_gnttab_map_status(sframes, nr_sframes,
+				    nr_status_frames(gnttab_max_grant_frames()),
+				    &grstatus);
+	BUG_ON(rc);
+	kfree(sframes);
+
+	rc = arch_gnttab_map_shared(frames, nr_gframes,
+				    gnttab_max_grant_frames(),
+				    &gnttab_shared.addr);
+	BUG_ON(rc);
+
+	return 0;
+}
+
+static void gnttab_unmap_frames_v2(void)
+{
+	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
+}
+
 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 {
 	struct gnttab_setup_table setup;
@@ -1014,6 +1161,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 }
 
 static const struct gnttab_ops gnttab_v1_ops = {
+	.version			= 1,
+	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
+					  sizeof(struct grant_entry_v1),
 	.map_frames			= gnttab_map_frames_v1,
 	.unmap_frames			= gnttab_unmap_frames_v1,
 	.update_entry			= gnttab_update_entry_v1,
@@ -1022,14 +1172,56 @@ static const struct gnttab_ops gnttab_v1_ops = {
 	.query_foreign_access		= gnttab_query_foreign_access_v1,
 };
 
-static void gnttab_request_version(void)
+static const struct gnttab_ops gnttab_v2_ops = {
+	.version			= 2,
+	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
+					  sizeof(union grant_entry_v2),
+	.map_frames			= gnttab_map_frames_v2,
+	.unmap_frames			= gnttab_unmap_frames_v2,
+	.update_entry			= gnttab_update_entry_v2,
+	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
+	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
+	.query_foreign_access		= gnttab_query_foreign_access_v2,
+};
+
+static bool gnttab_need_v2(void)
 {
-	/* Only version 1 is used, which will always be available. */
-	grant_table_version = 1;
-	grefs_per_grant_frame = XEN_PAGE_SIZE / sizeof(struct grant_entry_v1);
-	gnttab_interface = &gnttab_v1_ops;
+#ifdef CONFIG_X86
+	uint32_t base, width;
+
+	if (xen_pv_domain()) {
+		base = xen_cpuid_base();
+		if (cpuid_eax(base) < 5)
+			return false;	/* Information not available, use V1. */
+		width = cpuid_ebx(base + 5) &
+			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
+		return width > 32 + PAGE_SHIFT;
+	}
+#endif
+	return !!(max_possible_pfn >> 32);
+}
 
-	pr_info("Grant tables using version %d layout\n", grant_table_version);
+static void gnttab_request_version(void)
+{
+	long rc;
+	struct gnttab_set_version gsv;
+
+	if (gnttab_need_v2())
+		gsv.version = 2;
+	else
+		gsv.version = 1;
+
+	/* Boot parameter overrides automatic selection. */
+	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
+		gsv.version = xen_gnttab_version;
+
+	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
+	if (rc == 0 && gsv.version == 2)
+		gnttab_interface = &gnttab_v2_ops;
+	else
+		gnttab_interface = &gnttab_v1_ops;
+	pr_info("Grant tables using version %d layout\n",
+		gnttab_interface->version);
 }
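For example, on x86 with 4 KiB pages the threshold is 32 + PAGE_SHIFT = 44 bits: a v1 entry stores the frame as a 32-bit field, which only covers machine addresses below 2^44, so a PV host advertising a wider machine address width forces the 64-bit-frame v2 layout; likewise an HVM/PVH guest with RAM above that boundary (max_possible_pfn >> 32 non-zero) needs v2. The module parameter added above can still force either version by hand.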
 
 static int gnttab_setup(void)
@@ -1069,10 +1261,10 @@ static int gnttab_expand(unsigned int req_entries)
 	int rc;
 	unsigned int cur, extra;
 
-	BUG_ON(grefs_per_grant_frame == 0);
+	BUG_ON(gnttab_interface == NULL);
 	cur = nr_grant_frames;
-	extra = ((req_entries + (grefs_per_grant_frame-1)) /
-		 grefs_per_grant_frame);
+	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
+		 gnttab_interface->grefs_per_grant_frame);
 	if (cur + extra > gnttab_max_grant_frames()) {
 		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
 				    " cur=%u extra=%u limit=%u"
@@ -1104,16 +1296,16 @@ int gnttab_init(void)
 	/* Determine the maximum number of frames required for the
 	 * grant reference free list on the current hypervisor.
 	 */
-	BUG_ON(grefs_per_grant_frame == 0);
+	BUG_ON(gnttab_interface == NULL);
 	max_nr_glist_frames = (max_nr_grant_frames *
-			       grefs_per_grant_frame / RPP);
+			       gnttab_interface->grefs_per_grant_frame / RPP);
 
 	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
 			      GFP_KERNEL);
 	if (gnttab_list == NULL)
 		return -ENOMEM;
 
-	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
 	for (i = 0; i < nr_glist_frames; i++) {
 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
 		if (gnttab_list[i] == NULL) {
@@ -1122,7 +1314,8 @@ int gnttab_init(void)
 		}
 	}
 
-	ret = arch_gnttab_init(max_nr_grant_frames);
+	ret = arch_gnttab_init(max_nr_grant_frames,
+			       nr_status_frames(max_nr_grant_frames));
 	if (ret < 0)
 		goto ini_nomem;
 
@@ -1131,7 +1324,8 @@ int gnttab_init(void)
 		goto ini_nomem;
 	}
 
-	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
+	nr_init_grefs = nr_grant_frames *
+			gnttab_interface->grefs_per_grant_frame;
 
 	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
 		gnttab_entry(i) = i + 1;
...
drivers/xen/manage.c

@@ -72,18 +72,15 @@ static int xen_suspend(void *data)
 	}
 
 	gnttab_suspend();
+	xen_manage_runstate_time(-1);
 	xen_arch_pre_suspend();
 
-	/*
-	 * This hypercall returns 1 if suspend was cancelled
-	 * or the domain was merely checkpointed, and 0 if it
-	 * is resuming in a new domain.
-	 */
 	si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
 					   ? virt_to_gfn(xen_start_info)
 					   : 0);
 
 	xen_arch_post_suspend(si->cancelled);
+	xen_manage_runstate_time(si->cancelled ? 1 : 0);
 	gnttab_resume();
 
 	if (!si->cancelled) {
...
drivers/xen/privcmd.c

@@ -191,13 +191,10 @@ static int traverse_pages_block(unsigned nelem, size_t size,
 			       void *state)
 {
 	void *pagedata;
-	unsigned pageidx;
 	int ret = 0;
 
 	BUG_ON(size > PAGE_SIZE);
 
-	pageidx = PAGE_SIZE;
-
 	while (nelem) {
 		int nr = (PAGE_SIZE/size);
 		struct page *page;
...
drivers/xen/pvcalls-back.c

@@ -1238,3 +1238,7 @@ static void __exit pvcalls_back_fin(void)
 }
 
 module_exit(pvcalls_back_fin);
+
+MODULE_DESCRIPTION("Xen PV Calls backend driver");
+MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
+MODULE_LICENSE("GPL");

drivers/xen/pvcalls-front.c (new file)
/*
* (c) 2017 Stefano Stabellini <stefano@aporeto.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>
#include "pvcalls-front.h"
#define PVCALLS_INVALID_ID UINT_MAX
#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
#define PVCALLS_FRONT_MAX_SPIN 5000
struct pvcalls_bedata {
struct xen_pvcalls_front_ring ring;
grant_ref_t ref;
int irq;
struct list_head socket_mappings;
spinlock_t socket_lock;
wait_queue_head_t inflight_req;
struct xen_pvcalls_response rsp[PVCALLS_NR_RSP_PER_RING];
};
/* Only one front/back connection supported. */
static struct xenbus_device *pvcalls_front_dev;
static atomic_t pvcalls_refcount;
/* first increment refcount, then proceed */
#define pvcalls_enter() { \
atomic_inc(&pvcalls_refcount); \
}
/* first complete other operations, then decrement refcount */
#define pvcalls_exit() { \
atomic_dec(&pvcalls_refcount); \
}
struct sock_mapping {
bool active_socket;
struct list_head list;
struct socket *sock;
union {
struct {
int irq;
grant_ref_t ref;
struct pvcalls_data_intf *ring;
struct pvcalls_data data;
struct mutex in_mutex;
struct mutex out_mutex;
wait_queue_head_t inflight_conn_req;
} active;
struct {
/* Socket status */
#define PVCALLS_STATUS_UNINITALIZED 0
#define PVCALLS_STATUS_BIND 1
#define PVCALLS_STATUS_LISTEN 2
uint8_t status;
/*
* Internal state-machine flags.
* Only one accept operation can be inflight for a socket.
* Only one poll operation can be inflight for a given socket.
*/
#define PVCALLS_FLAG_ACCEPT_INFLIGHT 0
#define PVCALLS_FLAG_POLL_INFLIGHT 1
#define PVCALLS_FLAG_POLL_RET 2
uint8_t flags;
uint32_t inflight_req_id;
struct sock_mapping *accept_map;
wait_queue_head_t inflight_accept_req;
} passive;
};
};
static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
{
*req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
if (RING_FULL(&bedata->ring) ||
bedata->rsp[*req_id].req_id != PVCALLS_INVALID_ID)
return -EAGAIN;
return 0;
}
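A worked example of the slot check: with __CONST_RING_SIZE yielding, say, 64 requests per page, req_prod_pvt & 63 names the slot about to be produced; the second test refuses to hand out a slot whose earlier response in bedata->rsp has not been consumed yet (its req_id is still valid), even if the ring itself is not full. (The 64 is only illustrative; the real count comes from the PVCALLS_NR_RSP_PER_RING macro above.)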
static bool pvcalls_front_write_todo(struct sock_mapping *map)
{
struct pvcalls_data_intf *intf = map->active.ring;
RING_IDX cons, prod, size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
int32_t error;
error = intf->out_error;
if (error == -ENOTCONN)
return false;
if (error != 0)
return true;
cons = intf->out_cons;
prod = intf->out_prod;
return !!(size - pvcalls_queued(prod, cons, size));
}
static bool pvcalls_front_read_todo(struct sock_mapping *map)
{
struct pvcalls_data_intf *intf = map->active.ring;
RING_IDX cons, prod;
int32_t error;
cons = intf->in_cons;
prod = intf->in_prod;
error = intf->in_error;
return (error != 0 ||
pvcalls_queued(prod, cons,
XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) != 0);
}
static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
{
struct xenbus_device *dev = dev_id;
struct pvcalls_bedata *bedata;
struct xen_pvcalls_response *rsp;
uint8_t *src, *dst;
int req_id = 0, more = 0, done = 0;
if (dev == NULL)
return IRQ_HANDLED;
pvcalls_enter();
bedata = dev_get_drvdata(&dev->dev);
if (bedata == NULL) {
pvcalls_exit();
return IRQ_HANDLED;
}
again:
while (RING_HAS_UNCONSUMED_RESPONSES(&bedata->ring)) {
rsp = RING_GET_RESPONSE(&bedata->ring, bedata->ring.rsp_cons);
req_id = rsp->req_id;
if (rsp->cmd == PVCALLS_POLL) {
struct sock_mapping *map = (struct sock_mapping *)(uintptr_t)
rsp->u.poll.id;
clear_bit(PVCALLS_FLAG_POLL_INFLIGHT,
(void *)&map->passive.flags);
/*
* clear INFLIGHT, then set RET. It pairs with
* the checks at the beginning of
* pvcalls_front_poll_passive.
*/
smp_wmb();
set_bit(PVCALLS_FLAG_POLL_RET,
(void *)&map->passive.flags);
} else {
dst = (uint8_t *)&bedata->rsp[req_id] +
sizeof(rsp->req_id);
src = (uint8_t *)rsp + sizeof(rsp->req_id);
memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
/*
* First copy the rest of the data, then req_id. It is
* paired with the barrier when accessing bedata->rsp.
*/
smp_wmb();
bedata->rsp[req_id].req_id = req_id;
}
done = 1;
bedata->ring.rsp_cons++;
}
RING_FINAL_CHECK_FOR_RESPONSES(&bedata->ring, more);
if (more)
goto again;
if (done)
wake_up(&bedata->inflight_req);
pvcalls_exit();
return IRQ_HANDLED;
}
static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
struct sock_mapping *map)
{
int i;
unbind_from_irqhandler(map->active.irq, map);
spin_lock(&bedata->socket_lock);
if (!list_empty(&map->list))
list_del_init(&map->list);
spin_unlock(&bedata->socket_lock);
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0);
gnttab_end_foreign_access(map->active.ref, 0, 0);
free_page((unsigned long)map->active.ring);
kfree(map);
}
static irqreturn_t pvcalls_front_conn_handler(int irq, void *sock_map)
{
struct sock_mapping *map = sock_map;
if (map == NULL)
return IRQ_HANDLED;
wake_up_interruptible(&map->active.inflight_conn_req);
return IRQ_HANDLED;
}
int pvcalls_front_socket(struct socket *sock)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map = NULL;
struct xen_pvcalls_request *req;
int notify, req_id, ret;
/*
* PVCalls only supports domain AF_INET,
* type SOCK_STREAM and protocol 0 sockets for now.
*
* Check socket type here, AF_INET and protocol checks are done
* by the caller.
*/
if (sock->type != SOCK_STREAM)
return -EOPNOTSUPP;
pvcalls_enter();
if (!pvcalls_front_dev) {
pvcalls_exit();
return -EACCES;
}
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL) {
pvcalls_exit();
return -ENOMEM;
}
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
kfree(map);
spin_unlock(&bedata->socket_lock);
pvcalls_exit();
return ret;
}
/*
* sock->sk->sk_send_head is not used for ip sockets: reuse the
* field to store a pointer to the struct sock_mapping
* corresponding to the socket. This way, we can easily get the
* struct sock_mapping from the struct socket.
*/
sock->sk->sk_send_head = (void *)map;
list_add_tail(&map->list, &bedata->socket_mappings);
req = RING_GET_REQUEST(&bedata->ring, req_id);
req->req_id = req_id;
req->cmd = PVCALLS_SOCKET;
req->u.socket.id = (uintptr_t) map;
req->u.socket.domain = AF_INET;
req->u.socket.type = SOCK_STREAM;
req->u.socket.protocol = IPPROTO_IP;
bedata->ring.req_prod_pvt++;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
spin_unlock(&bedata->socket_lock);
if (notify)
notify_remote_via_irq(bedata->irq);
wait_event(bedata->inflight_req,
READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
/* read req_id, then the content */
smp_rmb();
ret = bedata->rsp[req_id].ret;
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
pvcalls_exit();
return ret;
}
static int create_active(struct sock_mapping *map, int *evtchn)
{
void *bytes;
int ret = -ENOMEM, irq = -1, i;
*evtchn = -1;
init_waitqueue_head(&map->active.inflight_conn_req);
map->active.ring = (struct pvcalls_data_intf *)
__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (map->active.ring == NULL)
goto out_error;
map->active.ring->ring_order = PVCALLS_RING_ORDER;
bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
PVCALLS_RING_ORDER);
if (bytes == NULL)
goto out_error;
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
map->active.ring->ref[i] = gnttab_grant_foreign_access(
pvcalls_front_dev->otherend_id,
pfn_to_gfn(virt_to_pfn(bytes) + i), 0);
map->active.ref = gnttab_grant_foreign_access(
pvcalls_front_dev->otherend_id,
pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
map->active.data.in = bytes;
map->active.data.out = bytes +
XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
if (ret)
goto out_error;
irq = bind_evtchn_to_irqhandler(*evtchn, pvcalls_front_conn_handler,
0, "pvcalls-frontend", map);
if (irq < 0) {
ret = irq;
goto out_error;
}
map->active.irq = irq;
map->active_socket = true;
mutex_init(&map->active.in_mutex);
mutex_init(&map->active.out_mutex);
return 0;
out_error:
if (*evtchn >= 0)
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
kfree(map->active.data.in);
kfree(map->active.ring);
return ret;
}
int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
int addr_len, int flags)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map = NULL;
struct xen_pvcalls_request *req;
int notify, req_id, ret, evtchn;
if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
return -EOPNOTSUPP;
pvcalls_enter();
if (!pvcalls_front_dev) {
pvcalls_exit();
return -ENOTCONN;
}
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
map = (struct sock_mapping *)sock->sk->sk_send_head;
if (!map) {
pvcalls_exit();
return -ENOTSOCK;
}
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
pvcalls_exit();
return ret;
}
ret = create_active(map, &evtchn);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
pvcalls_exit();
return ret;
}
req = RING_GET_REQUEST(&bedata->ring, req_id);
req->req_id = req_id;
req->cmd = PVCALLS_CONNECT;
req->u.connect.id = (uintptr_t)map;
req->u.connect.len = addr_len;
req->u.connect.flags = flags;
req->u.connect.ref = map->active.ref;
req->u.connect.evtchn = evtchn;
memcpy(req->u.connect.addr, addr, sizeof(*addr));
map->sock = sock;
bedata->ring.req_prod_pvt++;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
spin_unlock(&bedata->socket_lock);
if (notify)
notify_remote_via_irq(bedata->irq);
wait_event(bedata->inflight_req,
READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
/* read req_id, then the content */
smp_rmb();
ret = bedata->rsp[req_id].ret;
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
pvcalls_exit();
return ret;
}
static int __write_ring(struct pvcalls_data_intf *intf,
struct pvcalls_data *data,
struct iov_iter *msg_iter,
int len)
{
RING_IDX cons, prod, size, masked_prod, masked_cons;
RING_IDX array_size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
int32_t error;
error = intf->out_error;
if (error < 0)
return error;
cons = intf->out_cons;
prod = intf->out_prod;
/* read indexes before continuing */
virt_mb();
size = pvcalls_queued(prod, cons, array_size);
if (size >= array_size)
return -EINVAL;
if (len > array_size - size)
len = array_size - size;
masked_prod = pvcalls_mask(prod, array_size);
masked_cons = pvcalls_mask(cons, array_size);
if (masked_prod < masked_cons) {
len = copy_from_iter(data->out + masked_prod, len, msg_iter);
} else {
if (len > array_size - masked_prod) {
int ret = copy_from_iter(data->out + masked_prod,
array_size - masked_prod, msg_iter);
if (ret != array_size - masked_prod) {
len = ret;
goto out;
}
len = ret + copy_from_iter(data->out, len - ret, msg_iter);
} else {
len = copy_from_iter(data->out + masked_prod, len, msg_iter);
}
}
out:
/* write to ring before updating pointer */
virt_wmb();
intf->out_prod += len;
return len;
}
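To illustrate the wrap-around branch above: take array_size = 4096, out_cons = 100 and out_prod = 4000 (masked values identical). 3900 bytes are queued, so at most 196 may be written; masked_prod (4000) is not below masked_cons (100), and len exceeds the 96 bytes left before the end of the buffer, so the writer copies those 96 bytes first, continues with the remaining bytes at offset 0, and only then publishes both pieces with a single out_prod update after the write barrier.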
int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
int sent, tot_sent = 0;
int count = 0, flags;
flags = msg->msg_flags;
if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
return -EOPNOTSUPP;
pvcalls_enter();
if (!pvcalls_front_dev) {
pvcalls_exit();
return -ENOTCONN;
}
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
map = (struct sock_mapping *) sock->sk->sk_send_head;
if (!map) {
pvcalls_exit();
return -ENOTSOCK;
}
mutex_lock(&map->active.out_mutex);
if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
mutex_unlock(&map->active.out_mutex);
pvcalls_exit();
return -EAGAIN;
}
if (len > INT_MAX)
len = INT_MAX;
again:
count++;
sent = __write_ring(map->active.ring,
&map->active.data, &msg->msg_iter,
len);
if (sent > 0) {
len -= sent;
tot_sent += sent;
notify_remote_via_irq(map->active.irq);
}
if (sent >= 0 && len > 0 && count < PVCALLS_FRONT_MAX_SPIN)
goto again;
if (sent < 0)
tot_sent = sent;
mutex_unlock(&map->active.out_mutex);
pvcalls_exit();
return tot_sent;
}
static int __read_ring(struct pvcalls_data_intf *intf,
struct pvcalls_data *data,
struct iov_iter *msg_iter,
size_t len, int flags)
{
RING_IDX cons, prod, size, masked_prod, masked_cons;
RING_IDX array_size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
int32_t error;
cons = intf->in_cons;
prod = intf->in_prod;
error = intf->in_error;
/* get pointers before reading from the ring */
virt_rmb();
if (error < 0)
return error;
size = pvcalls_queued(prod, cons, array_size);
masked_prod = pvcalls_mask(prod, array_size);
masked_cons = pvcalls_mask(cons, array_size);
if (size == 0)
return 0;
if (len > size)
len = size;
if (masked_prod > masked_cons) {
len = copy_to_iter(data->in + masked_cons, len, msg_iter);
} else {
if (len > (array_size - masked_cons)) {
int ret = copy_to_iter(data->in + masked_cons,
array_size - masked_cons, msg_iter);
if (ret != array_size - masked_cons) {
len = ret;
goto out;
}
len = ret + copy_to_iter(data->in, len - ret, msg_iter);
} else {
len = copy_to_iter(data->in + masked_cons, len, msg_iter);
}
}
out:
/* read data from the ring before increasing the index */
virt_mb();
if (!(flags & MSG_PEEK))
intf->in_cons += len;
return len;
}
int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct pvcalls_bedata *bedata;
int ret;
struct sock_mapping *map;
if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))
return -EOPNOTSUPP;
pvcalls_enter();
if (!pvcalls_front_dev) {
pvcalls_exit();
return -ENOTCONN;
}
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
map = (struct sock_mapping *) sock->sk->sk_send_head;
if (!map) {
pvcalls_exit();
return -ENOTSOCK;
}
mutex_lock(&map->active.in_mutex);
if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
while (!(flags & MSG_DONTWAIT) && !pvcalls_front_read_todo(map)) {
wait_event_interruptible(map->active.inflight_conn_req,
pvcalls_front_read_todo(map));
}
ret = __read_ring(map->active.ring, &map->active.data,
&msg->msg_iter, len, flags);
if (ret > 0)
notify_remote_via_irq(map->active.irq);
if (ret == 0)
ret = (flags & MSG_DONTWAIT) ? -EAGAIN : 0;
if (ret == -ENOTCONN)
ret = 0;
mutex_unlock(&map->active.in_mutex);
pvcalls_exit();
return ret;
}
int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map = NULL;
struct xen_pvcalls_request *req;
int notify, req_id, ret;
if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
return -EOPNOTSUPP;
pvcalls_enter();
if (!pvcalls_front_dev) {
pvcalls_exit();
return -ENOTCONN;
}
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
map = (struct sock_mapping *) sock->sk->sk_send_head;
if (map == NULL) {
pvcalls_exit();
return -ENOTSOCK;
}
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
pvcalls_exit();
return ret;
}
req = RING_GET_REQUEST(&bedata->ring, req_id);
req->req_id = req_id;
map->sock = sock;
req->cmd = PVCALLS_BIND;
req->u.bind.id = (uintptr_t)map;
memcpy(req->u.bind.addr, addr, sizeof(*addr));
req->u.bind.len = addr_len;
init_waitqueue_head(&map->passive.inflight_accept_req);
map->active_socket = false;
bedata->ring.req_prod_pvt++;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
spin_unlock(&bedata->socket_lock);
if (notify)
notify_remote_via_irq(bedata->irq);
wait_event(bedata->inflight_req,
READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
/* read req_id, then the content */
smp_rmb();
ret = bedata->rsp[req_id].ret;
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.status = PVCALLS_STATUS_BIND;
pvcalls_exit();
return 0;
}
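/*
 * [Editor's note] bind, listen, accept and release all follow the same
 * request/response discipline seen above: reserve a slot under
 * socket_lock, fill the request, push and notify, then sleep until the
 * backend echoes req_id into rsp[req_id].req_id.  The smp_rmb() makes
 * sure the reply body (rsp[req_id].ret) is read only after the id
 * check; the backend is assumed to write the body first and publish
 * the id last.  A sketch of that publish/consume pairing with C11
 * atomics (rsp_slot, backend_reply, frontend_wait are hypothetical
 * names):
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdatomic.h>
#include <stdint.h>

#define INVALID_ID UINT32_MAX		/* plays PVCALLS_INVALID_ID */

struct rsp_slot {
	_Atomic uint32_t req_id;	/* INVALID_ID until a reply lands */
	int ret;			/* reply body, written before req_id */
};

/* backend: write the body, then publish by storing the matching id */
static void backend_reply(struct rsp_slot *slot, uint32_t id, int ret)
{
	slot->ret = ret;
	atomic_store_explicit(&slot->req_id, id, memory_order_release);
}

/* frontend: polls here for brevity; the driver sleeps in wait_event() */
static int frontend_wait(struct rsp_slot *slot, uint32_t id)
{
	while (atomic_load_explicit(&slot->req_id,
				    memory_order_acquire) != id)
		;
	/* acquire pairs with the release store, like the smp_rmb() above */
	int ret = slot->ret;
	atomic_store_explicit(&slot->req_id, INVALID_ID,
			      memory_order_relaxed);	/* recycle the slot */
	return ret;
}
#endif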
int pvcalls_front_listen(struct socket *sock, int backlog)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
struct xen_pvcalls_request *req;
int notify, req_id, ret;
pvcalls_enter();
if (!pvcalls_front_dev) {
pvcalls_exit();
return -ENOTCONN;
}
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
map = (struct sock_mapping *) sock->sk->sk_send_head;
if (!map) {
pvcalls_exit();
return -ENOTSOCK;
}
if (map->passive.status != PVCALLS_STATUS_BIND) {
pvcalls_exit();
return -EOPNOTSUPP;
}
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
pvcalls_exit();
return ret;
}
req = RING_GET_REQUEST(&bedata->ring, req_id);
req->req_id = req_id;
req->cmd = PVCALLS_LISTEN;
req->u.listen.id = (uintptr_t) map;
req->u.listen.backlog = backlog;
bedata->ring.req_prod_pvt++;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
spin_unlock(&bedata->socket_lock);
if (notify)
notify_remote_via_irq(bedata->irq);
wait_event(bedata->inflight_req,
READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
/* read req_id, then the content */
smp_rmb();
ret = bedata->rsp[req_id].ret;
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.status = PVCALLS_STATUS_LISTEN;
pvcalls_exit();
return ret;
}
int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
struct sock_mapping *map2 = NULL;
struct xen_pvcalls_request *req;
int notify, req_id, ret, evtchn, nonblock;
pvcalls_enter();
if (!pvcalls_front_dev) {
pvcalls_exit();
return -ENOTCONN;
}
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
map = (struct sock_mapping *) sock->sk->sk_send_head;
if (!map) {
pvcalls_exit();
return -ENOTSOCK;
}
if (map->passive.status != PVCALLS_STATUS_LISTEN) {
pvcalls_exit();
return -EINVAL;
}
nonblock = flags & SOCK_NONBLOCK;
	/*
	 * The backend only supports one inflight accept request; it will
	 * return errors for any others.
	 */
if (test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags)) {
req_id = READ_ONCE(map->passive.inflight_req_id);
if (req_id != PVCALLS_INVALID_ID &&
READ_ONCE(bedata->rsp[req_id].req_id) == req_id) {
map2 = map->passive.accept_map;
goto received;
}
if (nonblock) {
pvcalls_exit();
return -EAGAIN;
}
if (wait_event_interruptible(map->passive.inflight_accept_req,
!test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags))) {
pvcalls_exit();
return -EINTR;
}
}
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
pvcalls_exit();
return ret;
}
map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
if (map2 == NULL) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
pvcalls_exit();
return -ENOMEM;
}
ret = create_active(map2, &evtchn);
if (ret < 0) {
kfree(map2);
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
pvcalls_exit();
return ret;
}
list_add_tail(&map2->list, &bedata->socket_mappings);
req = RING_GET_REQUEST(&bedata->ring, req_id);
req->req_id = req_id;
req->cmd = PVCALLS_ACCEPT;
req->u.accept.id = (uintptr_t) map;
req->u.accept.ref = map2->active.ref;
req->u.accept.id_new = (uintptr_t) map2;
req->u.accept.evtchn = evtchn;
map->passive.accept_map = map2;
bedata->ring.req_prod_pvt++;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
spin_unlock(&bedata->socket_lock);
if (notify)
notify_remote_via_irq(bedata->irq);
/* We could check if we have received a response before returning. */
if (nonblock) {
WRITE_ONCE(map->passive.inflight_req_id, req_id);
pvcalls_exit();
return -EAGAIN;
}
if (wait_event_interruptible(bedata->inflight_req,
READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
pvcalls_exit();
return -EINTR;
}
/* read req_id, then the content */
smp_rmb();
received:
map2->sock = newsock;
newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
if (!newsock->sk) {
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
pvcalls_front_free_map(bedata, map2);
pvcalls_exit();
return -ENOMEM;
}
newsock->sk->sk_send_head = (void *)map2;
ret = bedata->rsp[req_id].ret;
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
wake_up(&map->passive.inflight_accept_req);
pvcalls_exit();
return ret;
}
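/*
 * [Editor's note] The accept path serializes callers because the
 * backend handles a single inflight accept: test_and_set_bit() on
 * PVCALLS_FLAG_ACCEPT_INFLIGHT elects one winner, nonblocking losers
 * get -EAGAIN, and blocking losers sleep until the clear_bit() plus
 * wake_up() above hand the gate over.  A compact model of that gate
 * using a C11 atomic_flag (the accept_gate_* names are hypothetical):
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag accept_inflight = ATOMIC_FLAG_INIT;

static bool accept_gate_enter(bool nonblock)
{
	while (atomic_flag_test_and_set(&accept_inflight)) {
		if (nonblock)
			return false;	/* caller maps this to -EAGAIN */
		/* the driver sleeps on passive.inflight_accept_req here */
	}
	return true;	/* gate owned; proceed to send PVCALLS_ACCEPT */
}

static void accept_gate_exit(void)
{
	/* clear_bit() + wake_up(&map->passive.inflight_accept_req) */
	atomic_flag_clear(&accept_inflight);
}
#endif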
static unsigned int pvcalls_front_poll_passive(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
{
int notify, req_id, ret;
struct xen_pvcalls_request *req;
if (test_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags)) {
uint32_t req_id = READ_ONCE(map->passive.inflight_req_id);
if (req_id != PVCALLS_INVALID_ID &&
READ_ONCE(bedata->rsp[req_id].req_id) == req_id)
return POLLIN | POLLRDNORM;
poll_wait(file, &map->passive.inflight_accept_req, wait);
return 0;
}
if (test_and_clear_bit(PVCALLS_FLAG_POLL_RET,
(void *)&map->passive.flags))
return POLLIN | POLLRDNORM;
/*
* First check RET, then INFLIGHT. No barriers necessary to
* ensure execution ordering because of the conditional
* instructions creating control dependencies.
*/
if (test_and_set_bit(PVCALLS_FLAG_POLL_INFLIGHT,
(void *)&map->passive.flags)) {
poll_wait(file, &bedata->inflight_req, wait);
return 0;
}
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
return ret;
}
req = RING_GET_REQUEST(&bedata->ring, req_id);
req->req_id = req_id;
req->cmd = PVCALLS_POLL;
req->u.poll.id = (uintptr_t) map;
bedata->ring.req_prod_pvt++;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
spin_unlock(&bedata->socket_lock);
if (notify)
notify_remote_via_irq(bedata->irq);
poll_wait(file, &bedata->inflight_req, wait);
return 0;
}
static unsigned int pvcalls_front_poll_active(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
{
unsigned int mask = 0;
int32_t in_error, out_error;
struct pvcalls_data_intf *intf = map->active.ring;
out_error = intf->out_error;
in_error = intf->in_error;
poll_wait(file, &map->active.inflight_conn_req, wait);
if (pvcalls_front_write_todo(map))
mask |= POLLOUT | POLLWRNORM;
if (pvcalls_front_read_todo(map))
mask |= POLLIN | POLLRDNORM;
if (in_error != 0 || out_error != 0)
mask |= POLLERR;
return mask;
}
unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
int ret;
pvcalls_enter();
if (!pvcalls_front_dev) {
pvcalls_exit();
return POLLNVAL;
}
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
map = (struct sock_mapping *) sock->sk->sk_send_head;
if (!map) {
pvcalls_exit();
return POLLNVAL;
}
if (map->active_socket)
ret = pvcalls_front_poll_active(file, bedata, map, wait);
else
ret = pvcalls_front_poll_passive(file, bedata, map, wait);
pvcalls_exit();
return ret;
}
int pvcalls_front_release(struct socket *sock)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
int req_id, notify, ret;
struct xen_pvcalls_request *req;
if (sock->sk == NULL)
return 0;
pvcalls_enter();
if (!pvcalls_front_dev) {
pvcalls_exit();
return -EIO;
}
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
map = (struct sock_mapping *) sock->sk->sk_send_head;
if (map == NULL) {
pvcalls_exit();
return 0;
}
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
pvcalls_exit();
return ret;
}
sock->sk->sk_send_head = NULL;
req = RING_GET_REQUEST(&bedata->ring, req_id);
req->req_id = req_id;
req->cmd = PVCALLS_RELEASE;
req->u.release.id = (uintptr_t)map;
bedata->ring.req_prod_pvt++;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
spin_unlock(&bedata->socket_lock);
if (notify)
notify_remote_via_irq(bedata->irq);
wait_event(bedata->inflight_req,
READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
if (map->active_socket) {
/*
* Set in_error and wake up inflight_conn_req to force
* recvmsg waiters to exit.
*/
map->active.ring->in_error = -EBADF;
wake_up_interruptible(&map->active.inflight_conn_req);
/*
* We need to make sure that sendmsg/recvmsg on this socket have
* not started before we've cleared sk_send_head here. The
* easiest (though not optimal) way to guarantee this is to see
* that no pvcall (other than us) is in progress.
*/
while (atomic_read(&pvcalls_refcount) > 1)
cpu_relax();
pvcalls_front_free_map(bedata, map);
} else {
spin_lock(&bedata->socket_lock);
list_del(&map->list);
spin_unlock(&bedata->socket_lock);
if (READ_ONCE(map->passive.inflight_req_id) !=
PVCALLS_INVALID_ID) {
pvcalls_front_free_map(bedata,
map->passive.accept_map);
}
kfree(map);
}
WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
pvcalls_exit();
return 0;
}
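/*
 * [Editor's note] The "> 1" spin above is a quiesce-by-refcount idiom:
 * every entry point brackets itself with pvcalls_enter()/pvcalls_exit(),
 * so once sk_send_head is cleared, a count of exactly 1 (the releaser
 * itself) proves no sendmsg/recvmsg still holds the map.  A minimal
 * standalone rendering of the idiom (enter, exit_section and
 * quiesce_and_free are hypothetical names):
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdatomic.h>

static _Atomic int refcount;		/* plays pvcalls_refcount */

static void enter(void)        { atomic_fetch_add(&refcount, 1); }
static void exit_section(void) { atomic_fetch_sub(&refcount, 1); }

static void quiesce_and_free(void)
{
	/*
	 * The caller holds one reference, so "> 1" means another
	 * in-flight call could still dereference the mapping; busy-wait
	 * (cpu_relax() in the kernel) until only we remain.
	 */
	while (atomic_load(&refcount) > 1)
		;
	/* now safe to free the active map */
}
#endif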
static const struct xenbus_device_id pvcalls_front_ids[] = {
{ "pvcalls" },
{ "" }
};
static int pvcalls_front_remove(struct xenbus_device *dev)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map = NULL, *n;
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
dev_set_drvdata(&dev->dev, NULL);
pvcalls_front_dev = NULL;
if (bedata->irq >= 0)
unbind_from_irqhandler(bedata->irq, dev);
list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
map->sock->sk->sk_send_head = NULL;
if (map->active_socket) {
map->active.ring->in_error = -EBADF;
wake_up_interruptible(&map->active.inflight_conn_req);
}
}
smp_mb();
while (atomic_read(&pvcalls_refcount) > 0)
cpu_relax();
list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
if (map->active_socket) {
/* No need to lock, refcount is 0 */
pvcalls_front_free_map(bedata, map);
} else {
list_del(&map->list);
kfree(map);
}
}
if (bedata->ref >= 0)
gnttab_end_foreign_access(bedata->ref, 0, 0);
kfree(bedata->ring.sring);
kfree(bedata);
xenbus_switch_state(dev, XenbusStateClosed);
return 0;
}
static int pvcalls_front_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int ret = -ENOMEM, evtchn, i;
unsigned int max_page_order, function_calls, len;
char *versions;
grant_ref_t gref_head = 0;
struct xenbus_transaction xbt;
struct pvcalls_bedata *bedata = NULL;
struct xen_pvcalls_sring *sring;
if (pvcalls_front_dev != NULL) {
dev_err(&dev->dev, "only one PV Calls connection supported\n");
return -EINVAL;
}
versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
if (!len)
return -EINVAL;
if (strcmp(versions, "1")) {
kfree(versions);
return -EINVAL;
}
kfree(versions);
max_page_order = xenbus_read_unsigned(dev->otherend,
"max-page-order", 0);
if (max_page_order < PVCALLS_RING_ORDER)
return -ENODEV;
function_calls = xenbus_read_unsigned(dev->otherend,
"function-calls", 0);
/* See XENBUS_FUNCTIONS_CALLS in pvcalls.h */
if (function_calls != 1)
return -ENODEV;
pr_info("%s max-page-order is %u\n", __func__, max_page_order);
bedata = kzalloc(sizeof(struct pvcalls_bedata), GFP_KERNEL);
if (!bedata)
return -ENOMEM;
dev_set_drvdata(&dev->dev, bedata);
pvcalls_front_dev = dev;
init_waitqueue_head(&bedata->inflight_req);
INIT_LIST_HEAD(&bedata->socket_mappings);
spin_lock_init(&bedata->socket_lock);
bedata->irq = -1;
bedata->ref = -1;
for (i = 0; i < PVCALLS_NR_RSP_PER_RING; i++)
bedata->rsp[i].req_id = PVCALLS_INVALID_ID;
sring = (struct xen_pvcalls_sring *) __get_free_page(GFP_KERNEL |
__GFP_ZERO);
if (!sring)
goto error;
SHARED_RING_INIT(sring);
FRONT_RING_INIT(&bedata->ring, sring, XEN_PAGE_SIZE);
ret = xenbus_alloc_evtchn(dev, &evtchn);
if (ret)
goto error;
bedata->irq = bind_evtchn_to_irqhandler(evtchn,
pvcalls_front_event_handler,
0, "pvcalls-frontend", dev);
if (bedata->irq < 0) {
ret = bedata->irq;
goto error;
}
ret = gnttab_alloc_grant_references(1, &gref_head);
if (ret < 0)
goto error;
ret = gnttab_claim_grant_reference(&gref_head);
if (ret < 0)
goto error;
bedata->ref = ret;
gnttab_grant_foreign_access_ref(bedata->ref, dev->otherend_id,
virt_to_gfn((void *)sring), 0);
again:
ret = xenbus_transaction_start(&xbt);
if (ret) {
xenbus_dev_fatal(dev, ret, "starting transaction");
goto error;
}
ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
if (ret)
goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", bedata->ref);
if (ret)
goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "port", "%u",
evtchn);
if (ret)
goto error_xenbus;
ret = xenbus_transaction_end(xbt, 0);
if (ret) {
if (ret == -EAGAIN)
goto again;
xenbus_dev_fatal(dev, ret, "completing transaction");
goto error;
}
xenbus_switch_state(dev, XenbusStateInitialised);
return 0;
error_xenbus:
xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, ret, "writing xenstore");
error:
pvcalls_front_remove(dev);
return ret;
}
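/*
 * [Editor's note] The probe above publishes exactly three keys in one
 * xenstore transaction -- "version", "ring-ref" and "port".  A plausible
 * backend-side counterpart would read the same triple from the
 * frontend's node before mapping the command ring and binding the event
 * channel; the sketch below assumes the standard xenbus_scanf() helper
 * and a hypothetical read_frontend_keys() (the real backend lives in
 * pvcalls-back.c and does considerably more error handling).
 */
#if 0	/* illustrative sketch, not driver code */
#include <xen/xenbus.h>

static int read_frontend_keys(struct xenbus_device *dev)
{
	unsigned int version, ring_ref, evtchn;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "version", "%u",
			 &version) != 1)
		return -EINVAL;
	if (xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u",
			 &ring_ref) != 1)
		return -EINVAL;
	if (xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			 &evtchn) != 1)
		return -EINVAL;
	/* map ring_ref, bind evtchn, then advance the xenbus state */
	return 0;
}
#endif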
static void pvcalls_front_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
switch (backend_state) {
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateUnknown:
break;
case XenbusStateInitWait:
break;
case XenbusStateConnected:
xenbus_switch_state(dev, XenbusStateConnected);
break;
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state */
/* fall through */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
}
}
static struct xenbus_driver pvcalls_front_driver = {
.ids = pvcalls_front_ids,
.probe = pvcalls_front_probe,
.remove = pvcalls_front_remove,
.otherend_changed = pvcalls_front_changed,
};
static int __init pvcalls_frontend_init(void)
{
if (!xen_domain())
return -ENODEV;
pr_info("Initialising Xen pvcalls frontend driver\n");
return xenbus_register_frontend(&pvcalls_front_driver);
}
module_init(pvcalls_frontend_init);
MODULE_DESCRIPTION("Xen PV Calls frontend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");
#ifndef __PVCALLS_FRONT_H__
#define __PVCALLS_FRONT_H__
#include <linux/net.h>
int pvcalls_front_socket(struct socket *sock);
int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
int addr_len, int flags);
int pvcalls_front_bind(struct socket *sock,
struct sockaddr *addr,
int addr_len);
int pvcalls_front_listen(struct socket *sock, int backlog);
int pvcalls_front_accept(struct socket *sock,
struct socket *newsock,
int flags);
int pvcalls_front_sendmsg(struct socket *sock,
struct msghdr *msg,
size_t len);
int pvcalls_front_recvmsg(struct socket *sock,
struct msghdr *msg,
size_t len,
int flags);
unsigned int pvcalls_front_poll(struct file *file,
struct socket *sock,
poll_table *wait);
int pvcalls_front_release(struct socket *sock);
#endif
@@ -6,6 +6,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/math64.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>

 #include <asm/paravirt.h>
 #include <asm/xen/hypervisor.h>
@@ -20,6 +21,8 @@
 /* runstate info updated by Xen */
 static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

+static DEFINE_PER_CPU(u64[4], old_runstate_time);
+
 /* return a consistent snapshot of 64-bit time/counter value */
 static u64 get64(const u64 *p)
 {
@@ -48,8 +51,8 @@ static u64 get64(const u64 *p)
 	return ret;
 }

-static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
-					  unsigned int cpu)
+static void xen_get_runstate_snapshot_cpu_delta(
+			      struct vcpu_runstate_info *res, unsigned int cpu)
 {
 	u64 state_time;
 	struct vcpu_runstate_info *state;
@@ -67,6 +70,71 @@ static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
 		 (state_time & XEN_RUNSTATE_UPDATE));
 }

+static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
+					  unsigned int cpu)
+{
+	int i;
+
+	xen_get_runstate_snapshot_cpu_delta(res, cpu);
+
+	for (i = 0; i < 4; i++)
+		res->time[i] += per_cpu(old_runstate_time, cpu)[i];
+}
+
+void xen_manage_runstate_time(int action)
+{
+	static struct vcpu_runstate_info *runstate_delta;
+	struct vcpu_runstate_info state;
+	int cpu, i;
+
+	switch (action) {
+	case -1: /* backup runstate time before suspend */
+		if (unlikely(runstate_delta))
+			pr_warn_once("%s: memory leak as runstate_delta is not NULL\n",
+					__func__);
+
+		runstate_delta = kmalloc_array(num_possible_cpus(),
+					sizeof(*runstate_delta),
+					GFP_ATOMIC);
+		if (unlikely(!runstate_delta)) {
+			pr_warn("%s: failed to allocate runstate_delta\n",
+					__func__);
+			return;
+		}
+
+		for_each_possible_cpu(cpu) {
+			xen_get_runstate_snapshot_cpu_delta(&state, cpu);
+			memcpy(runstate_delta[cpu].time, state.time,
+					sizeof(runstate_delta[cpu].time));
+		}
+
+		break;
+
+	case 0: /* backup runstate time after resume */
+		if (unlikely(!runstate_delta)) {
+			pr_warn("%s: cannot accumulate runstate time as runstate_delta is NULL\n",
+					__func__);
+			return;
+		}
+
+		for_each_possible_cpu(cpu) {
+			for (i = 0; i < 4; i++)
+				per_cpu(old_runstate_time, cpu)[i] +=
+					runstate_delta[cpu].time[i];
+		}
+
+		break;
+
+	default: /* do not accumulate runstate time for checkpointing */
+		break;
+	}
+
+	if (action != -1 && runstate_delta) {
+		kfree(runstate_delta);
+		runstate_delta = NULL;
+	}
+}
+
 /*
  * Runstate accounting
  */
...
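Editor's note: xen_manage_runstate_time() exists because the hypervisor's
per-vcpu runstate counters restart from zero after a live migration.
action == -1 snapshots the raw counters before suspend, action == 0 folds
that snapshot into old_runstate_time after resume, and any other action
discards the snapshot (the checkpoint case, where the same host keeps
counting).  The arithmetic in a standalone, single-cpu userspace model
(all names below are hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint64_t hv_time[4];	/* what Xen reports; resets on migration */
static uint64_t old_time[4];	/* accumulated pre-migration totals */
static uint64_t saved[4];	/* snapshot taken before suspend */

static void before_suspend(void)		/* action == -1 */
{
	for (int i = 0; i < 4; i++)
		saved[i] = hv_time[i];
}

static void after_resume(void)			/* action == 0 */
{
	for (int i = 0; i < 4; i++)
		old_time[i] += saved[i];
}

static uint64_t total(int state)	/* xen_get_runstate_snapshot_cpu() view */
{
	return old_time[state] + hv_time[state];
}

int main(void)
{
	hv_time[0] = 1000;	/* 1000 ns running before migration */
	before_suspend();
	hv_time[0] = 0;		/* destination host starts counting afresh */
	after_resume();
	hv_time[0] = 50;	/* running time since resume */
	printf("%llu\n", (unsigned long long)total(0));	/* 1050, monotonic */
	return 0;
}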
@@ -379,10 +379,12 @@ static void xenbus_reset_frontend(char *fe, char *be, int be_state)
 	case XenbusStateConnected:
 		xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing);
 		xenbus_reset_wait_for_backend(be, XenbusStateClosing);
+		/* fall through */

 	case XenbusStateClosing:
 		xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed);
 		xenbus_reset_wait_for_backend(be, XenbusStateClosed);
+		/* fall through */

 	case XenbusStateClosed:
 		xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising);
...
@@ -174,10 +174,13 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
 	unmap->dev_bus_addr = 0;
 }

-int arch_gnttab_init(unsigned long nr_shared);
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
 int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   void **__shared);
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+			   unsigned long max_nr_gframes,
+			   grant_status_t **__shared);
 void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

 struct grant_frames {
...
@@ -178,4 +178,46 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
 /* Send an NMI to the specified VCPU. @extra_arg == NULL. */
 #define VCPUOP_send_nmi             11

+/*
+ * Get the physical ID information for a pinned vcpu's underlying physical
+ * processor.  The physical ID information is architecture-specific.
+ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
+ * This command returns -EINVAL if it is not a valid operation for this VCPU.
+ */
+#define VCPUOP_get_physid           12 /* arg == vcpu_get_physid_t */
+struct vcpu_get_physid {
+	uint64_t phys_id;
+};
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_get_physid);
+#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
+#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))
+
+/*
+ * Register a memory location to get a secondary copy of the vcpu time
+ * parameters.  The master copy still exists as part of the vcpu shared
+ * memory area, and this secondary copy is updated whenever the master copy
+ * is updated (and using the same versioning scheme for synchronisation).
+ *
+ * The intent is that this copy may be mapped (RO) into userspace so
+ * that usermode can compute system time using the time info and the
+ * tsc.  Usermode will see an array of vcpu_time_info structures, one
+ * for each vcpu, and choose the right one by an existing mechanism
+ * which allows it to get the current vcpu number (such as via a
+ * segment limit).  It can then apply the normal algorithm to compute
+ * system time from the tsc.
+ *
+ * @extra_arg == pointer to vcpu_register_time_info_memory_area structure.
+ */
+#define VCPUOP_register_vcpu_time_memory_area   13
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_time_info);
+struct vcpu_register_time_memory_area {
+	union {
+		GUEST_HANDLE(vcpu_time_info) h;
+		struct pvclock_vcpu_time_info *v;
+		uint64_t p;
+	} addr;
+};
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_time_memory_area);
+
 #endif /* __XEN_PUBLIC_VCPU_H__ */
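Editor's note: the secondary vcpu_time_info copy registered through
VCPUOP_register_vcpu_time_memory_area is what the vdso maps read-only.
Userspace then derives nanoseconds by sampling the TSC under the even/odd
version protocol and applying the 32.32 fixed-point scaling the comment
above describes.  A hedged sketch of that read loop, with the structure
re-declared locally (layout assumed from pvclock-abi.h) rather than taken
from a uapi header:

#include <stdint.h>
#include <x86intrin.h>		/* __rdtsc() */

struct pvti_sketch {
	uint32_t version;	/* odd while the hypervisor updates it */
	uint32_t pad0;
	uint64_t tsc_timestamp;
	uint64_t system_time;	/* ns at tsc_timestamp */
	uint32_t tsc_to_system_mul;
	int8_t   tsc_shift;
	uint8_t  flags;
	uint8_t  pad[2];
};

static uint64_t pvclock_read_ns(const volatile struct pvti_sketch *t)
{
	uint32_t version;
	uint64_t delta, ns;

	do {
		version = t->version;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* like virt_rmb() */
		delta = __rdtsc() - t->tsc_timestamp;
		if (t->tsc_shift >= 0)
			delta <<= t->tsc_shift;
		else
			delta >>= -t->tsc_shift;
		/* 32.32 fixed point: ticks * mul >> 32 yields nanoseconds */
		ns = t->system_time +
		     (uint64_t)(((unsigned __int128)delta *
				 t->tsc_to_system_mul) >> 32);
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while ((version & 1) || version != t->version);	/* retry on update */

	return ns;
}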
@@ -33,6 +33,7 @@ void xen_resume_notifier_unregister(struct notifier_block *nb);
 bool xen_vcpu_stolen(int vcpu);
 void xen_setup_runstate_info(int cpu);
 void xen_time_setup_guest(void);
+void xen_manage_runstate_time(int action);
 void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
 u64 xen_steal_clock(int cpu);
@@ -104,6 +105,8 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       struct page **pages);
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages);
+
+#ifdef CONFIG_XEN_AUTO_XLATE
 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      unsigned long addr,
			      xen_pfn_t *gfn, int nr,
@@ -112,6 +115,28 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      struct page **pages);
 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages);
+#else
+/*
+ * These two functions are called from arch/x86/xen/mmu.c and so stubs
+ * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
+ */
+static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+					    unsigned long addr,
+					    xen_pfn_t *gfn, int nr,
+					    int *err_ptr, pgprot_t prot,
+					    unsigned int domid,
+					    struct page **pages)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+					    int nr, struct page **pages)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
 int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
				  unsigned long nr_grant_frames);
...