Commit a701f370 authored by Linus Torvalds

Merge tag 'for-linus-5.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - A bunch of minor cleanups

 - A fix for kexec in Xen dom0 when executed on a CPU with a high number

 - A fix for resuming after suspend of a Xen guest with assigned PCI
   devices

 - A fix for a crash caused by preemption not being disabled when
   resuming as Xen dom0

* tag 'for-linus-5.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: fix is_xen_pmu()
  xen: don't hang when resuming PCI device
  arch:x86:xen: Remove unnecessary assignment in xen_apic_read()
  xen/grant-table: remove readonly parameter from functions
  xen/grant-table: remove gnttab_*transfer*() functions
  drivers/xen: use helper macro __ATTR_RW
  x86/xen: Fix kerneldoc warning
  xen: delay xen_hvm_init_time_ops() if kdump is boot on vcpu>=32
  xen: use time_is_before_eq_jiffies() instead of open coding it
parents 72030621 de2ae403
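Most of the churn below comes from the grant-table commits: the readonly parameter is dropped from gnttab_end_foreign_access() and gnttab_end_foreign_access_ref(), and the unused gnttab_*transfer*() functions are removed. A minimal before/after sketch of a typical caller (illustrative only, not a line from this diff):

    /* Before: a readonly flag (0 == read/write) sat between the
     * grant reference and the backing page. */
    gnttab_end_foreign_access(ref, 0, (unsigned long)page_address(page));

    /* After: only the grant reference and the page (or 0UL when
     * there is no page to release) are passed. */
    gnttab_end_foreign_access(ref, (unsigned long)page_address(page));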
......@@ -51,7 +51,7 @@ static u32 xen_apic_read(u32 reg)
.interface_version = XENPF_INTERFACE_VERSION,
.u.pcpu_info.xen_cpuid = 0,
};
int ret = 0;
int ret;
/* Shouldn't need this as APIC is turned off for PV, and we only
* get called on the bootup processor. But just in case. */
......
......@@ -506,10 +506,7 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
return ret;
}
bool is_xen_pmu(int cpu)
{
return (get_xenpmu_data() != NULL);
}
bool is_xen_pmu;
void xen_pmu_init(int cpu)
{
......@@ -520,7 +517,7 @@ void xen_pmu_init(int cpu)
BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);
if (xen_hvm_domain())
if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu))
return;
xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL);
......@@ -541,7 +538,8 @@ void xen_pmu_init(int cpu)
per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
per_cpu(xenpmu_shared, cpu).flags = 0;
if (cpu == 0) {
if (!is_xen_pmu) {
is_xen_pmu = true;
perf_register_guest_info_callbacks(&xen_guest_cbs);
xen_pmu_arch_init();
}
......
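The is_xen_pmu() rework above is the preemption fix mentioned in the pull message: the old helper answered by dereferencing per-CPU data, so calling it with preemption enabled (as can happen on resume) is racy, since the task may migrate between CPUs mid-check. Replacing it with a plain boolean, set once when CPU 0 initializes the PMU, removes the per-CPU access altogether. The old shape of the check, for contrast (as removed above):

    /* Removed: get_xenpmu_data() reads a per-CPU pointer, which is
     * only safe to evaluate with preemption disabled. */
    bool is_xen_pmu(int cpu)
    {
            return get_xenpmu_data() != NULL;
    }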
......@@ -4,6 +4,8 @@
#include <xen/interface/xenpmu.h>
extern bool is_xen_pmu;
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
#ifdef CONFIG_XEN_HAVE_VPMU
void xen_pmu_init(int cpu);
......@@ -12,7 +14,6 @@ void xen_pmu_finish(int cpu);
static inline void xen_pmu_init(int cpu) {}
static inline void xen_pmu_finish(int cpu) {}
#endif
bool is_xen_pmu(int cpu);
bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
int pmu_apic_update(uint32_t reg);
......
......@@ -719,7 +719,7 @@ static void __init xen_reserve_xen_mfnlist(void)
}
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
* xen_memory_setup - Hook for machine specific memory setup.
**/
char * __init xen_memory_setup(void)
{
......
......@@ -19,6 +19,12 @@ static void __init xen_hvm_smp_prepare_boot_cpu(void)
*/
xen_vcpu_setup(0);
/*
* Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS.
* Refer to comments in xen_hvm_init_time_ops().
*/
xen_hvm_init_time_ops();
/*
* The alternative logic (which patches the unlock/lock) runs before
* the smp bootup up code is activated. Hence we need to set this up
......
......@@ -129,7 +129,7 @@ int xen_smp_intr_init_pv(unsigned int cpu)
per_cpu(xen_irq_work, cpu).irq = rc;
per_cpu(xen_irq_work, cpu).name = callfunc_name;
if (is_xen_pmu(cpu)) {
if (is_xen_pmu) {
pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
xen_pmu_irq_handler,
......
......@@ -558,6 +558,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
void __init xen_hvm_init_time_ops(void)
{
static bool hvm_time_initialized;
if (hvm_time_initialized)
return;
/*
* vector callback is needed otherwise we cannot receive interrupts
* on cpu > 0 and at this point we don't know how many cpus are
......@@ -567,7 +572,22 @@ void __init xen_hvm_init_time_ops(void)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
pr_info("Xen doesn't support pvclock on HVM, disable pv timer");
pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer");
return;
}
/*
* Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'.
* The __this_cpu_read(xen_vcpu) is still NULL when Xen HVM guest
* boots on vcpu >= MAX_VIRT_CPUS (e.g., kexec), To access
* __this_cpu_read(xen_vcpu) via xen_clocksource_read() will panic.
*
* The xen_hvm_init_time_ops() should be called again later after
* __this_cpu_read(xen_vcpu) is available.
*/
if (!__this_cpu_read(xen_vcpu)) {
pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n",
xen_vcpu_nr(0));
return;
}
......@@ -577,6 +597,8 @@ void __init xen_hvm_init_time_ops(void)
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
x86_platform.set_wallclock = xen_set_wallclock;
hvm_time_initialized = true;
}
#endif
......
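The time.c and HVM smp hunks together implement a retry pattern: xen_hvm_init_time_ops() returns early while __this_cpu_read(xen_vcpu) is still NULL (a kdump kernel booted on vcpu >= MAX_VIRT_CPUS has no vcpu_info embedded in shared_info yet) and is invoked a second time from xen_hvm_smp_prepare_boot_cpu() once xen_vcpu_setup(0) has run. A stripped-down sketch of the pattern, with hypothetical names:

    static bool initialized;        /* mirrors hvm_time_initialized */

    void init_time_ops(void)
    {
            if (initialized)            /* later calls are no-ops */
                    return;
            if (!vcpu_info_ready())     /* too early: caller retries later */
                    return;
            register_clocksource();     /* the real setup work */
            initialized = true;         /* set only after full success */
    }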
......@@ -1223,7 +1223,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
list_del(&persistent_gnt->node);
if (persistent_gnt->gref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(persistent_gnt->gref,
0, 0UL);
0UL);
rinfo->persistent_gnts_c--;
}
if (info->feature_persistent)
......@@ -1246,7 +1246,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
rinfo->shadow[i].req.u.rw.nr_segments;
for (j = 0; j < segs; j++) {
persistent_gnt = rinfo->shadow[i].grants_used[j];
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
if (info->feature_persistent)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
......@@ -1261,7 +1261,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
for (j = 0; j < INDIRECT_GREFS(segs); j++) {
persistent_gnt = rinfo->shadow[i].indirect_grants[j];
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
......@@ -1284,7 +1284,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
/* Free resources associated with old device channel. */
for (i = 0; i < info->nr_ring_pages; i++) {
if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
gnttab_end_foreign_access(rinfo->ring_ref[i], 0);
rinfo->ring_ref[i] = GRANT_INVALID_REF;
}
}
......
......@@ -332,7 +332,7 @@ static void ring_free(struct tpm_private *priv)
return;
if (priv->ring_ref)
gnttab_end_foreign_access(priv->ring_ref, 0,
gnttab_end_foreign_access(priv->ring_ref,
(unsigned long)priv->shr);
else
free_page((unsigned long)priv->shr);
......
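The tpm ring_free() hunk above shows the teardown idiom repeated by most frontends in this series: gnttab_end_foreign_access() frees the backing page as a side effect (possibly deferred), so the page is freed directly only when it was never granted. The recurring shape, roughly:

    /* If the page was granted, ending access owns (and eventually
     * frees) it; otherwise free it ourselves. */
    if (ref != GRANT_INVALID_REF)
            gnttab_end_foreign_access(ref, (unsigned long)vaddr);
    else
            free_page((unsigned long)vaddr);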
......@@ -148,7 +148,7 @@ static void evtchnl_free(struct xen_drm_front_info *front_info,
/* end access and free the page */
if (evtchnl->gref != GRANT_INVALID_REF)
gnttab_end_foreign_access(evtchnl->gref, 0, page);
gnttab_end_foreign_access(evtchnl->gref, page);
memset(evtchnl, 0, sizeof(*evtchnl));
}
......
......@@ -481,7 +481,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
error_evtchan:
xenbus_free_evtchn(dev, evtchn);
error_grant:
gnttab_end_foreign_access(info->gref, 0, 0UL);
gnttab_end_foreign_access(info->gref, 0UL);
info->gref = -1;
return ret;
}
......@@ -492,7 +492,7 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *info)
unbind_from_irqhandler(info->irq, info);
info->irq = -1;
if (info->gref >= 0)
gnttab_end_foreign_access(info->gref, 0, 0UL);
gnttab_end_foreign_access(info->gref, 0UL);
info->gref = -1;
}
......
......@@ -425,7 +425,7 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
skb = queue->tx_skbs[id];
queue->tx_skbs[id] = NULL;
if (unlikely(!gnttab_end_foreign_access_ref(
queue->grant_tx_ref[id], GNTMAP_readonly))) {
queue->grant_tx_ref[id]))) {
dev_alert(dev,
"Grant still in use by backend domain\n");
goto err;
......@@ -1029,7 +1029,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
goto next;
}
if (!gnttab_end_foreign_access_ref(ref, 0)) {
if (!gnttab_end_foreign_access_ref(ref)) {
dev_alert(dev,
"Grant still in use by backend domain\n");
queue->info->broken = true;
......@@ -1388,7 +1388,6 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
queue->tx_skbs[i] = NULL;
get_page(queue->grant_tx_page[i]);
gnttab_end_foreign_access(queue->grant_tx_ref[i],
GNTMAP_readonly,
(unsigned long)page_address(queue->grant_tx_page[i]));
queue->grant_tx_page[i] = NULL;
queue->grant_tx_ref[i] = GRANT_INVALID_REF;
......@@ -1421,7 +1420,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
* foreign access is ended (which may be deferred).
*/
get_page(page);
gnttab_end_foreign_access(ref, 0,
gnttab_end_foreign_access(ref,
(unsigned long)page_address(page));
queue->grant_rx_ref[id] = GRANT_INVALID_REF;
......@@ -1763,7 +1762,7 @@ static void xennet_end_access(int ref, void *page)
{
/* This frees the page as a side-effect */
if (ref != GRANT_INVALID_REF)
gnttab_end_foreign_access(ref, 0, (unsigned long)page);
gnttab_end_foreign_access(ref, (unsigned long)page);
}
static void xennet_disconnect_backend(struct netfront_info *info)
......@@ -1980,14 +1979,14 @@ static int setup_netfront(struct xenbus_device *dev,
*/
fail:
if (queue->rx_ring_ref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(queue->rx_ring_ref, 0,
gnttab_end_foreign_access(queue->rx_ring_ref,
(unsigned long)rxs);
queue->rx_ring_ref = GRANT_INVALID_REF;
} else {
free_page((unsigned long)rxs);
}
if (queue->tx_ring_ref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(queue->tx_ring_ref, 0,
gnttab_end_foreign_access(queue->tx_ring_ref,
(unsigned long)txs);
queue->tx_ring_ref = GRANT_INVALID_REF;
} else {
......
......@@ -755,7 +755,7 @@ static void free_pdev(struct pcifront_device *pdev)
xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
if (pdev->gnt_ref != INVALID_GRANT_REF)
gnttab_end_foreign_access(pdev->gnt_ref, 0 /* r/w page */,
gnttab_end_foreign_access(pdev->gnt_ref,
(unsigned long)pdev->sh_info);
else
free_page((unsigned long)pdev->sh_info);
......
......@@ -757,7 +757,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
free_irq:
unbind_from_irqhandler(info->irq, info);
free_gnttab:
gnttab_end_foreign_access(info->ring_ref, 0,
gnttab_end_foreign_access(info->ring_ref,
(unsigned long)info->ring.sring);
return err;
......@@ -766,7 +766,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
static void scsifront_free_ring(struct vscsifrnt_info *info)
{
unbind_from_irqhandler(info->irq, info);
gnttab_end_foreign_access(info->ring_ref, 0,
gnttab_end_foreign_access(info->ring_ref,
(unsigned long)info->ring.sring);
}
......
......@@ -1101,14 +1101,14 @@ static void xenhcd_destroy_rings(struct xenhcd_info *info)
info->irq = 0;
if (info->urb_ring_ref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(info->urb_ring_ref, 0,
gnttab_end_foreign_access(info->urb_ring_ref,
(unsigned long)info->urb_ring.sring);
info->urb_ring_ref = GRANT_INVALID_REF;
}
info->urb_ring.sring = NULL;
if (info->conn_ring_ref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(info->conn_ring_ref, 0,
gnttab_end_foreign_access(info->conn_ring_ref,
(unsigned long)info->conn_ring.sring);
info->conn_ring_ref = GRANT_INVALID_REF;
}
......
......@@ -59,6 +59,7 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <asm/page.h>
#include <asm/tlb.h>
......@@ -794,7 +795,7 @@ static int __init balloon_wait_finish(void)
if (balloon_state == BP_ECANCELED) {
pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n",
-credit);
if (jiffies - last_changed >= HZ * balloon_boot_timeout)
if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout))
panic("Initial ballooning failed!\n");
}
......
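The balloon change replaces an open-coded jiffies test with time_is_before_eq_jiffies(), which <linux/jiffies.h> defines in terms of the wraparound-safe time_after_eq() comparison; the two forms are effectively equivalent, the helper just states the intent. Side by side:

    /* Open-coded: unsigned subtraction handles jiffies wraparound. */
    if (jiffies - last_changed >= HZ * balloon_boot_timeout)
            panic("Initial ballooning failed!\n");

    /* Helper: true iff the stamp is at or before the current jiffies. */
    if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout))
            panic("Initial ballooning failed!\n");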
......@@ -192,7 +192,7 @@ static void __del_gref(struct gntalloc_gref *gref)
if (gref->gref_id) {
if (gref->page) {
addr = (unsigned long)page_to_virt(gref->page);
gnttab_end_foreign_access(gref->gref_id, 0, addr);
gnttab_end_foreign_access(gref->gref_id, addr);
} else
gnttab_free_grant_reference(gref->gref_id);
}
......
......@@ -533,7 +533,7 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
for (i = 0; i < count; i++)
if (refs[i] != GRANT_INVALID_REF)
gnttab_end_foreign_access(refs[i], 0, 0UL);
gnttab_end_foreign_access(refs[i], 0UL);
}
static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
......
......@@ -109,7 +109,7 @@ struct gnttab_ops {
void (*unmap_frames)(void);
/*
* Introducing a valid entry into the grant table, granting the frame of
* this grant entry to domain for accessing or transfering. Ref
* this grant entry to domain for accessing. Ref
* parameter is reference of this introduced grant entry, domid is id of
* granted domain, frame is the page frame to be granted, and flags is
* status of the grant entry to be updated.
......@@ -118,21 +118,12 @@ struct gnttab_ops {
unsigned long frame, unsigned flags);
/*
* Stop granting a grant entry to domain for accessing. Ref parameter is
* reference of a grant entry whose grant access will be stopped,
* readonly is not in use in this function. If the grant entry is
* currently mapped for reading or writing, just return failure(==0)
* directly and don't tear down the grant access. Otherwise, stop grant
* access for this entry and return success(==1).
* reference of a grant entry whose grant access will be stopped.
* If the grant entry is currently mapped for reading or writing, just
* return failure(==0) directly and don't tear down the grant access.
* Otherwise, stop grant access for this entry and return success(==1).
*/
int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
/*
* Stop granting a grant entry to domain for transfer. Ref parameter is
* reference of a grant entry whose grant transfer will be stopped. If
* tranfer has not started, just reclaim the grant entry and return
* failure(==0). Otherwise, wait for the transfer to complete and then
* return the frame.
*/
unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
int (*end_foreign_access_ref)(grant_ref_t ref);
/*
* Read the frame number related to a given grant reference.
*/
......@@ -230,10 +221,7 @@ static void put_free_entry(grant_ref_t ref)
* Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
* Introducing a valid entry into the grant table:
* 1. Write ent->domid.
* 2. Write ent->frame:
* GTF_permit_access: Frame to which access is permitted.
* GTF_accept_transfer: Pseudo-phys frame slot being filled by new
* frame, or zero if none.
* 2. Write ent->frame: Frame to which access is permitted.
* 3. Write memory barrier (WMB).
* 4. Write ent->flags, inc. valid type.
*/
......@@ -281,7 +269,7 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
{
u16 flags, nflags;
u16 *pflags;
......@@ -297,7 +285,7 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
return 1;
}
static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
{
gnttab_shared.v2[ref].hdr.flags = 0;
mb(); /* Concurrent access by hypervisor. */
......@@ -320,14 +308,14 @@ static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
return 1;
}
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
{
return gnttab_interface->end_foreign_access_ref(ref, readonly);
return gnttab_interface->end_foreign_access_ref(ref);
}
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
int gnttab_end_foreign_access_ref(grant_ref_t ref)
{
if (_gnttab_end_foreign_access_ref(ref, readonly))
if (_gnttab_end_foreign_access_ref(ref))
return 1;
pr_warn("WARNING: g.e. %#x still in use!\n", ref);
return 0;
......@@ -347,7 +335,6 @@ static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
struct deferred_entry {
struct list_head list;
grant_ref_t ref;
bool ro;
uint16_t warn_delay;
struct page *page;
};
......@@ -371,7 +358,7 @@ static void gnttab_handle_deferred(struct timer_list *unused)
break;
list_del(&entry->list);
spin_unlock_irqrestore(&gnttab_list_lock, flags);
if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
if (_gnttab_end_foreign_access_ref(entry->ref)) {
put_free_entry(entry->ref);
pr_debug("freeing g.e. %#x (pfn %#lx)\n",
entry->ref, page_to_pfn(entry->page));
......@@ -397,8 +384,7 @@ static void gnttab_handle_deferred(struct timer_list *unused)
spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
struct page *page)
static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
{
struct deferred_entry *entry;
gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
......@@ -416,7 +402,6 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
unsigned long flags;
entry->ref = ref;
entry->ro = readonly;
entry->page = page;
entry->warn_delay = 60;
spin_lock_irqsave(&gnttab_list_lock, flags);
......@@ -434,7 +419,7 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
int gnttab_try_end_foreign_access(grant_ref_t ref)
{
int ret = _gnttab_end_foreign_access_ref(ref, 0);
int ret = _gnttab_end_foreign_access_ref(ref);
if (ret)
put_free_entry(ref);
......@@ -443,114 +428,16 @@ int gnttab_try_end_foreign_access(grant_ref_t ref)
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page)
void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
{
if (gnttab_try_end_foreign_access(ref)) {
if (page != 0)
put_page(virt_to_page(page));
} else
gnttab_add_deferred(ref, readonly,
page ? virt_to_page(page) : NULL);
gnttab_add_deferred(ref, page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
int ref;
ref = get_free_entries(1);
if (unlikely(ref < 0))
return -ENOSPC;
gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
unsigned long pfn)
{
gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
unsigned long frame;
u16 flags;
u16 *pflags;
pflags = &gnttab_shared.v1[ref].flags;
/*
* If a transfer is not even yet started, try to reclaim the grant
* reference and return failure (== 0).
*/
while (!((flags = *pflags) & GTF_transfer_committed)) {
if (sync_cmpxchg(pflags, flags, 0) == flags)
return 0;
cpu_relax();
}
/* If a transfer is in progress then wait until it is completed. */
while (!(flags & GTF_transfer_completed)) {
flags = *pflags;
cpu_relax();
}
rmb(); /* Read the frame number /after/ reading completion status. */
frame = gnttab_shared.v1[ref].frame;
BUG_ON(frame == 0);
return frame;
}
static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
unsigned long frame;
u16 flags;
u16 *pflags;
pflags = &gnttab_shared.v2[ref].hdr.flags;
/*
* If a transfer is not even yet started, try to reclaim the grant
* reference and return failure (== 0).
*/
while (!((flags = *pflags) & GTF_transfer_committed)) {
if (sync_cmpxchg(pflags, flags, 0) == flags)
return 0;
cpu_relax();
}
/* If a transfer is in progress then wait until it is completed. */
while (!(flags & GTF_transfer_completed)) {
flags = *pflags;
cpu_relax();
}
rmb(); /* Read the frame number /after/ reading completion status. */
frame = gnttab_shared.v2[ref].full_page.frame;
BUG_ON(frame == 0);
return frame;
}
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
put_free_entry(ref);
return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
void gnttab_free_grant_reference(grant_ref_t ref)
{
put_free_entry(ref);
......@@ -1423,7 +1310,6 @@ static const struct gnttab_ops gnttab_v1_ops = {
.unmap_frames = gnttab_unmap_frames_v1,
.update_entry = gnttab_update_entry_v1,
.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
.read_frame = gnttab_read_frame_v1,
};
......@@ -1435,7 +1321,6 @@ static const struct gnttab_ops gnttab_v2_ops = {
.unmap_frames = gnttab_unmap_frames_v2,
.update_entry = gnttab_update_entry_v2,
.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
.read_frame = gnttab_read_frame_v2,
};
......
......@@ -141,6 +141,8 @@ static void do_suspend(void)
raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
xen_arch_resume();
dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
if (err) {
......@@ -148,8 +150,6 @@ static void do_suspend(void)
si.cancelled = 1;
}
xen_arch_resume();
out_resume:
if (!si.cancelled)
xs_resume();
......
......@@ -238,8 +238,8 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
spin_unlock(&bedata->socket_lock);
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0);
gnttab_end_foreign_access(map->active.ref, 0, 0);
gnttab_end_foreign_access(map->active.ring->ref[i], 0);
gnttab_end_foreign_access(map->active.ref, 0);
free_page((unsigned long)map->active.ring);
kfree(map);
......@@ -1117,7 +1117,7 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
}
}
if (bedata->ref != -1)
gnttab_end_foreign_access(bedata->ref, 0, 0);
gnttab_end_foreign_access(bedata->ref, 0);
kfree(bedata->ring.sring);
kfree(bedata);
xenbus_switch_state(dev, XenbusStateClosed);
......
......@@ -22,11 +22,10 @@
#endif
#define HYPERVISOR_ATTR_RO(_name) \
static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
#define HYPERVISOR_ATTR_RW(_name) \
static struct hyp_sysfs_attr _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
static struct hyp_sysfs_attr _name##_attr = __ATTR_RW(_name)
struct hyp_sysfs_attr {
struct attribute attr;
......
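The sysfs hunk relies on __ATTR_RW() from <linux/sysfs.h>, which already expands to the exact open-coded form being deleted, so the change is purely a cleanup:

    /* From <linux/sysfs.h>: */
    #define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)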
......@@ -143,8 +143,7 @@ void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
for (i = 0; i < buf->num_grefs; i++)
if (buf->grefs[i] != GRANT_INVALID_REF)
gnttab_end_foreign_access(buf->grefs[i],
0, 0UL);
gnttab_end_foreign_access(buf->grefs[i], 0UL);
}
kfree(buf->grefs);
kfree(buf->directory);
......
......@@ -97,7 +97,7 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
* longer in use. Return 1 if the grant entry was freed, 0 if it is still in
* use.
*/
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
int gnttab_end_foreign_access_ref(grant_ref_t ref);
/*
* Eventually end access through the given grant reference, and once that
......@@ -114,8 +114,7 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
* gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
* via free_pages_exact()) in order to avoid high order pages.
*/
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page);
void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
/*
* End access through the given grant reference, iff the grant entry is
......@@ -125,11 +124,6 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
*/
int gnttab_try_end_foreign_access(grant_ref_t ref);
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
/*
* operations on reserved batches of grant references
*/
......@@ -162,9 +156,6 @@ static inline void gnttab_page_grant_foreign_access_ref_one(
readonly);
}
void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
unsigned long pfn);
static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
uint32_t flags, grant_ref_t ref, domid_t domid)
......
......@@ -279,13 +279,13 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
grant_ref_t ref;
ref = priv->rings[i].intf->ref[j];
gnttab_end_foreign_access(ref, 0, 0);
gnttab_end_foreign_access(ref, 0);
}
free_pages_exact(priv->rings[i].data.in,
1UL << (priv->rings[i].intf->ring_order +
XEN_PAGE_SHIFT));
}
gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
gnttab_end_foreign_access(priv->rings[i].ref, 0);
free_page((unsigned long)priv->rings[i].intf);
}
kfree(priv->rings);
......@@ -353,10 +353,10 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
out:
if (bytes) {
for (i--; i >= 0; i--)
gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
gnttab_end_foreign_access(ring->intf->ref[i], 0);
free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
}
gnttab_end_foreign_access(ring->ref, 0, 0);
gnttab_end_foreign_access(ring->ref, 0);
free_page((unsigned long)ring->intf);
return ret;
}
......
......@@ -168,7 +168,7 @@ static void evtchnl_free(struct xen_snd_front_info *front_info,
/* End access and free the page. */
if (channel->gref != GRANT_INVALID_REF)
gnttab_end_foreign_access(channel->gref, 0, page);
gnttab_end_foreign_access(channel->gref, page);
else
free_page(page);
......