Commit 27d2a8b9 authored by Linus Torvalds

Merge branches 'stable/ia64', 'stable/blkfront-cleanup' and 'stable/cleanup' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

* 'stable/ia64' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: ia64 build broken due to "xen: switch to new schedop hypercall by default."

* 'stable/blkfront-cleanup' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: Union the blkif_request request specific fields

* 'stable/cleanup' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: annotate functions which only call into __init at start of day
  xen p2m: annotate variable which appears unused
  xen: events: mark cpu_evtchn_mask_p as __refdata
@@ -107,7 +107,7 @@ extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
static inline int
xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
{
-        return _hypercall2(int, sched_op_new, cmd, arg);
+        return _hypercall2(int, sched_op, cmd, arg);
}
static inline long
......
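Editor's note: the ia64 build broke because the generic Xen interface header renamed __HYPERVISOR_sched_op_new to __HYPERVISOR_sched_op (the old number becoming sched_op_compat), so the xencomm wrapper above simply follows the rename and still issues the same hypercall. For comparison, a rough sketch of the x86-side wrapper and a typical caller; the exact header location is quoted from memory and should be treated as an assumption.

/* Roughly what arch/x86/include/asm/xen/hypercall.h provides after the
 * rename; the "new" scheduler op takes a pointer to a per-command
 * argument structure rather than packed scalar arguments.
 */
static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
        return _hypercall2(int, sched_op, cmd, arg);
}

/* Typical caller (SCHEDOP_yield from xen/interface/sched.h):
 * give the vCPU back to the hypervisor.
 */
static inline void example_yield(void)
{
        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
}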
@@ -1292,7 +1292,7 @@ static int init_hvm_pv_info(int *major, int *minor)
return 0;
}
-void xen_hvm_init_shared_info(void)
+void __ref xen_hvm_init_shared_info(void)
{
int cpu;
struct xen_add_to_physmap xatp;
......
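Editor's note: the __ref annotations in this branch mark functions that live in regular .text (they are also reached on the resume path) but whose calls into __init code can only happen at start of day, so modpost's section-mismatch warning is a false positive. A minimal sketch of the pattern, with hypothetical names:

#include <linux/init.h>
#include <linux/types.h>

/* Hypothetical boot-only helper: discarded once init memory is freed. */
static int __init boot_only_setup(void)
{
        return 0;
}

/*
 * Lives in regular .text, but its call into __init code only happens on
 * the start-of-day path, so __ref tells modpost the reference is intended.
 */
int __ref setup_or_noop(bool booting)
{
        if (booting)
                return boot_only_setup();
        return 0;
}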
@@ -266,7 +266,7 @@ static void p2m_init(unsigned long *p2m)
* - After resume we're called from within stop_machine, but the mfn
* tree should alreay be completely allocated.
*/
-void xen_build_mfn_list_list(void)
+void __ref xen_build_mfn_list_list(void)
{
unsigned long pfn;
@@ -654,7 +654,7 @@ int m2p_add_override(unsigned long mfn, struct page *page)
{
unsigned long flags;
unsigned long pfn;
-        unsigned long address;
+        unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
@@ -688,7 +688,7 @@ int m2p_remove_override(struct page *page)
unsigned long flags;
unsigned long mfn;
unsigned long pfn;
-        unsigned long address;
+        unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
......
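Editor's note: uninitialized_var() only silences a false-positive warning here. In both m2p functions 'address' is assigned and consumed on the same conditional path (the page not being highmem), which gcc cannot prove, so it warns "may be used uninitialized". At the time the macro was defined as roughly '#define uninitialized_var(x) x = x'. A simplified sketch of the pattern that triggers the warning, not the actual m2p code:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

/* Simplified illustration of the flow that confuses gcc. */
static pte_t *find_lowmem_pte(struct page *page)
{
        unsigned long uninitialized_var(address);
        unsigned int level;
        pte_t *ptep = NULL;

        if (!PageHighMem(page)) {
                address = (unsigned long)__va(page_to_pfn(page) << PAGE_SHIFT);
                ptep = lookup_address(address, &level);
        }

        /* 'address' is only ever read when ptep was set on the same path. */
        if (ptep)
                pr_debug("pte for %lx at level %u\n", address, level);

        return ptep;
}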
@@ -281,7 +281,7 @@ static int blkif_queue_request(struct request *req)
info->shadow[id].request = req;
ring_req->id = id;
-        ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
+        ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
@@ -317,7 +317,7 @@ static int blkif_queue_request(struct request *req)
rq_data_dir(req) );
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
-                ring_req->seg[i] =
+                ring_req->u.rw.seg[i] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
@@ -615,7 +615,7 @@ static void blkif_completion(struct blk_shadow *s)
{
int i;
for (i = 0; i < s->req.nr_segments; i++)
-                gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+                gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -932,7 +932,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Rewrite any grant references invalidated by susp/resume. */
for (j = 0; j < req->nr_segments; j++)
gnttab_grant_foreign_access_ref(
-                                req->seg[j].gref,
+                                req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->id].frame[j]),
rq_data_dir(info->shadow[req->id].request));
......
@@ -114,7 +114,7 @@ struct cpu_evtchn_s {
static __initdata struct cpu_evtchn_s init_evtchn_mask = {
.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
-static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;
+static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
......
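Editor's note: __refdata plays the same role as __ref, but for data. cpu_evtchn_mask_p legitimately starts out pointing at an __initdata table and is repointed to a runtime allocation before init memory is freed, so the stale reference can never be followed after boot. A minimal sketch of that pattern with hypothetical names:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct mask_table {
        unsigned long bits[4];
};

/* Boot-time table, discarded along with the rest of .init.data. */
static __initdata struct mask_table boot_mask = {
        .bits[0 ... 3] = ~0ul,
};

/* Long-lived pointer; __refdata tells modpost the initializer is intended. */
static struct mask_table __refdata *active_mask = &boot_mask;

/* Repointed to a permanent allocation before initmem is freed. */
static int __init switch_mask(void)
{
        active_mask = kzalloc(sizeof(*active_mask), GFP_KERNEL);
        return active_mask ? 0 : -ENOMEM;
}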
@@ -51,11 +51,7 @@ typedef uint64_t blkif_sector_t;
*/
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
-struct blkif_request {
-        uint8_t operation; /* BLKIF_OP_??? */
-        uint8_t nr_segments; /* number of segments */
-        blkif_vdev_t handle; /* only for read/write requests */
-        uint64_t id; /* private guest value, echoed in resp */
+struct blkif_request_rw {
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment {
grant_ref_t gref; /* reference to I/O buffer frame */
@@ -65,6 +61,16 @@ struct blkif_request {
} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
+struct blkif_request {
+        uint8_t operation; /* BLKIF_OP_??? */
+        uint8_t nr_segments; /* number of segments */
+        blkif_vdev_t handle; /* only for read/write requests */
+        uint64_t id; /* private guest value, echoed in resp */
+        union {
+                struct blkif_request_rw rw;
+        } u;
+};
struct blkif_response {
uint64_t id; /* copied from request */
uint8_t operation; /* copied from request */
......
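Editor's note: moving the read/write-specific fields into struct blkif_request_rw behind a union keeps the common header (operation, nr_segments, handle, id) stable while leaving room for other request types to carry their own payload in the same ring slot. As an illustration only (not part of this series), a hypothetical discard variant would slot in like this:

/* Hypothetical additional request type sharing the same ring slot. */
struct blkif_request_discard {
        blkif_sector_t sector_number; /* first sector to discard        */
        uint64_t nr_sectors;          /* number of contiguous sectors   */
};

struct blkif_request {
        uint8_t operation;   /* BLKIF_OP_???                        */
        uint8_t nr_segments; /* number of segments                  */
        blkif_vdev_t handle; /* only for read/write requests        */
        uint64_t id;         /* private guest value, echoed in resp */
        union {
                struct blkif_request_rw rw;
                struct blkif_request_discard discard; /* illustrative only */
        } u;
};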