Commit 821fe947 authored by Dave Jones, committed by Linus Torvalds

[PATCH] gcc4 compile fix for recent ia64 xpc changes

Gcc4 doesn't like volatile casts as lvalues.  Make the structure members
volatile instead.
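For reference, a minimal sketch (illustrative only, not code from this patch) of the construct in question: gcc 3.x accepted a cast applied to the target of an assignment, but gcc 4 rejects a cast result as an lvalue, so the volatile qualifier has to live on the member declaration itself. The struct and function names below are hypothetical.

typedef unsigned char u8;	/* stand-in for the kernel's u8 typedef */

struct notify_example {		/* hypothetical struct for illustration */
	volatile u8 type;	/* post-patch style: the member itself is volatile */
};

static void set_type(struct notify_example *notify, u8 notify_type)
{
	/*
	 * Pre-patch style, rejected by gcc 4 with an "invalid lvalue" error:
	 *
	 *	(volatile u8) notify->type = notify_type;
	 *
	 * With the member declared volatile, a plain store has the same
	 * effect without casting the assignment target.
	 */
	notify->type = notify_type;
}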
Signed-off-by: Dave Jones <davej@redhat.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7e0fa31d
@@ -87,7 +87,7 @@ struct xpc_rsvd_page {
 	u8 partid;		/* partition ID from SAL */
 	u8 version;
 	u8 pad[6];		/* pad to u64 align */
-	u64 vars_pa;
+	volatile u64 vars_pa;
 	u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
 	u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
 };
@@ -138,7 +138,7 @@ struct xpc_vars {
  * occupies half a cacheline.
  */
 struct xpc_vars_part {
-	u64 magic;
+	volatile u64 magic;
 	u64 openclose_args_pa;	/* physical address of open and close args */
 	u64 GPs_pa;		/* physical address of Get/Put values */
@@ -185,8 +185,8 @@ struct xpc_vars_part {
  * Define a Get/Put value pair (pointers) used with a message queue.
  */
 struct xpc_gp {
-	s64 get;	/* Get value */
-	s64 put;	/* Put value */
+	volatile s64 get;	/* Get value */
+	volatile s64 put;	/* Put value */
 };
 #define XPC_GP_SIZE \
@@ -231,7 +231,7 @@ struct xpc_openclose_args {
  */
 struct xpc_notify {
 	struct semaphore sema;		/* notify semaphore */
-	u8 type;			/* type of notification */
+	volatile u8 type;		/* type of notification */
 	/* the following two fields are only used if type == XPC_N_CALL */
 	xpc_notify_func func;		/* user's notify function */
@@ -439,7 +439,7 @@ struct xpc_partition {
 	/* XPC infrastructure referencing and teardown control */
-	u8 setup_state;			/* infrastructure setup state */
+	volatile u8 setup_state;	/* infrastructure setup state */
 	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
 	atomic_t references;		/* #of references to infrastructure */
@@ -209,7 +209,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 * With the setting of the partition setup_state to XPC_P_SETUP, we're
 	 * declaring that this partition is ready to go.
 	 */
-	(volatile u8) part->setup_state = XPC_P_SETUP;
+	part->setup_state = XPC_P_SETUP;
 	/*
@@ -227,7 +227,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	xpc_vars_part[partid].IPI_phys_cpuid =
 					cpu_physical_id(smp_processor_id());
 	xpc_vars_part[partid].nchannels = part->nchannels;
-	(volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
+	xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
 	return xpcSuccess;
 }
@@ -355,7 +355,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 		/* let the other side know that we've pulled their variables */
-		(volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
+		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
 	}
 	if (pulled_entry->magic == XPC_VP_MAGIC1) {
@@ -1183,7 +1183,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 		 */
 		xpc_clear_local_msgqueue_flags(ch);
-		(volatile s64) ch->w_remote_GP.get = ch->remote_GP.get;
+		ch->w_remote_GP.get = ch->remote_GP.get;
 		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
 			"channel=%d\n", ch->w_remote_GP.get, ch->partid,
@@ -1211,7 +1211,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 		 */
 		xpc_clear_remote_msgqueue_flags(ch);
-		(volatile s64) ch->w_remote_GP.put = ch->remote_GP.put;
+		ch->w_remote_GP.put = ch->remote_GP.put;
 		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
 			"channel=%d\n", ch->w_remote_GP.put, ch->partid,
@@ -1875,7 +1875,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
 		notify = &ch->notify_queue[msg_number % ch->local_nentries];
 		notify->func = func;
 		notify->key = key;
-		(volatile u8) notify->type = notify_type;
+		notify->type = notify_type;
 		// >>> is a mb() needed here?
@@ -253,7 +253,7 @@ xpc_rsvd_page_init(void)
 	 * This signifies to the remote partition that our reserved
 	 * page is initialized.
 	 */
-	(volatile u64) rp->vars_pa = __pa(xpc_vars);
+	rp->vars_pa = __pa(xpc_vars);
 	return rp;
 }