Commit 55fa11c5 authored by David Mosberger, committed by David Mosberger

ia64: Change local_irq_restore() to restore only psr.i, so that it
	doesn't unexpectedly trample on the other psr bits.
parent cde28bc5
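
The hunks below all follow one pattern: callers that used to stash the whole psr in a `flags` variable and write it back wholesale with `local_irq_restore()` (or `__restore_flags()`) now save the return value of the new `ia64_clear_ic()` and restore it with the new `ia64_set_psr()`. The sketch below contrasts the two conventions; `do_tlb_insert()` is a hypothetical stand-in for the `ia64_itr()`/`ia64_itc()` sequences seen in the diff, not a real kernel function.

```c
/* Hypothetical stand-in for the ia64_itr()/ia64_itc() work done
 * while interrupt collection is off. */
extern void do_tlb_insert(void);

/* Old convention: ia64_clear_ic() was a macro that stored the entire
 * psr into `flags`; local_irq_restore() then rewrote all of psr.l,
 * trampling any bits besides psr.i that had changed in between. */
void old_convention(void)
{
        unsigned long flags;

        ia64_clear_ic(flags);           /* old macro form: saves entire psr */
        do_tlb_insert();
        local_irq_restore(flags);       /* old form: rewrote all of psr.l */
}

/* New convention: the saved psr is returned and restored explicitly
 * with ia64_set_psr(); local_irq_restore() now touches only psr.i. */
void new_convention(void)
{
        unsigned long psr;

        psr = ia64_clear_ic();          /* returns the previous psr */
        do_tlb_insert();
        ia64_set_psr(psr);              /* restores psr deliberately */
        ia64_srlz_i();                  /* serialize after psr.ic change */
}
```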
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999-2001 Hewlett-Packard Co.
+ * Copyright (C) 1999-2002 Hewlett-Packard Co.
  *      David Mosberger-Tang <davidm@hpl.hp.com>
  *      Stephane Eranian <eranian@hpl.hp.com>
  *
@@ -212,8 +212,8 @@ efi_map_pal_code (void)
         void *efi_map_start, *efi_map_end, *p;
         efi_memory_desc_t *md;
         u64 efi_desc_size;
-        int pal_code_count=0;
-        u64 mask, flags;
+        int pal_code_count = 0;
+        u64 mask, psr;
         u64 vaddr;

         efi_map_start = __va(ia64_boot_param->efi_memmap);
@@ -266,10 +266,10 @@ efi_map_pal_code (void)
                 /*
                  * Cannot write to CRx with PSR.ic=1
                  */
-                ia64_clear_ic(flags);
+                psr = ia64_clear_ic();
                 ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
                          pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL)), IA64_GRANULE_SHIFT);
-                local_irq_restore(flags);
+                ia64_set_psr(psr);
                 ia64_srlz_i();
         }
 }
@@ -485,7 +485,7 @@ efi_get_iobase (void)
 }

 u32
-efi_mem_type (u64 phys_addr)
+efi_mem_type (unsigned long phys_addr)
 {
         void *efi_map_start, *efi_map_end, *p;
         efi_memory_desc_t *md;
@@ -506,7 +506,7 @@ efi_mem_type (u64 phys_addr)
 }

 u64
-efi_mem_attributes (u64 phys_addr)
+efi_mem_attributes (unsigned long phys_addr)
 {
         void *efi_map_start, *efi_map_end, *p;
         efi_memory_desc_t *md;
......
@@ -265,7 +265,7 @@ put_gate_page (struct page *page, unsigned long address)
 void __init
 ia64_mmu_init (void *my_cpu_data)
 {
-        unsigned long flags, rid, pta, impl_va_bits;
+        unsigned long psr, rid, pta, impl_va_bits;
         extern void __init tlb_init (void);
 #ifdef CONFIG_DISABLE_VHPT
 #        define VHPT_ENABLE_BIT 0
@@ -277,7 +277,7 @@ ia64_mmu_init (void *my_cpu_data)
          * Set up the kernel identity mapping for regions 6 and 5.  The mapping for region
          * 7 is setup up in _start().
          */
-        ia64_clear_ic(flags);
+        psr = ia64_clear_ic();

         rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET);
         ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (IA64_GRANULE_SHIFT << 2));
@@ -291,7 +291,7 @@ ia64_mmu_init (void *my_cpu_data)
         ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
                  pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL)), PAGE_SHIFT);

-        __restore_flags(flags);
+        ia64_set_psr(psr);
         ia64_srlz_i();

         /*
......
@@ -75,7 +75,7 @@ sgi_mcatest(void)
         if (mcatest == 5) {
                 int zzzspec(long);
                 int i;
-                long flags, dcr, res, val, addr=0xff00000000UL;
+                long psr, dcr, res, val, addr=0xff00000000UL;

                 dcr = ia64_get_dcr();
                 for (i=0; i<5; i++) {
@@ -87,11 +87,11 @@ sgi_mcatest(void)
                 ia64_set_dcr(dcr);
                 res = ia64_sn_probe_io_slot(0xff00000000UL, 8, &val);
                 printk("zzzspec: probe %ld, 0x%lx\n", res, val);

-                ia64_clear_ic(flags);
+                psr = ia64_clear_ic();
                 ia64_itc(0x2, 0xe00000ff00000000UL,
                          pte_val(mk_pte_phys(0xff00000000UL,
                          __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW))), _PAGE_SIZE_256M);
-                local_irq_restore(flags);
+                ia64_set_psr(psr);
                 ia64_srlz_i ();
         }
......
@@ -632,14 +632,22 @@ ia64_invala (void)
         asm volatile ("invala" ::: "memory");
 }

+static inline __u64
+ia64_clear_ic (void)
+{
+        __u64 psr;
+        asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory");
+        return psr;
+}
+
 /*
- * Save the processor status flags in FLAGS and then clear the interrupt collection and
- * interrupt enable bits.  Don't trigger any mandatory RSE references while this bit is
- * off!
+ * Restore the psr.
  */
-#define ia64_clear_ic(flags)                                                    \
-        asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;"              \
-                              : "=r"(flags) :: "memory");
+static inline void
+ia64_set_psr (__u64 psr)
+{
+        asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory");
+}

 /*
  * Insert a translation into an instruction and/or data translation
......
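With the macro replaced by a real function, the save/restore pairing reads as ordinary C. Below is a minimal usage sketch mirroring the `efi_map_pal_code()` hunk above; the surrounding loop and the real pte/mask computation are omitted, and the function name is invented for illustration.

```c
/* Illustrative only: mirrors the efi_map_pal_code() hunk above. */
static void map_pal_code_sketch(u64 vaddr, u64 mask, efi_memory_desc_t *md)
{
        u64 psr;

        /* Translation registers cannot be written with psr.ic set. */
        psr = ia64_clear_ic();          /* save psr, clear psr.i and psr.ic */
        ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
                 pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL)),
                 IA64_GRANULE_SHIFT);
        ia64_set_psr(psr);              /* mov psr.l + srlz.d */
        ia64_srlz_i();                  /* psr.ic change needs srlz.i as well */
}
```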
@@ -137,14 +137,18 @@ do { \
 # define local_irq_restore(x)                                          \
 do {                                                                   \
-        unsigned long ip, old_psr, psr = (x);                          \
-                                                                       \
-        __asm__ __volatile__ (";;mov %0=psr; mov psr.l=%1;; srlz.d"    \
-                              : "=&r" (old_psr) : "r" (psr) : "memory"); \
-        if ((old_psr & (1UL << 14)) && !(psr & (1UL << 14))) {         \
-                __asm__ ("mov %0=ip" : "=r"(ip));                      \
-                last_cli_ip = ip;                                      \
-        }                                                              \
+        unsigned long ip, old_psr, psr = (x);                          \
+                                                                       \
+        __asm__ __volatile__ ("mov %0=psr;"                            \
+                              "cmp.ne p6,p7=%1,r0;;"                   \
+                              "(p6) ssm psr.i;"                        \
+                              "(p7) rsm psr.i;;"                       \
+                              "srlz.d"                                 \
+                              : "=&r" (old_psr) : "r"((psr) & IA64_PSR_I) : "memory"); \
+        if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) {           \
+                __asm__ ("mov %0=ip" : "=r"(ip));                      \
+                last_cli_ip = ip;                                      \
+        }                                                              \
 } while (0)

 #else /* !CONFIG_IA64_DEBUG_IRQ */
@@ -153,8 +157,11 @@ do { \
                               : "=r" (x) :: "memory")
 # define local_irq_disable()   __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
 /* (potentially) setting psr.i requires data serialization: */
-# define local_irq_restore(x)  __asm__ __volatile__ (";; mov psr.l=%0;; srlz.d" \
-                                                      :: "r" (x) : "memory")
+# define local_irq_restore(x)  __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;"    \
+                                                      "(p6) ssm psr.i;"        \
+                                                      "(p7) rsm psr.i;;"       \
+                                                      "srlz.d"                 \
+                                                      :: "r"((x) & IA64_PSR_I) : "memory")
 #endif /* !CONFIG_IA64_DEBUG_IRQ */

 #define local_irq_enable()     __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
......
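The new `local_irq_restore()` compares the saved `IA64_PSR_I` bit against zero and uses the IA-64 predicate registers p6/p7 to execute either `ssm psr.i` or `rsm psr.i`, so only the interrupt-enable bit is ever written back. In plain C the logic would read roughly as follows (illustration only; the real macro is the branch-free inline asm above, and the function name here is invented):

```c
/* C-level equivalent of the new local_irq_restore() logic, for
 * illustration only. The real macro does this with predicate
 * registers (p6/p7) in a single inline-asm block, without a branch. */
static inline void local_irq_restore_sketch(unsigned long flags)
{
        if (flags & IA64_PSR_I)
                local_irq_enable();     /* ssm psr.i;; srlz.d */
        else
                local_irq_disable();    /* rsm psr.i */
}
```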