Commit 7ffcf8ec authored by Anton Blanchard, committed by Benjamin Herrenschmidt

powerpc: Fix little endian lppaca, slb_shadow and dtl_entry

The lppaca, slb_shadow and dtl_entry hypervisor structures are
big endian, so we have to byte swap them in little endian builds.

LE KVM hosts will also need to be fixed but for now add an #error
to remind us.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent c72cd555
@@ -32,6 +32,15 @@
#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS) #define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS)
#define PPC_LR_STKOFF 16 #define PPC_LR_STKOFF 16
#define PPC_MIN_STKFRM 112 #define PPC_MIN_STKFRM 112
#ifdef __BIG_ENDIAN__
#define LDX_BE stringify_in_c(ldx)
#define STDX_BE stringify_in_c(stdx)
#else
#define LDX_BE stringify_in_c(ldbrx)
#define STDX_BE stringify_in_c(stdbrx)
#endif
#else /* 32-bit */ #else /* 32-bit */
/* operations for longs and pointers */ /* operations for longs and pointers */
......
@@ -54,7 +54,8 @@ BEGIN_FW_FTR_SECTION; \
/* from user - see if there are any DTL entries to process */ \ /* from user - see if there are any DTL entries to process */ \
ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \ ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \
ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \ ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \
ld r10,LPPACA_DTLIDX(r10); /* get log write index */ \ addi r10,r10,LPPACA_DTLIDX; \
LDX_BE r10,0,r10; /* get log write index */ \
cmpd cr1,r11,r10; \ cmpd cr1,r11,r10; \
beq+ cr1,33f; \ beq+ cr1,33f; \
bl .accumulate_stolen_time; \ bl .accumulate_stolen_time; \
......
@@ -102,7 +102,8 @@ BEGIN_FW_FTR_SECTION
/* if from user, see if there are any DTL entries to process */ /* if from user, see if there are any DTL entries to process */
ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */ ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
ld r11,PACA_DTL_RIDX(r13) /* get log read index */ ld r11,PACA_DTL_RIDX(r13) /* get log read index */
ld r10,LPPACA_DTLIDX(r10) /* get log write index */ addi r10,r10,LPPACA_DTLIDX
LDX_BE r10,0,r10 /* get log write index */
cmpd cr1,r11,r10 cmpd cr1,r11,r10
beq+ cr1,33f beq+ cr1,33f
bl .accumulate_stolen_time bl .accumulate_stolen_time
@@ -531,9 +532,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
*/ */
ld r9,PACA_SLBSHADOWPTR(r13) ld r9,PACA_SLBSHADOWPTR(r13)
li r12,0 li r12,0
std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ li r12,SLBSHADOW_STACKVSID
std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ STDX_BE r7,r12,r9 /* Save VSID */
li r12,SLBSHADOW_STACKESID
STDX_BE r0,r12,r9 /* Save ESID */
/* No need to check for MMU_FTR_NO_SLBIE_B here, since when /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
* we have 1TB segments, the only CPUs known to have the errata * we have 1TB segments, the only CPUs known to have the errata
......
@@ -387,8 +387,8 @@ static void pseries_cmo_data(struct seq_file *m)
return; return;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
cmo_faults += lppaca_of(cpu).cmo_faults; cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
cmo_fault_time += lppaca_of(cpu).cmo_fault_time; cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
} }
seq_printf(m, "cmo_faults=%lu\n", cmo_faults); seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
@@ -406,8 +406,9 @@ static void splpar_dispatch_data(struct seq_file *m)
unsigned long dispatch_dispersions = 0; unsigned long dispatch_dispersions = 0;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
dispatches += lppaca_of(cpu).yield_count; dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
dispatch_dispersions += lppaca_of(cpu).dispersion_count; dispatch_dispersions +=
be32_to_cpu(lppaca_of(cpu).dispersion_count);
} }
seq_printf(m, "dispatches=%lu\n", dispatches); seq_printf(m, "dispatches=%lu\n", dispatches);
......
@@ -34,10 +34,10 @@ extern unsigned long __toc_start;
*/ */
struct lppaca lppaca[] = { struct lppaca lppaca[] = {
[0 ... (NR_LPPACAS-1)] = { [0 ... (NR_LPPACAS-1)] = {
.desc = 0xd397d781, /* "LpPa" */ .desc = cpu_to_be32(0xd397d781), /* "LpPa" */
.size = sizeof(struct lppaca), .size = cpu_to_be16(sizeof(struct lppaca)),
.fpregs_in_use = 1, .fpregs_in_use = 1,
.slb_count = 64, .slb_count = cpu_to_be16(64),
.vmxregs_in_use = 0, .vmxregs_in_use = 0,
.page_ins = 0, .page_ins = 0,
}, },
@@ -101,8 +101,8 @@ static inline void free_lppacas(void) { }
*/ */
struct slb_shadow slb_shadow[] __cacheline_aligned = { struct slb_shadow slb_shadow[] __cacheline_aligned = {
[0 ... (NR_CPUS-1)] = { [0 ... (NR_CPUS-1)] = {
.persistent = SLB_NUM_BOLTED, .persistent = cpu_to_be32(SLB_NUM_BOLTED),
.buffer_length = sizeof(struct slb_shadow), .buffer_length = cpu_to_be32(sizeof(struct slb_shadow)),
}, },
}; };
......
@@ -210,18 +210,18 @@ static u64 scan_dispatch_log(u64 stop_tb)
if (!dtl) if (!dtl)
return 0; return 0;
if (i == vpa->dtl_idx) if (i == be64_to_cpu(vpa->dtl_idx))
return 0; return 0;
while (i < vpa->dtl_idx) { while (i < be64_to_cpu(vpa->dtl_idx)) {
if (dtl_consumer) if (dtl_consumer)
dtl_consumer(dtl, i); dtl_consumer(dtl, i);
dtb = dtl->timebase; dtb = be64_to_cpu(dtl->timebase);
tb_delta = dtl->enqueue_to_dispatch_time + tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
dtl->ready_to_enqueue_time; be32_to_cpu(dtl->ready_to_enqueue_time);
barrier(); barrier();
if (i + N_DISPATCH_LOG < vpa->dtl_idx) { if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
/* buffer has overflowed */ /* buffer has overflowed */
i = vpa->dtl_idx - N_DISPATCH_LOG; i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
continue; continue;
} }
@@ -269,7 +269,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
{ {
u64 stolen = 0; u64 stolen = 0;
if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) { if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
stolen = scan_dispatch_log(stop_tb); stolen = scan_dispatch_log(stop_tb);
get_paca()->system_time -= stolen; get_paca()->system_time -= stolen;
} }
......
@@ -17,6 +17,10 @@
* Authors: Alexander Graf <agraf@suse.de> * Authors: Alexander Graf <agraf@suse.de>
*/ */
#ifdef __LITTLE_ENDIAN__
#error Need to fix SLB shadow accesses in little endian mode
#endif
#define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10)) #define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8) #define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
#define UNBOLT_SLB_ENTRY(num) \ #define UNBOLT_SLB_ENTRY(num) \
......
@@ -29,6 +29,10 @@
#include <asm/kvm_book3s_asm.h> #include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h> #include <asm/mmu-hash64.h>
#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif
/***************************************************************************** /*****************************************************************************
* * * *
* Real Mode handlers that need to be in the linear mapping * * Real Mode handlers that need to be in the linear mapping *
......
@@ -32,7 +32,7 @@ void __spin_yield(arch_spinlock_t *lock)
return; return;
holder_cpu = lock_value & 0xffff; holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS); BUG_ON(holder_cpu >= NR_CPUS);
yield_count = lppaca_of(holder_cpu).yield_count; yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
if ((yield_count & 1) == 0) if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */ return; /* virtual cpu is currently running */
rmb(); rmb();
@@ -57,7 +57,7 @@ void __rw_yield(arch_rwlock_t *rw)
return; /* no write lock at present */ return; /* no write lock at present */
holder_cpu = lock_value & 0xffff; holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS); BUG_ON(holder_cpu >= NR_CPUS);
yield_count = lppaca_of(holder_cpu).yield_count; yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
if ((yield_count & 1) == 0) if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */ return; /* virtual cpu is currently running */
rmb(); rmb();
......
@@ -443,8 +443,12 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
regs, address); regs, address);
#ifdef CONFIG_PPC_SMLPAR #ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) { if (firmware_has_feature(FW_FEATURE_CMO)) {
u32 page_ins;
preempt_disable(); preempt_disable();
get_lppaca()->page_ins += (1 << PAGE_FACTOR); page_ins = be32_to_cpu(get_lppaca()->page_ins);
page_ins += 1 << PAGE_FACTOR;
get_lppaca()->page_ins = cpu_to_be32(page_ins);
preempt_enable(); preempt_enable();
} }
#endif /* CONFIG_PPC_SMLPAR */ #endif /* CONFIG_PPC_SMLPAR */
......
@@ -66,8 +66,10 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
* we only update the current CPU's SLB shadow buffer. * we only update the current CPU's SLB shadow buffer.
*/ */
get_slb_shadow()->save_area[entry].esid = 0; get_slb_shadow()->save_area[entry].esid = 0;
get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags); get_slb_shadow()->save_area[entry].vsid =
get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry); cpu_to_be64(mk_vsid_data(ea, ssize, flags));
get_slb_shadow()->save_area[entry].esid =
cpu_to_be64(mk_esid_data(ea, ssize, entry));
} }
static inline void slb_shadow_clear(unsigned long entry) static inline void slb_shadow_clear(unsigned long entry)
@@ -112,7 +114,8 @@ static void __slb_flush_and_rebolt(void)
} else { } else {
/* Update stack entry; others don't change */ /* Update stack entry; others don't change */
slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2); slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; ksp_vsid_data =
be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
} }
/* We need to do this all in asm, so we're sure we don't touch /* We need to do this all in asm, so we're sure we don't touch
......
@@ -87,7 +87,7 @@ static void consume_dtle(struct dtl_entry *dtle, u64 index)
barrier(); barrier();
/* check for hypervisor ring buffer overflow, ignore this entry if so */ /* check for hypervisor ring buffer overflow, ignore this entry if so */
if (index + N_DISPATCH_LOG < vpa->dtl_idx) if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
return; return;
++wp; ++wp;
......
@@ -106,7 +106,7 @@ void vpa_init(int cpu)
lppaca_of(cpu).dtl_idx = 0; lppaca_of(cpu).dtl_idx = 0;
/* hypervisor reads buffer length from this field */ /* hypervisor reads buffer length from this field */
dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
ret = register_dtl(hwcpu, __pa(dtl)); ret = register_dtl(hwcpu, __pa(dtl));
if (ret) if (ret)
pr_err("WARNING: DTL registration of cpu %d (hw %d) " pr_err("WARNING: DTL registration of cpu %d (hw %d) "
......
@@ -45,7 +45,11 @@ static inline void idle_loop_prolog(unsigned long *in_purr)
static inline void idle_loop_epilog(unsigned long in_purr) static inline void idle_loop_epilog(unsigned long in_purr)
{ {
get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; u64 wait_cycles;
wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
wait_cycles += mfspr(SPRN_PURR) - in_purr;
get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
get_lppaca()->idle = 0; get_lppaca()->idle = 0;
} }
......
@@ -323,7 +323,7 @@ static int alloc_dispatch_logs(void)
get_paca()->lppaca_ptr->dtl_idx = 0; get_paca()->lppaca_ptr->dtl_idx = 0;
/* hypervisor reads buffer length from this field */ /* hypervisor reads buffer length from this field */
dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
if (ret) if (ret)
pr_err("WARNING: DTL registration of cpu %d (hw %d) failed " pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment