Commit 6bb82119 authored by Linus Torvalds

Merge tag 'powerpc-4.15-7' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "One fix for an oops at boot if we take a hotplug interrupt before we
  are ready to handle it.

  The bulk is patches to implement mitigation for Meltdown, see the
  change logs for more details.

  Thanks to: Nicholas Piggin, Michael Neuling, Oliver O'Halloran, Jon
  Masters, Jose Ricardo Ziviani, David Gibson"

* tag 'powerpc-4.15-7' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/powernv: Check device-tree for RFI flush settings
  powerpc/pseries: Query hypervisor for RFI flush settings
  powerpc/64s: Support disabling RFI flush with no_rfi_flush and nopti
  powerpc/64s: Add support for RFI flush of L1-D cache
  powerpc/64s: Convert slb_miss_common to use RFI_TO_USER/KERNEL
  powerpc/64: Convert fast_exception_return to use RFI_TO_USER/KERNEL
  powerpc/64: Convert the syscall exit path to use RFI_TO_USER/KERNEL
  powerpc/64s: Simple RFI macro conversions
  powerpc/64: Add macros for annotating the destination of rfid/hrfid
  powerpc/pseries: Add H_GET_CPU_CHARACTERISTICS flags & wrapper
  powerpc/pseries: Make RAS IRQ explicitly dependent on DLPAR WQ
parents 9443c168 6e032b35
@@ -209,5 +209,11 @@ exc_##label##_book3e:
ori r3,r3,vector_offset@l; \
mtspr SPRN_IVOR##vector_number,r3;
#define RFI_TO_KERNEL \
rfi
#define RFI_TO_USER \
rfi
#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
@@ -74,6 +74,59 @@
*/
#define EX_R3 EX_DAR
/*
* Macros for annotating the expected destination of (h)rfid
*
* The nop instructions allow us to insert one or more instructions to flush the
* L1-D cache when returning to userspace or a guest.
*/
#define RFI_FLUSH_SLOT \
RFI_FLUSH_FIXUP_SECTION; \
nop; \
nop; \
nop
#define RFI_TO_KERNEL \
rfid
#define RFI_TO_USER \
RFI_FLUSH_SLOT; \
rfid; \
b rfi_flush_fallback
#define RFI_TO_USER_OR_KERNEL \
RFI_FLUSH_SLOT; \
rfid; \
b rfi_flush_fallback
#define RFI_TO_GUEST \
RFI_FLUSH_SLOT; \
rfid; \
b rfi_flush_fallback
#define HRFI_TO_KERNEL \
hrfid
#define HRFI_TO_USER \
RFI_FLUSH_SLOT; \
hrfid; \
b hrfi_flush_fallback
#define HRFI_TO_USER_OR_KERNEL \
RFI_FLUSH_SLOT; \
hrfid; \
b hrfi_flush_fallback
#define HRFI_TO_GUEST \
RFI_FLUSH_SLOT; \
hrfid; \
b hrfi_flush_fallback
#define HRFI_TO_UNKNOWN \
RFI_FLUSH_SLOT; \
hrfid; \
b hrfi_flush_fallback
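/*
 * Illustration only (not part of this commit): before any fixups are applied,
 * a RFI_TO_USER site assembles roughly as
 *
 *   951:  nop                      <- RFI_FLUSH_SLOT; address recorded in the
 *         nop                         __rfi_flush_fixup section
 *         nop
 *         rfid
 *         b   rfi_flush_fallback   <- only reached once the first nop has been
 *                                     patched to "b .+16" (fallback flush)
 *
 * do_rfi_flush_fixups() later rewrites the three nops according to the enabled
 * L1D flush type; see the patching code further down in this commit.
 */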
#ifdef CONFIG_RELOCATABLE
#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
@@ -218,7 +271,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mtspr SPRN_##h##SRR0,r12; \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
mtspr SPRN_##h##SRR1,r10; \
-h##rfid; \
+h##RFI_TO_KERNEL; \
b . /* prevent speculative execution */
#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
__EXCEPTION_PROLOG_PSERIES_1(label, h)
@@ -232,7 +285,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mtspr SPRN_##h##SRR0,r12; \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
mtspr SPRN_##h##SRR1,r10; \
-h##rfid; \
+h##RFI_TO_KERNEL; \
b . /* prevent speculative execution */
#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
......
@@ -187,7 +187,20 @@ label##3: \
FTR_ENTRY_OFFSET label##1b-label##3b; \
.popsection;
#define RFI_FLUSH_FIXUP_SECTION \
951: \
.pushsection __rfi_flush_fixup,"a"; \
.align 2; \
952: \
FTR_ENTRY_OFFSET 951b-952b; \
.popsection;
#ifndef __ASSEMBLY__
#include <linux/types.h>
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
void apply_feature_fixups(void);
void setup_feature_keys(void);
#endif
......
@@ -241,6 +241,7 @@
#define H_GET_HCA_INFO 0x1B8
#define H_GET_PERF_COUNT 0x1BC
#define H_MANAGE_TRACE 0x1C0
#define H_GET_CPU_CHARACTERISTICS 0x1C8
#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
#define H_QUERY_INT_STATE 0x1E4
#define H_POLL_PENDING 0x1D8
@@ -330,6 +331,17 @@
#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2
/* >= 0 values are CPU number */
/* H_GET_CPU_CHARACTERISTICS return values */
#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0
#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1
#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
#define PROC_TABLE_DEREG 0x10
@@ -436,6 +448,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
}
}
struct h_cpu_char_result {
u64 character;
u64 behaviour;
};
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HVCALL_H */
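The H_CPU_CHAR_* and H_CPU_BEHAV_* values above are written against IBM bit
numbering, where bit 0 is the most significant bit of the 64-bit doubleword.
A minimal sketch of a hypothetical helper (not part of this commit) that makes
the conversion explicit:

/* Hypothetical helper: turn an IBM (MSB-0) bit number into a mask. */
#define IBM_BIT_ULL(ibm_bit)	(1ull << (63 - (ibm_bit)))

#define EXAMPLE_SPEC_BAR_ORI31		IBM_BIT_ULL(0)	/* same value as (1ull << 63) */
#define EXAMPLE_L1D_FLUSH_TRIG2		IBM_BIT_ULL(3)	/* same value as (1ull << 60) */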
@@ -232,6 +232,16 @@ struct paca_struct {
struct sibling_subcore_state *sibling_subcore_state;
#endif
#endif
#ifdef CONFIG_PPC_BOOK3S_64
/*
* rfi fallback flush must be in its own cacheline to prevent
* other paca data leaking into the L1d
*/
u64 exrfi[EX_SIZE] __aligned(0x80);
void *rfi_flush_fallback_area;
u64 l1d_flush_congruence;
u64 l1d_flush_sets;
#endif
};
extern void copy_mm_to_paca(struct mm_struct *mm);
......
@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu)
return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
}
static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
if (rc == H_SUCCESS) {
p->character = retbuf[0];
p->behaviour = retbuf[1];
}
return rc;
}
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
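A minimal usage sketch for the wrapper above (an assumed example caller, not
part of this commit; the real consumer is pseries_setup_rfi_flush() further
down):

static bool needs_l1d_flush_on_user_exit(void)
{
	struct h_cpu_char_result res;

	if (plpar_get_cpu_characteristics(&res) != H_SUCCESS)
		return true;	/* hcall unavailable: assume the flush is wanted */

	return !!(res.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR);
}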
@@ -39,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {}
static inline void pseries_little_endian_exceptions(void) {}
#endif /* CONFIG_PPC_PSERIES */
void rfi_flush_enable(bool enable);
/* These are bit flags */
enum l1d_flush_type {
L1D_FLUSH_NONE = 0x1,
L1D_FLUSH_FALLBACK = 0x2,
L1D_FLUSH_ORI = 0x4,
L1D_FLUSH_MTTRIG = 0x8,
};
void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */
......
@@ -237,6 +237,11 @@ int main(void)
OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
OFFSET(PACA_IN_MCE, paca_struct, in_mce);
OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
OFFSET(PACA_EXRFI, paca_struct, exrfi);
OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
#endif
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
......
@@ -37,6 +37,11 @@
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
/*
* System calls.
@@ -262,13 +267,23 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
ld r2,GPR2(r1)
ld r1,GPR1(r1)
mtlr r4
mtcr r5
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r8
RFI_TO_USER
b . /* prevent speculative execution */
/* exit to kernel */
1: ld r2,GPR2(r1)
ld r1,GPR1(r1)
mtlr r4
mtcr r5
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r8
-RFI
+RFI_TO_KERNEL
b . /* prevent speculative execution */
.Lsyscall_error:
@@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
mtmsrd r10, 1
mtspr SPRN_SRR0, r11
mtspr SPRN_SRR1, r12
RFI_TO_USER
-rfid
b . /* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
@@ -878,7 +892,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
REST_GPR(13, r1)
-1:
mtspr SPRN_SRR1,r3
ld r2,_CCR(r1)
@@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r3,GPR3(r1)
ld r4,GPR4(r1)
ld r1,GPR1(r1)
RFI_TO_USER
b . /* prevent speculative execution */
1: mtspr SPRN_SRR1,r3
ld r2,_CCR(r1)
mtcrf 0xFF,r2
ld r2,_NIP(r1)
mtspr SPRN_SRR0,r2
ld r0,GPR0(r1)
ld r2,GPR2(r1)
ld r3,GPR3(r1)
ld r4,GPR4(r1)
ld r1,GPR1(r1)
RFI_TO_KERNEL
-rfid
b . /* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3E */
@@ -1073,7 +1101,7 @@ __enter_rtas:
mtspr SPRN_SRR0,r5
mtspr SPRN_SRR1,r6
-rfid
+RFI_TO_KERNEL
b . /* prevent speculative execution */
rtas_return_loc:
@@ -1098,7 +1126,7 @@ rtas_return_loc:
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
-rfid
+RFI_TO_KERNEL
b . /* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
@@ -1171,7 +1199,7 @@ _GLOBAL(enter_prom)
LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
andc r11,r11,r12
mtsrr1 r11
-rfid
+RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */
1: /* Return from OF */
......
@@ -256,7 +256,7 @@ BEGIN_FTR_SECTION
LOAD_HANDLER(r12, machine_check_handle_early)
1: mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r11
-rfid
+RFI_TO_KERNEL
b . /* prevent speculative execution */
2:
/* Stack overflow. Stay on emergency stack and panic.
@@ -445,7 +445,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
li r3,MSR_ME
andc r10,r10,r3 /* Turn off MSR_ME */
mtspr SPRN_SRR1,r10
-rfid
+RFI_TO_KERNEL
b .
2:
/*
@@ -463,7 +463,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
*/
bl machine_check_queue_event
MACHINE_CHECK_HANDLER_WINDUP
-rfid
+RFI_TO_USER_OR_KERNEL
9:
/* Deliver the machine check to host kernel in V mode. */
MACHINE_CHECK_HANDLER_WINDUP
@@ -598,6 +598,9 @@ EXC_COMMON_BEGIN(slb_miss_common)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
andi. r9,r11,MSR_PR // Check for exception from userspace
cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later
/*
* Test MSR_RI before calling slb_allocate_realmode, because the
* MSR in r11 gets clobbered. However we still want to allocate
@@ -624,9 +627,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
/* All done -- return from exception. */
bne cr4,1f /* returning to kernel */
.machine push
.machine "power4"
mtcrf 0x80,r9
mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
mtcrf 0x02,r9 /* I/D indication is in cr6 */
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
@@ -640,9 +646,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
-rfid
+RFI_TO_USER
b . /* prevent speculative execution */
1:
.machine push
.machine "power4"
mtcrf 0x80,r9
mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
mtcrf 0x02,r9 /* I/D indication is in cr6 */
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
RESTORE_CTR(r9, PACA_EXSLB)
RESTORE_PPR_PACA(PACA_EXSLB, r9)
mr r3,r12
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
RFI_TO_KERNEL
b . /* prevent speculative execution */
2: std r3,PACA_EXSLB+EX_DAR(r13)
mr r3,r12
mfspr r11,SPRN_SRR0
@@ -651,7 +678,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
-rfid
+RFI_TO_KERNEL
b .
8: std r3,PACA_EXSLB+EX_DAR(r13)
@@ -662,7 +689,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
-rfid
+RFI_TO_KERNEL
b .
EXC_COMMON_BEGIN(unrecov_slb)
@@ -901,7 +928,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
mtspr SPRN_SRR0,r10 ; \
ld r10,PACAKMSR(r13) ; \
mtspr SPRN_SRR1,r10 ; \
-rfid ; \
+RFI_TO_KERNEL ; \
b . ; /* prevent speculative execution */
#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
@@ -917,7 +944,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
xori r12,r12,MSR_LE ; \
mtspr SPRN_SRR1,r12 ; \
mr r13,r9 ; \
-rfid ; /* return to userspace */ \
+RFI_TO_USER ; /* return to userspace */ \
b . ; /* prevent speculative execution */
#else
#define SYSCALL_FASTENDIAN_TEST
@@ -1063,7 +1090,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
mtcr r11
REST_GPR(11, r1)
ld r1,GPR1(r1)
-hrfid
+HRFI_TO_USER_OR_KERNEL
1: mtcr r11
REST_GPR(11, r1)
@@ -1314,7 +1341,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
-HRFID
+HRFI_TO_UNKNOWN
b .
#endif
@@ -1418,10 +1445,94 @@ masked_##_H##interrupt: \
ld r10,PACA_EXGEN+EX_R10(r13); \
ld r11,PACA_EXGEN+EX_R11(r13); \
/* returns to kernel where r13 must be set up, so don't restore it */ \
-##_H##rfid; \
+##_H##RFI_TO_KERNEL; \
b .; \
MASKED_DEC_HANDLER(_H)
TRAMP_REAL_BEGIN(rfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
std r12,PACA_EXRFI+EX_R12(r13)
std r8,PACA_EXRFI+EX_R13(r13)
mfctr r9
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SETS(r13)
ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
/*
* The load addresses are at staggered offsets within cachelines,
* which suits some pipelines better (on others it should not
* hurt).
*/
addi r12,r12,8
mtctr r11
DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
/* order ld/st prior to dcbt stop all streams with flushing */
sync
1: li r8,0
.rept 8 /* 8-way set associative */
ldx r11,r10,r8
add r8,r8,r12
xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
add r8,r8,r11 // Add 0, this creates a dependency on the ldx
.endr
addi r10,r10,128 /* 128 byte cache line */
bdnz 1b
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
ld r12,PACA_EXRFI+EX_R12(r13)
ld r8,PACA_EXRFI+EX_R13(r13)
GET_SCRATCH0(r13);
rfid
TRAMP_REAL_BEGIN(hrfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
std r12,PACA_EXRFI+EX_R12(r13)
std r8,PACA_EXRFI+EX_R13(r13)
mfctr r9
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SETS(r13)
ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
/*
* The load addresses are at staggered offsets within cachelines,
* which suits some pipelines better (on others it should not
* hurt).
*/
addi r12,r12,8
mtctr r11
DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
/* order ld/st prior to dcbt stop all streams with flushing */
sync
1: li r8,0
.rept 8 /* 8-way set associative */
ldx r11,r10,r8
add r8,r8,r12
xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
add r8,r8,r11 // Add 0, this creates a dependency on the ldx
.endr
addi r10,r10,128 /* 128 byte cache line */
bdnz 1b
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
ld r12,PACA_EXRFI+EX_R12(r13)
ld r8,PACA_EXRFI+EX_R13(r13)
GET_SCRATCH0(r13);
hrfid
/*
* Real mode exceptions actually use this too, but alternate
* instruction code patches (which end up in the common .text area)
@@ -1441,7 +1552,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
addi r13, r13, 4
mtspr SPRN_SRR0, r13
GET_SCRATCH0(r13)
-rfid
+RFI_TO_KERNEL
b .
TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
@@ -1453,7 +1564,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
addi r13, r13, 4
mtspr SPRN_HSRR0, r13
GET_SCRATCH0(r13)
-hrfid
+HRFI_TO_KERNEL
b .
#endif
......
@@ -801,3 +801,104 @@ static int __init disable_hardlockup_detector(void)
return 0;
}
early_initcall(disable_hardlockup_detector);
#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;
static int __init handle_no_rfi_flush(char *p)
{
pr_info("rfi-flush: disabled on command line.");
no_rfi_flush = true;
return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);
/*
* The RFI flush is not KPTI, but because users will see doco that says to use
* nopti we hijack that option here to also disable the RFI flush.
*/
static int __init handle_no_pti(char *p)
{
pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
handle_no_rfi_flush(NULL);
return 0;
}
early_param("nopti", handle_no_pti);
static void do_nothing(void *unused)
{
/*
* We don't need to do the flush explicitly, just enter+exit kernel is
* sufficient, the RFI exit handlers will do the right thing.
*/
}
void rfi_flush_enable(bool enable)
{
if (rfi_flush == enable)
return;
if (enable) {
do_rfi_flush_fixups(enabled_flush_types);
on_each_cpu(do_nothing, NULL, 1);
} else
do_rfi_flush_fixups(L1D_FLUSH_NONE);
rfi_flush = enable;
}
static void init_fallback_flush(void)
{
u64 l1d_size, limit;
int cpu;
l1d_size = ppc64_caches.l1d.size;
limit = min(safe_stack_limit(), ppc64_rma_size);
/*
* Align to L1d size, and size it at 2x L1d size, to catch possible
* hardware prefetch runoff. We don't have a recipe for load patterns to
* reliably avoid the prefetcher.
*/
l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
memset(l1d_flush_fallback_area, 0, l1d_size * 2);
for_each_possible_cpu(cpu) {
/*
* The fallback flush is currently coded for 8-way
* associativity. Different associativity is possible, but it
* will be treated as 8-way and may not evict the lines as
* effectively.
*
* 128 byte lines are mandatory.
*/
u64 c = l1d_size / 8;
paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
paca[cpu].l1d_flush_congruence = c;
paca[cpu].l1d_flush_sets = c / 128;
}
}
void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
if (types & L1D_FLUSH_FALLBACK) {
pr_info("rfi-flush: Using fallback displacement flush\n");
init_fallback_flush();
}
if (types & L1D_FLUSH_ORI)
pr_info("rfi-flush: Using ori type flush\n");
if (types & L1D_FLUSH_MTTRIG)
pr_info("rfi-flush: Using mttrig type flush\n");
enabled_flush_types = types;
if (!no_rfi_flush)
rfi_flush_enable(enable);
}
#endif /* CONFIG_PPC_BOOK3S_64 */
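As a rough C model of what the rfi_flush_fallback assembly earlier in this
commit does with the per-CPU values computed in init_fallback_flush() above
(an illustration only, under the assumptions noted in the comments, not code
from this commit):

/*
 * 'congruence' is the size of one way (l1d_size / 8) and 'sets' is the
 * number of 128-byte lines per way (congruence / 128).
 */
static void model_fallback_flush(const volatile char *area,
				 unsigned long congruence, unsigned long sets)
{
	unsigned long stride = congruence + 8;	/* next way, staggered by 8 bytes */
	unsigned long set, way, off;
	volatile char sink = 0;

	for (set = 0; set < sets; set++) {	/* one 128-byte line per iteration */
		off = set * 128;
		for (way = 0; way < 8; way++) {	/* 8-way set associative */
			sink = area[off];	/* displace one way of this set */
			off += stride;
		}
	}
	(void)sink;
}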
@@ -132,6 +132,15 @@ SECTIONS
/* Read-only data */
RO_DATA(PAGE_SIZE)
#ifdef CONFIG_PPC64
. = ALIGN(8);
__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
__start___rfi_flush_fixup = .;
*(__rfi_flush_fixup)
__stop___rfi_flush_fixup = .;
}
#endif
EXCEPTION_TABLE(0)
NOTES :kernel :notes
......
@@ -79,7 +79,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
mtmsrd r0,1 /* clear RI in MSR */
mtsrr0 r5
mtsrr1 r6
-RFI
+RFI_TO_KERNEL
kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
@@ -199,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtmsrd r6, 1 /* Clear RI in MSR */
mtsrr0 r8
mtsrr1 r7
-RFI
+RFI_TO_KERNEL
/* Virtual-mode return */
.Lvirt_return:
@@ -1167,8 +1167,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r0, VCPU_GPR(R0)(r4)
ld r4, VCPU_GPR(R4)(r4)
HRFI_TO_GUEST
-hrfid
b .
secondary_too_late:
@@ -3320,7 +3319,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
ld r4, PACAKMSR(r13)
mtspr SPRN_SRR0, r3
mtspr SPRN_SRR1, r4
-rfid
+RFI_TO_KERNEL
9: addi r3, r1, STACK_FRAME_OVERHEAD
bl kvmppc_bad_interrupt
b 9b
......
@@ -46,6 +46,9 @@
#define FUNC(name) name
#define RFI_TO_KERNEL RFI
#define RFI_TO_GUEST RFI
.macro INTERRUPT_TRAMPOLINE intno
.global kvmppc_trampoline_\intno
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
GET_SCRATCH0(r13)
/* And get back into the code */
-RFI
+RFI_TO_KERNEL
#endif
/*
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
ori r5, r5, MSR_EE
mtsrr0 r7
mtsrr1 r6
-RFI
+RFI_TO_KERNEL
#include "book3s_segment.S" #include "book3s_segment.S"
...@@ -156,7 +156,7 @@ no_dcbz32_on: ...@@ -156,7 +156,7 @@ no_dcbz32_on:
PPC_LL r9, SVCPU_R9(r3) PPC_LL r9, SVCPU_R9(r3)
PPC_LL r3, (SVCPU_R3)(r3) PPC_LL r3, (SVCPU_R3)(r3)
RFI RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:
@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
beqa BOOK3S_INTERRUPT_DOORBELL
-RFI
+RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end:
@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
#ifdef CONFIG_PPC_BOOK3S_64
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
unsigned int instrs[3], *dest;
long *start, *end;
int i;
start = PTRRELOC(&__start___rfi_flush_fixup),
end = PTRRELOC(&__stop___rfi_flush_fixup);
instrs[0] = 0x60000000; /* nop */
instrs[1] = 0x60000000; /* nop */
instrs[2] = 0x60000000; /* nop */
if (types & L1D_FLUSH_FALLBACK)
/* b .+16 to fallback flush */
instrs[0] = 0x48000010;
i = 0;
if (types & L1D_FLUSH_ORI) {
instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
}
if (types & L1D_FLUSH_MTTRIG)
instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
patch_instruction(dest, instrs[0]);
patch_instruction(dest + 1, instrs[1]);
patch_instruction(dest + 2, instrs[2]);
}
printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
}
#endif /* CONFIG_PPC_BOOK3S_64 */
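/*
 * Illustration only (not part of this commit): after do_rfi_flush_fixups()
 * runs, a RFI_TO_USER slot ends up as roughly one of
 *
 *   L1D_FLUSH_FALLBACK        L1D_FLUSH_ORI              L1D_FLUSH_MTTRIG
 *     b    .+16                 ori   31,31,0              mtspr TRIG2,r0
 *     nop                       ori   30,30,0              nop
 *     nop                       nop                        nop
 *     rfid                      rfid                       rfid
 *     b    rfi_flush_fallback   b  rfi_flush_fallback      b  rfi_flush_fallback
 *
 * In the fallback case the initial branch skips the rfid and reaches
 * rfi_flush_fallback, which performs the displacement flush and then does the
 * rfid itself; in the other two cases the patched instruction(s) trigger the
 * hardware flush and the rfid returns directly.
 */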
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
long *start, *end;
......
@@ -37,13 +37,62 @@
#include <asm/kexec.h>
#include <asm/smp.h>
#include <asm/tm.h>
#include <asm/setup.h>
#include "powernv.h" #include "powernv.h"
static void pnv_setup_rfi_flush(void)
{
struct device_node *np, *fw_features;
enum l1d_flush_type type;
int enable;
/* Default to fallback in case fw-features are not available */
type = L1D_FLUSH_FALLBACK;
enable = 1;
np = of_find_node_by_name(NULL, "ibm,opal");
fw_features = of_get_child_by_name(np, "fw-features");
of_node_put(np);
if (fw_features) {
np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
if (np && of_property_read_bool(np, "enabled"))
type = L1D_FLUSH_MTTRIG;
of_node_put(np);
np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
if (np && of_property_read_bool(np, "enabled"))
type = L1D_FLUSH_ORI;
of_node_put(np);
/* Enable unless firmware says NOT to */
enable = 2;
np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
if (np && of_property_read_bool(np, "disabled"))
enable--;
of_node_put(np);
np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
if (np && of_property_read_bool(np, "disabled"))
enable--;
of_node_put(np);
of_node_put(fw_features);
}
setup_rfi_flush(type, enable > 0);
}
static void __init pnv_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
pnv_setup_rfi_flush();
/* Initialize SMP */
pnv_smp_init();
......
@@ -574,11 +574,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
static CLASS_ATTR_RW(dlpar);
-static int __init pseries_dlpar_init(void)
+int __init dlpar_workqueue_init(void)
{
if (pseries_hp_wq)
return 0;
pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
WQ_UNBOUND, 1);
return pseries_hp_wq ? 0 : -ENOMEM;
}
static int __init dlpar_sysfs_init(void)
{
int rc;
rc = dlpar_workqueue_init();
if (rc)
return rc;
return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
-machine_device_initcall(pseries, pseries_dlpar_init);
+machine_device_initcall(pseries, dlpar_sysfs_init);
@@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void)
return CMO_PageSize;
}
int dlpar_workqueue_init(void);
#endif /* _PSERIES_PSERIES_H */
@@ -69,7 +69,8 @@ static int __init init_ras_IRQ(void)
/* Hotplug Events */
np = of_find_node_by_path("/event-sources/hot-plug-events");
if (np != NULL) {
-request_event_sources_irqs(np, ras_hotplug_interrupt,
+if (dlpar_workqueue_init() == 0)
request_event_sources_irqs(np, ras_hotplug_interrupt,
"RAS_HOTPLUG"); "RAS_HOTPLUG");
of_node_put(np); of_node_put(np);
} }
......
@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void)
of_pci_check_probe_only();
}
static void pseries_setup_rfi_flush(void)
{
struct h_cpu_char_result result;
enum l1d_flush_type types;
bool enable;
long rc;
/* Enable by default */
enable = true;
rc = plpar_get_cpu_characteristics(&result);
if (rc == H_SUCCESS) {
types = L1D_FLUSH_NONE;
if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
types |= L1D_FLUSH_MTTRIG;
if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
types |= L1D_FLUSH_ORI;
/* Use fallback if nothing set in hcall */
if (types == L1D_FLUSH_NONE)
types = L1D_FLUSH_FALLBACK;
if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
enable = false;
} else {
/* Default to fallback in case the hcall is not available */
types = L1D_FLUSH_FALLBACK;
}
setup_rfi_flush(types, enable);
}
static void __init pSeries_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void)
fwnmi_init();
pseries_setup_rfi_flush();
/* By default, only probe PCI (can be overridden by rtas_pci) */
pci_add_flags(PCI_PROBE_ONLY);
......