Commit 2bf1071a authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s: Remove POWER9 DD1 support

POWER9 DD1 was never a product. It is no longer supported by upstream
firmware, and it is not effectively supported in Linux due to lack of
testing.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Michael Ellerman <mpe@ellerman.id.au>
[mpe: Remove arch_make_huge_pte() entirely]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent ce397d21
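Almost every hunk below deletes a branch guarded by cpu_has_feature(CPU_FTR_POWER9_DD1), plus the helpers that only those branches used. As orientation, here is a small stand-alone sketch of that recurring pattern, using user-space stand-ins rather than the kernel's real feature machinery (the constant matches the define removed below; everything else is illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

#define CPU_FTR_POWER9_DD1 0x0000040000000000UL   /* the flag this patch deletes */

static unsigned long cur_cpu_features;            /* stand-in: set at boot from the PVR */

static bool cpu_has_feature(unsigned long feature)
{
    return (cur_cpu_features & feature) != 0;
}

/* The shape of the workarounds removed throughout this diff:
 * a DD1-only slow path sitting next to the normal one. */
static void pte_update(void)
{
    if (cpu_has_feature(CPU_FTR_POWER9_DD1))
        puts("DD1 path: clear the pte, flush, then write the new value");
    else
        puts("normal path: one atomic update");
}

int main(void)
{
    pte_update();                              /* normal path */
    cur_cpu_features |= CPU_FTR_POWER9_DD1;
    pte_update();                              /* the path being removed */
    return 0;
}
```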
@@ -32,26 +32,6 @@ static inline int hstate_get_psize(struct hstate *hstate)
     }
 }
 
-#define arch_make_huge_pte arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
-                                       struct page *page, int writable)
-{
-    unsigned long page_shift;
-
-    if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
-        return entry;
-
-    page_shift = huge_page_shift(hstate_vma(vma));
-    /*
-     * We don't support 1G hugetlb pages yet.
-     */
-    VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
-    if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
-        return __pte(pte_val(entry) | R_PAGE_LARGE);
-    else
-        return entry;
-}
-
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
 static inline bool gigantic_page_supported(void)
 {
...
@@ -474,9 +474,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 {
     if (full && radix_enabled()) {
         /*
-         * Let's skip the DD1 style pte update here. We know that
-         * this is a full mm pte clear and hence can be sure there is
-         * no parallel set_pte.
+         * We know that this is a full mm pte clear and
+         * hence can be sure there is no parallel set_pte.
          */
         return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
     }
...
@@ -12,12 +12,6 @@
 #include <asm/book3s/64/radix-4k.h>
 #endif
 
-/*
- * For P9 DD1 only, we need to track whether the pte's huge.
- */
-#define R_PAGE_LARGE    _RPAGE_RSV1
-
 #ifndef __ASSEMBLY__
 #include <asm/book3s/64/tlbflush-radix.h>
 #include <asm/cpu_has_feature.h>
@@ -154,19 +148,6 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 {
     unsigned long old_pte;
 
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-        unsigned long new_pte;
-
-        old_pte = __radix_pte_update(ptep, ~0ul, 0);
-        /*
-         * new value of pte
-         */
-        new_pte = (old_pte | set) & ~clr;
-        radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
-        if (new_pte)
-            __radix_pte_update(ptep, 0, new_pte);
-    } else
-        old_pte = __radix_pte_update(ptep, clr, set);
+    old_pte = __radix_pte_update(ptep, clr, set);
 
     if (!huge)
         assert_pte_locked(mm, addr);
@@ -253,8 +234,6 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
 static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
 {
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-        return __pmd(pmd_val(pmd) | _PAGE_PTE | R_PAGE_LARGE);
     return __pmd(pmd_val(pmd) | _PAGE_PTE);
 }
@@ -285,18 +264,14 @@ static inline unsigned long radix__get_tree_size(void)
     unsigned long rts_field;
     /*
      * We support 52 bits, hence:
-     * DD1 52-28 = 24, 0b11000
-     * Others 52-31 = 21, 0b10101
+     * bits 52 - 31 = 21, 0b10101
      * RTS encoding details
      * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
      * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
      */
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-        rts_field = (0x3UL << 61);
-    else {
-        rts_field = (0x5UL << 5); /* 6 - 8 bits */
-        rts_field |= (0x2UL << 61);
-    }
+    rts_field = (0x5UL << 5); /* 6 - 8 bits */
+    rts_field |= (0x2UL << 61);
 
     return rts_field;
 }
...
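The surviving branch above packs the radix tree size for a 52-bit address space (RTS = 52 - 31 = 21, 0b10101) into the two split fields the comment describes. A minimal user-space sketch of that packing and its round-trip, assuming the same shifts as the code above (the unpacking arithmetic is mine, for illustration only):

```c
#include <stdio.h>

int main(void)
{
    unsigned long rts_field;

    /* RTS for a 52-bit space: 52 - 31 = 21 = 0b10101 */
    rts_field  = (0x5UL << 5);   /* low three RTS bits */
    rts_field |= (0x2UL << 61);  /* high two RTS bits */

    /* Recover the value to check the packing round-trips. */
    unsigned long rts = ((rts_field >> 5) & 0x7) |
                        (((rts_field >> 61) & 0x3) << 3);
    printf("rts = %lu -> %lu-bit address space\n", rts, rts + 31); /* 21 -> 52 */
    return 0;
}
```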
@@ -48,8 +48,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
 extern void radix__flush_tlb_all(void);
-extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
-                                        unsigned long address);
 
 extern void radix__flush_tlb_lpid_page(unsigned int lpid,
                                        unsigned long addr,
...
@@ -210,7 +210,6 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_DAWR                LONG_ASM_CONST(0x0000008000000000)
 #define CPU_FTR_DABRX               LONG_ASM_CONST(0x0000010000000000)
 #define CPU_FTR_PMAO_BUG            LONG_ASM_CONST(0x0000020000000000)
-#define CPU_FTR_POWER9_DD1          LONG_ASM_CONST(0x0000040000000000)
 #define CPU_FTR_POWER9_DD2_1        LONG_ASM_CONST(0x0000080000000000)
 #define CPU_FTR_P9_TM_HV_ASSIST     LONG_ASM_CONST(0x0000100000000000)
 #define CPU_FTR_P9_TM_XER_SO_BUG    LONG_ASM_CONST(0x0000200000000000)
@@ -464,8 +463,6 @@ static inline void cpu_feature_keys_init(void) { }
         CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
         CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
         CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
-#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
-                             (~CPU_FTR_SAO))
 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
 #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
@@ -489,16 +486,14 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTRS_POSSIBLE   \
         (CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
          CPU_FTRS_POWER8_DD1 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | \
-         CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \
-         CPU_FTRS_POWER9_DD2_2)
+         CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
 #else
 #define CPU_FTRS_POSSIBLE   \
         (CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
          CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
          CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \
          CPU_FTRS_PA6T | CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | \
-         CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \
-         CPU_FTRS_POWER9_DD2_2)
+         CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
 #endif
 #else
@@ -567,7 +562,7 @@ enum {
 #define CPU_FTRS_ALWAYS \
         (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
          CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER8_DD1 & \
-         CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
+         CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD2_1 & \
          CPU_FTRS_DT_CPU_BASE)
 #else
 #define CPU_FTRS_ALWAYS \
@@ -575,7 +570,7 @@ enum {
          CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
          CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
          CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \
-         CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
+         CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD2_1 & \
          CPU_FTRS_DT_CPU_BASE)
 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
 #endif
...
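For readers unfamiliar with these masks: CPU_FTRS_POSSIBLE ORs together the feature words of every CPU the kernel may run on, while CPU_FTRS_ALWAYS ANDs them, so feature tests can be folded to compile-time constants where possible. A toy, user-space illustration of that mechanism (hypothetical feature values and a simplified has_feature(), not the real kernel masks or API):

```c
#include <stdio.h>

#define FTR_A 0x1UL
#define FTR_B 0x2UL
#define FTR_C 0x4UL

/* Hypothetical per-CPU feature sets, standing in for CPU_FTRS_POWER9 etc. */
#define FTRS_CPU1 (FTR_A | FTR_B)
#define FTRS_CPU2 (FTR_A | FTR_C)

#define FTRS_POSSIBLE (FTRS_CPU1 | FTRS_CPU2) /* union: may be present */
#define FTRS_ALWAYS   (FTRS_CPU1 & FTRS_CPU2) /* intersection: always present */

static int has_feature(unsigned long cur_features, unsigned long feature)
{
    if (!(FTRS_POSSIBLE & feature))        /* can never be set: folds to 0 */
        return 0;
    if ((FTRS_ALWAYS & feature) == feature) /* always set: folds to 1 */
        return 1;
    return (cur_features & feature) != 0;   /* otherwise a runtime test */
}

int main(void)
{
    printf("%d %d %d\n",
           has_feature(FTRS_CPU1, FTR_A),   /* 1: in ALWAYS */
           has_feature(FTRS_CPU1, FTR_C),   /* 0: runtime test */
           has_feature(FTRS_CPU1, 0x8UL));  /* 0: not POSSIBLE */
    return 0;
}
```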
@@ -187,11 +187,6 @@ struct paca_struct {
     u8 subcore_sibling_mask;
     /* Flag to request this thread not to stop */
     atomic_t dont_stop;
-    /*
-     * Pointer to an array which contains pointer
-     * to the sibling threads' paca.
-     */
-    struct paca_struct **thread_sibling_pacas;
     /* The PSSCR value that the kernel requested before going to stop */
     u64 requested_psscr;
...
@@ -766,7 +766,6 @@ int main(void)
     OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state);
     OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask);
     OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
-    OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas);
     OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
     OFFSET(PACA_DONT_STOP, paca_struct, dont_stop);
 #define STOP_SPR(x, f)  OFFSET(x, paca_struct, stop_sprs.f)
...
@@ -485,25 +485,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
         .machine_check_early    = __machine_check_early_realmode_p8,
         .platform               = "power8",
     },
-    {   /* Power9 DD1*/
-        .pvr_mask               = 0xffffff00,
-        .pvr_value              = 0x004e0100,
-        .cpu_name               = "POWER9 (raw)",
-        .cpu_features           = CPU_FTRS_POWER9_DD1,
-        .cpu_user_features      = COMMON_USER_POWER9,
-        .cpu_user_features2     = COMMON_USER2_POWER9,
-        .mmu_features           = MMU_FTRS_POWER9,
-        .icache_bsize           = 128,
-        .dcache_bsize           = 128,
-        .num_pmcs               = 6,
-        .pmc_type               = PPC_PMC_IBM,
-        .oprofile_cpu_type      = "ppc64/power9",
-        .oprofile_type          = PPC_OPROFILE_INVALID,
-        .cpu_setup              = __setup_cpu_power9,
-        .cpu_restore            = __restore_cpu_power9,
-        .machine_check_early    = __machine_check_early_realmode_p9,
-        .platform               = "power9",
-    },
     {   /* Power9 DD2.0 */
         .pvr_mask               = 0xffffefff,
         .pvr_value              = 0x004e0200,
...
@@ -701,9 +701,7 @@ static __init void cpufeatures_cpu_quirks(void)
     /*
      * Not all quirks can be derived from the cpufeatures device tree.
      */
-    if ((version & 0xffffff00) == 0x004e0100)
-        cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
-    else if ((version & 0xffffefff) == 0x004e0200)
+    if ((version & 0xffffefff) == 0x004e0200)
         ; /* DD2.0 has no feature flag */
     else if ((version & 0xffffefff) == 0x004e0201)
         cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
...
@@ -276,9 +276,7 @@ BEGIN_FTR_SECTION
  *
  * This interrupt can wake directly from idle. If that is the case,
  * the machine check is handled then the idle wakeup code is called
- * to restore state. In that case, the POWER9 DD1 idle PACA workaround
- * is not applied in the early machine check code, which will cause
- * bugs.
+ * to restore state.
  */
     mr      r11,r1          /* Save r1 */
     lhz     r10,PACA_IN_MCE(r13)
...
@@ -466,43 +466,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
     blr     /* return 0 for wakeup cause / SRR1 value */
 #endif
 
-/*
- * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
- * HSPRG0 will be set to the HSPRG0 value of one of the
- * threads in this core. Thus the value we have in r13
- * may not be this thread's paca pointer.
- *
- * Fortunately, the TIR remains invariant. Since this thread's
- * paca pointer is recorded in all its sibling's paca, we can
- * correctly recover this thread's paca pointer if we
- * know the index of this thread in the core.
- *
- * This index can be obtained from the TIR.
- *
- * i.e, thread's position in the core = TIR.
- * If this value is i, then this thread's paca is
- * paca->thread_sibling_pacas[i].
- */
-power9_dd1_recover_paca:
-    mfspr   r4, SPRN_TIR
-    /*
-     * Since each entry in thread_sibling_pacas is 8 bytes
-     * we need to left-shift by 3 bits. Thus r4 = i * 8
-     */
-    sldi    r4, r4, 3
-    /* Get &paca->thread_sibling_pacas[0] in r5 */
-    ld      r5, PACA_SIBLING_PACA_PTRS(r13)
-    /* Load paca->thread_sibling_pacas[i] into r13 */
-    ldx     r13, r4, r5
-    SET_PACA(r13)
-    /*
-     * Indicate that we have lost NVGPR state
-     * which needs to be restored from the stack.
-     */
-    li      r3, 1
-    stb     r3,PACA_NAPSTATELOST(r13)
-    blr
-
 /*
  * Called from machine check handler for powersave wakeups.
  * Low level machine check processing has already been done. Now just
@@ -537,9 +500,6 @@ pnv_powersave_wakeup:
     ld      r2, PACATOC(r13)
 
 BEGIN_FTR_SECTION
-BEGIN_FTR_SECTION_NESTED(70)
-    bl      power9_dd1_recover_paca
-END_FTR_SECTION_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
     bl      pnv_restore_hyp_resource_arch300
 FTR_SECTION_ELSE
     bl      pnv_restore_hyp_resource_arch207
@@ -602,22 +562,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
     LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
     ld      r4,ADDROFF(pnv_first_deep_stop_state)(r5)
 
-BEGIN_FTR_SECTION_NESTED(71)
-    /*
-     * Assume that we are waking up from the state
-     * same as the Requested Level (RL) in the PSSCR
-     * which are Bits 60-63
-     */
-    ld      r5,PACA_REQ_PSSCR(r13)
-    rldicl  r5,r5,0,60
-FTR_SECTION_ELSE_NESTED(71)
     /*
      * 0-3 bits correspond to Power-Saving Level Status
      * which indicates the idle state we are waking up from
      */
     mfspr   r5, SPRN_PSSCR
     rldicl  r5,r5,4,60
-ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
     li      r0, 0           /* clear requested_psscr to say we're awake */
     std     r0, PACA_REQ_PSSCR(r13)
     cmpd    cr4,r5,r4
...
@@ -1250,17 +1250,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
      * mappings. If the new process has the foreign real address
      * mappings, we must issue a cp_abort to clear any state and
      * prevent snooping, corruption or a covert channel.
-     *
-     * DD1 allows paste into normal system memory so we do an
-     * unpaired copy, rather than cp_abort, to clear the buffer,
-     * since cp_abort is quite expensive.
      */
-    if (current_thread_info()->task->thread.used_vas) {
+    if (current_thread_info()->task->thread.used_vas)
         asm volatile(PPC_CP_ABORT);
-    } else if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-        asm volatile(PPC_COPY(%0, %1)
-                : : "r"(dummy_copy_buffer), "r"(0));
-    }
 }
 #endif /* CONFIG_PPC_BOOK3S_64 */
...
@@ -66,10 +66,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
     bits = root & RPDS_MASK;
     root = root & RPDB_MASK;
 
-    /* P9 DD1 interprets RTS (radix tree size) differently */
     offset = rts + 31;
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-        offset -= 3;
 
     /* current implementations only support 52-bit space */
     if (offset != 52)
@@ -160,17 +157,7 @@ static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
                                              unsigned long clr, unsigned long set,
                                              unsigned long addr, unsigned int shift)
 {
-    unsigned long old = 0;
-
-    if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
-        pte_present(*ptep)) {
-        /* have to invalidate it first */
-        old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
-        kvmppc_radix_tlbie_page(kvm, addr, shift);
-        set |= _PAGE_PRESENT;
-        old &= _PAGE_PRESENT;
-    }
-    return __radix_pte_update(ptep, clr, set) | old;
+    return __radix_pte_update(ptep, clr, set);
 }
 
 void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
...
@@ -1693,14 +1693,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
         r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
         break;
     case KVM_REG_PPC_TB_OFFSET:
-        /*
-         * POWER9 DD1 has an erratum where writing TBU40 causes
-         * the timebase to lose ticks. So we don't let the
-         * timebase offset be changed on P9 DD1. (It is
-         * initialized to zero.)
-         */
-        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-            break;
         /* round up to multiple of 2^24 */
         vcpu->arch.vcore->tb_offset =
             ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2026,8 +2018,6 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
     /*
      * Set the default HFSCR for the guest from the host value.
      * This value is only used on POWER9.
-     * On POWER9 DD1, TM doesn't work, so we make sure to
-     * prevent the guest from using it.
      * On POWER9, we want to virtualize the doorbell facility, so we
      * turn off the HFSCR bit, which causes those instructions to trap.
      */
...
@@ -916,9 +916,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
     mtspr   SPRN_BESCR, r6
     mtspr   SPRN_PID, r7
     mtspr   SPRN_WORT, r8
-BEGIN_FTR_SECTION
-    PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
 BEGIN_FTR_SECTION
     /* POWER8-only registers */
     ld      r5, VCPU_TCSCR(r4)
@@ -1912,7 +1909,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
     ld      r5, VCPU_KVM(r9)
     lbz     r0, KVM_RADIX(r5)
     cmpwi   cr2, r0, 0
-    beq     cr2, 4f
+    beq     cr2, 2f
 
     /*
      * Radix: do eieio; tlbsync; ptesync sequence in case we
@@ -1952,11 +1949,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
     bdnz    1b
     ptesync
 
-2:  /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
-    PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-4:
+2:
 #endif /* CONFIG_PPC_RADIX_MMU */
 
 /*
@@ -3367,11 +3360,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
     mtspr   SPRN_CIABR, r0
     mtspr   SPRN_DAWRX, r0
 
-    /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
-    PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-
 BEGIN_MMU_FTR_SECTION
     b       4f
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
...
@@ -25,18 +25,6 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
      */
     eieio();
 
-    /*
-     * DD1 bug workaround: If PIPR is less favored than CPPR
-     * ignore the interrupt or we might incorrectly lose an IPB
-     * bit.
-     */
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-        __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
-        u8 pipr = be64_to_cpu(qw1) & 0xff;
-        if (pipr >= xc->hw_cppr)
-            return;
-    }
-
     /* Perform the acknowledge OS to register cycle. */
     ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@@ -89,8 +77,15 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
     /* If the XIVE supports the new "store EOI facility, use it */
     if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
         __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
-    else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
+    else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
         opal_int_eoi(hw_irq);
+    else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
+        /*
+         * For LSIs the HW EOI cycle is used rather than PQ bits,
+         * as they are automatically re-triggred in HW when still
+         * pending.
+         */
+        __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
     } else {
         uint64_t eoi_val;
@@ -102,21 +97,13 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
          *
          * This allows us to then do a re-trigger if Q was set
          * rather than synthetizing an interrupt in software
-         *
-         * For LSIs, using the HW EOI cycle works around a problem
-         * on P9 DD1 PHBs where the other ESB accesses don't work
-         * properly.
          */
-        if (xd->flags & XIVE_IRQ_FLAG_LSI)
-            __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
-        else {
-            eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
-            /* Re-trigger if needed */
-            if ((eoi_val & 1) && __x_trig_page(xd))
-                __x_writeq(0, __x_trig_page(xd));
-        }
+        eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
+        /* Re-trigger if needed */
+        if ((eoi_val & 1) && __x_trig_page(xd))
+            __x_writeq(0, __x_trig_page(xd));
     }
 }
 
 enum {
...
@@ -808,31 +808,6 @@ int hash__remove_section_mapping(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-static void update_hid_for_hash(void)
-{
-    unsigned long hid0;
-    unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
-
-    asm volatile("ptesync": : :"memory");
-    /* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
-    asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-                 : : "r"(rb), "i"(0), "i"(0), "i"(2), "r"(0) : "memory");
-    asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
-    trace_tlbie(0, 0, rb, 0, 2, 0, 0);
-    /*
-     * now switch the HID
-     */
-    hid0 = mfspr(SPRN_HID0);
-    hid0 &= ~HID0_POWER9_RADIX;
-    mtspr(SPRN_HID0, hid0);
-    asm volatile("isync": : :"memory");
-    /* Wait for it to happen */
-    while ((mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
-        cpu_relax();
-}
-
 static void __init hash_init_partition_table(phys_addr_t hash_table,
                                              unsigned long htab_size)
 {
@@ -845,8 +820,6 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
     htab_size = __ilog2(htab_size) - 18;
     mmu_partition_table_set_entry(0, hash_table | htab_size, 0);
     pr_info("Partition table %p\n", partition_tb);
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-        update_hid_for_hash();
 }
 
 static void __init htab_initialize(void)
@@ -1077,9 +1050,6 @@ void hash__early_init_mmu_secondary(void)
     /* Initialize hash table for that CPU */
     if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-            update_hid_for_hash();
-
         if (!cpu_has_feature(CPU_FTR_ARCH_300))
             mtspr(SPRN_SDR1, _SDR1);
         else
...
@@ -620,15 +620,12 @@ static int __init add_huge_page_size(unsigned long long size)
      * firmware we only add hugetlb support for page sizes that can be
      * supported by linux page table layout.
      * For now we have
-     * Radix: 2M
+     * Radix: 2M and 1G
      * Hash: 16M and 16G
      */
     if (radix_enabled()) {
-        if (mmu_psize != MMU_PAGE_2M) {
-            if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
-                (mmu_psize != MMU_PAGE_1G))
-                return -EINVAL;
-        }
+        if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
+            return -EINVAL;
     } else {
         if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
             return -EINVAL;
...
@@ -273,15 +273,7 @@ void arch_exit_mmap(struct mm_struct *mm)
 #ifdef CONFIG_PPC_RADIX_MMU
 void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 {
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-        isync();
-        mtspr(SPRN_PID, next->context.id);
-        isync();
-        asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-    } else {
-        mtspr(SPRN_PID, next->context.id);
-        isync();
-    }
+    mtspr(SPRN_PID, next->context.id);
+    isync();
 }
 #endif
@@ -226,16 +226,6 @@ void radix__mark_rodata_ro(void)
 {
     unsigned long start, end;
 
-    /*
-     * mark_rodata_ro() will mark itself as !writable at some point.
-     * Due to DD1 workaround in radix__pte_update(), we'll end up with
-     * an invalid pte and the system will crash quite severly.
-     */
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-        pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
-        return;
-    }
-
     start = (unsigned long)_stext;
     end = (unsigned long)__init_begin;
@@ -533,35 +523,6 @@ void __init radix__early_init_devtree(void)
     return;
 }
 
-static void update_hid_for_radix(void)
-{
-    unsigned long hid0;
-    unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
-
-    asm volatile("ptesync": : :"memory");
-    /* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
-    asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-                 : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
-    /* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
-    asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-                 : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
-    asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
-    trace_tlbie(0, 0, rb, 0, 2, 0, 1);
-    trace_tlbie(0, 0, rb, 0, 2, 1, 1);
-    /*
-     * now switch the HID
-     */
-    hid0 = mfspr(SPRN_HID0);
-    hid0 |= HID0_POWER9_RADIX;
-    mtspr(SPRN_HID0, hid0);
-    asm volatile("isync": : :"memory");
-    /* Wait for it to happen */
-    while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
-        cpu_relax();
-}
-
 static void radix_init_amor(void)
 {
     /*
@@ -576,22 +537,12 @@ static void radix_init_amor(void)
 static void radix_init_iamr(void)
 {
-    unsigned long iamr;
-
-    /*
-     * The IAMR should set to 0 on DD1.
-     */
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-        iamr = 0;
-    else
-        iamr = (1ul << 62);
-
     /*
      * Radix always uses key0 of the IAMR to determine if an access is
      * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
      * fetch.
      */
-    mtspr(SPRN_IAMR, iamr);
+    mtspr(SPRN_IAMR, (1ul << 62));
 }
 
 void __init radix__early_init_mmu(void)
@@ -644,8 +595,6 @@ void __init radix__early_init_mmu(void)
     if (!firmware_has_feature(FW_FEATURE_LPAR)) {
         radix_init_native();
-        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-            update_hid_for_radix();
         lpcr = mfspr(SPRN_LPCR);
         mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
         radix_init_partition_table();
@@ -671,10 +620,6 @@ void radix__early_init_mmu_secondary(void)
      * update partition table control register and UPRT
      */
     if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-            update_hid_for_radix();
-
         lpcr = mfspr(SPRN_LPCR);
         mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
@@ -1095,8 +1040,7 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
      * To avoid NMMU hang while relaxing access, we need mark
      * the pte invalid in between.
      */
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
-        atomic_read(&mm->context.copros) > 0) {
+    if (atomic_read(&mm->context.copros) > 0) {
         unsigned long old_pte, new_pte;
 
         old_pte = __radix_pte_update(ptep, ~0, 0);
...
@@ -994,24 +994,6 @@ void radix__flush_tlb_all(void)
     asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
-void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
-                                 unsigned long address)
-{
-    /*
-     * We track page size in pte only for DD1, So we can
-     * call this only on DD1.
-     */
-    if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-        VM_WARN_ON(1);
-        return;
-    }
-
-    if (old_pte & R_PAGE_LARGE)
-        radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
-    else
-        radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
-}
-
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
 {
...
@@ -128,10 +128,6 @@ static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
 static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
 static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
 static void pmao_restore_workaround(bool ebb) { }
-static bool use_ic(u64 event)
-{
-    return false;
-}
 #endif /* CONFIG_PPC32 */
 
 static bool regs_use_siar(struct pt_regs *regs)
@@ -714,14 +710,6 @@ static void pmao_restore_workaround(bool ebb)
     mtspr(SPRN_PMC6, pmcs[5]);
 }
 
-static bool use_ic(u64 event)
-{
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1) &&
-            (event == 0x200f2 || event == 0x300f2))
-        return true;
-
-    return false;
-}
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
@@ -1046,7 +1034,6 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
 static void power_pmu_read(struct perf_event *event)
 {
     s64 val, delta, prev;
-    struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
     if (event->hw.state & PERF_HES_STOPPED)
         return;
@@ -1056,13 +1043,6 @@ static void power_pmu_read(struct perf_event *event)
     if (is_ebb_event(event)) {
         val = read_pmc(event->hw.idx);
-        if (use_ic(event->attr.config)) {
-            val = mfspr(SPRN_IC);
-            if (val > cpuhw->ic_init)
-                val = val - cpuhw->ic_init;
-            else
-                val = val + (0 - cpuhw->ic_init);
-        }
         local64_set(&event->hw.prev_count, val);
         return;
     }
@@ -1076,13 +1056,6 @@ static void power_pmu_read(struct perf_event *event)
         prev = local64_read(&event->hw.prev_count);
         barrier();
         val = read_pmc(event->hw.idx);
-        if (use_ic(event->attr.config)) {
-            val = mfspr(SPRN_IC);
-            if (val > cpuhw->ic_init)
-                val = val - cpuhw->ic_init;
-            else
-                val = val + (0 - cpuhw->ic_init);
-        }
         delta = check_and_compute_delta(prev, val);
         if (!delta)
             return;
@@ -1535,13 +1508,6 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
                     event->attr.branch_sample_type);
     }
 
-    /*
-     * Workaround for POWER9 DD1 to use the Instruction Counter
-     * register value for instruction counting
-     */
-    if (use_ic(event->attr.config))
-        cpuhw->ic_init = mfspr(SPRN_IC);
-
     perf_pmu_enable(event->pmu);
     local_irq_restore(flags);
     return ret;
...
@@ -59,7 +59,7 @@ static bool is_event_valid(u64 event)
 {
     u64 valid_mask = EVENT_VALID_MASK;
 
-    if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+    if (cpu_has_feature(CPU_FTR_ARCH_300))
         valid_mask = p9_EVENT_VALID_MASK;
 
     return !(event & ~valid_mask);
@@ -86,8 +86,6 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
  * Incase of Power9:
  * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
  *               or if group already have any marked events.
- * Non-Marked events (for DD1):
- *      MMCRA[SDAR_MODE] will be set to 0b01
  * For rest
  *      MMCRA[SDAR_MODE] will be set from event code.
  *      If sdar_mode from event is zero, default to 0b01. Hardware
@@ -96,7 +94,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
     if (cpu_has_feature(CPU_FTR_ARCH_300)) {
         if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
             *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
-        else if (!cpu_has_feature(CPU_FTR_POWER9_DD1) && p9_SDAR_MODE(event))
+        else if (p9_SDAR_MODE(event))
             *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
         else
             *mmcra |= MMCRA_SDAR_MODE_DCACHE;
@@ -106,7 +104,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
 static u64 thresh_cmp_val(u64 value)
 {
-    if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+    if (cpu_has_feature(CPU_FTR_ARCH_300))
         return value << p9_MMCRA_THR_CMP_SHIFT;
 
     return value << MMCRA_THR_CMP_SHIFT;
@@ -114,7 +112,7 @@ static u64 thresh_cmp_val(u64 value)
 static unsigned long combine_from_event(u64 event)
 {
-    if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+    if (cpu_has_feature(CPU_FTR_ARCH_300))
         return p9_EVENT_COMBINE(event);
 
     return EVENT_COMBINE(event);
@@ -122,7 +120,7 @@ static unsigned long combine_from_event(u64 event)
 static unsigned long combine_shift(unsigned long pmc)
 {
-    if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+    if (cpu_has_feature(CPU_FTR_ARCH_300))
         return p9_MMCR1_COMBINE_SHIFT(pmc);
 
     return MMCR1_COMBINE_SHIFT(pmc);
...
@@ -158,11 +158,6 @@
     CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
     CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
 
-/*
- * Lets restrict use of PMC5 for instruction counting.
- */
-#define P9_DD1_TEST_ADDER (ISA207_TEST_ADDER | CNST_PMC_VAL(5))
-
 /* Bits in MMCR1 for PowerISA v2.07 */
 #define MMCR1_UNIT_SHIFT(pmc)       (60 - (4 * ((pmc) - 1)))
 #define MMCR1_COMBINE_SHIFT(pmc)    (35 - ((pmc) - 1))
...
@@ -219,12 +219,6 @@ static struct attribute_group power9_pmu_events_group = {
     .attrs = power9_events_attr,
 };
 
-static const struct attribute_group *power9_isa207_pmu_attr_groups[] = {
-    &isa207_pmu_format_group,
-    &power9_pmu_events_group,
-    NULL,
-};
-
 PMU_FORMAT_ATTR(event,      "config:0-51");
 PMU_FORMAT_ATTR(pmcxsel,    "config:0-7");
 PMU_FORMAT_ATTR(mark,       "config:8");
@@ -267,17 +261,6 @@ static const struct attribute_group *power9_pmu_attr_groups[] = {
     NULL,
 };
 
-static int power9_generic_events_dd1[] = {
-    [PERF_COUNT_HW_CPU_CYCLES] =                PM_CYC,
-    [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =   PM_ICT_NOSLOT_CYC,
-    [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =    PM_CMPLU_STALL,
-    [PERF_COUNT_HW_INSTRUCTIONS] =              PM_INST_DISP,
-    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =       PM_BR_CMPL_ALT,
-    [PERF_COUNT_HW_BRANCH_MISSES] =             PM_BR_MPRED_CMPL,
-    [PERF_COUNT_HW_CACHE_REFERENCES] =          PM_LD_REF_L1,
-    [PERF_COUNT_HW_CACHE_MISSES] =              PM_LD_MISS_L1_FIN,
-};
-
 static int power9_generic_events[] = {
     [PERF_COUNT_HW_CPU_CYCLES] =                PM_CYC,
     [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =   PM_ICT_NOSLOT_CYC,
@@ -439,25 +422,6 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 #undef C
 
-static struct power_pmu power9_isa207_pmu = {
-    .name               = "POWER9",
-    .n_counter          = MAX_PMU_COUNTERS,
-    .add_fields         = ISA207_ADD_FIELDS,
-    .test_adder         = P9_DD1_TEST_ADDER,
-    .compute_mmcr       = isa207_compute_mmcr,
-    .config_bhrb        = power9_config_bhrb,
-    .bhrb_filter_map    = power9_bhrb_filter_map,
-    .get_constraint     = isa207_get_constraint,
-    .get_alternatives   = power9_get_alternatives,
-    .disable_pmc        = isa207_disable_pmc,
-    .flags              = PPMU_NO_SIAR | PPMU_ARCH_207S,
-    .n_generic          = ARRAY_SIZE(power9_generic_events_dd1),
-    .generic_events     = power9_generic_events_dd1,
-    .cache_events       = &power9_cache_events,
-    .attr_groups        = power9_isa207_pmu_attr_groups,
-    .bhrb_nr            = 32,
-};
-
 static struct power_pmu power9_pmu = {
     .name               = "POWER9",
     .n_counter          = MAX_PMU_COUNTERS,
@@ -500,23 +464,7 @@ static int __init init_power9_pmu(void)
         }
     }
 
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-        /*
-         * Since PM_INST_CMPL may not provide right counts in all
-         * sampling scenarios in power9 DD1, instead use PM_INST_DISP.
-         */
-        EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP;
-        /*
-         * Power9 DD1 should use PM_BR_CMPL_ALT event code for
-         * "branches" to provide correct counter value.
-         */
-        EVENT_VAR(PM_BR_CMPL, _g).id = PM_BR_CMPL_ALT;
-        EVENT_VAR(PM_BR_CMPL, _c).id = PM_BR_CMPL_ALT;
-
-        rc = register_power_pmu(&power9_isa207_pmu);
-    } else {
-        rc = register_power_pmu(&power9_pmu);
-    }
+    rc = register_power_pmu(&power9_pmu);
+
     if (rc)
         return rc;
...
@@ -177,11 +177,6 @@ static void pnv_alloc_idle_core_states(void)
             paca_ptrs[cpu]->core_idle_state_ptr = core_idle_state;
             paca_ptrs[cpu]->thread_idle_state = PNV_THREAD_RUNNING;
             paca_ptrs[cpu]->thread_mask = 1 << j;
-            if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
-                continue;
-            paca_ptrs[cpu]->thread_sibling_pacas =
-                kmalloc_node(paca_ptr_array_size,
-                             GFP_KERNEL, node);
         }
     }
@@ -805,29 +800,6 @@ static int __init pnv_init_idle_states(void)
     pnv_alloc_idle_core_states();
 
-    /*
-     * For each CPU, record its PACA address in each of it's
-     * sibling thread's PACA at the slot corresponding to this
-     * CPU's index in the core.
-     */
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-        int cpu;
-
-        pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
-        for_each_present_cpu(cpu) {
-            int base_cpu = cpu_first_thread_sibling(cpu);
-            int idx = cpu_thread_in_core(cpu);
-            int i;
-
-            for (i = 0; i < threads_per_core; i++) {
-                int j = base_cpu + i;
-
-                paca_ptrs[j]->thread_sibling_pacas[idx] =
-                    paca_ptrs[cpu];
-            }
-        }
-    }
-
     if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
         ppc_md.power_save = power7_idle;
...
@@ -283,23 +283,6 @@ static void pnv_cause_ipi(int cpu)
     ic_cause_ipi(cpu);
 }
 
-static void pnv_p9_dd1_cause_ipi(int cpu)
-{
-    int this_cpu = get_cpu();
-
-    /*
-     * POWER9 DD1 has a global addressed msgsnd, but for now we restrict
-     * IPIs to same core, because it requires additional synchronization
-     * for inter-core doorbells which we do not implement.
-     */
-    if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu)))
-        doorbell_global_ipi(cpu);
-    else
-        ic_cause_ipi(cpu);
-
-    put_cpu();
-}
-
 static void __init pnv_smp_probe(void)
 {
     if (xive_enabled())
@@ -311,15 +294,11 @@ static void __init pnv_smp_probe(void)
         ic_cause_ipi = smp_ops->cause_ipi;
         WARN_ON(!ic_cause_ipi);
 
-        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-            if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-                smp_ops->cause_ipi = pnv_p9_dd1_cause_ipi;
-            else
-                smp_ops->cause_ipi = doorbell_global_ipi;
-        } else {
-            smp_ops->cause_ipi = pnv_cause_ipi;
-        }
+        if (cpu_has_feature(CPU_FTR_ARCH_300))
+            smp_ops->cause_ipi = doorbell_global_ipi;
+        else
+            smp_ops->cause_ipi = pnv_cause_ipi;
     }
 }
 
 static int pnv_system_reset_exception(struct pt_regs *regs)
...
@@ -319,7 +319,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
      * The FW told us to call it. This happens for some
      * interrupt sources that need additional HW whacking
      * beyond the ESB manipulation. For example LPC interrupts
-     * on P9 DD1.0 need a latch to be clared in the LPC bridge
+     * on P9 DD1.0 needed a latch to be clared in the LPC bridge
      * itself. The Firmware will take care of it.
      */
     if (WARN_ON_ONCE(!xive_ops->eoi))
@@ -337,9 +337,9 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
          * This allows us to then do a re-trigger if Q was set
          * rather than synthesizing an interrupt in software
          *
-         * For LSIs, using the HW EOI cycle works around a problem
-         * on P9 DD1 PHBs where the other ESB accesses don't work
-         * properly.
+         * For LSIs the HW EOI cycle is used rather than PQ bits,
+         * as they are automatically re-triggred in HW when still
+         * pending.
          */
         if (xd->flags & XIVE_IRQ_FLAG_LSI)
             xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
...
@@ -2429,7 +2429,6 @@ static void dump_one_paca(int cpu)
     DUMP(p, thread_idle_state, "%#-*x");
     DUMP(p, thread_mask, "%#-*x");
     DUMP(p, subcore_sibling_mask, "%#-*x");
-    DUMP(p, thread_sibling_pacas, "%-*px");
     DUMP(p, requested_psscr, "%#-*llx");
     DUMP(p, stop_sprs.pid, "%#-*llx");
     DUMP(p, stop_sprs.ldbar, "%#-*llx");
...
@@ -865,14 +865,6 @@ static inline bool cxl_is_power9(void)
     return false;
 }
 
-static inline bool cxl_is_power9_dd1(void)
-{
-    if ((pvr_version_is(PVR_POWER9)) &&
-        cpu_has_feature(CPU_FTR_POWER9_DD1))
-        return true;
-    return false;
-}
-
 ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
                                     loff_t off, size_t count);
...
@@ -102,10 +102,6 @@ int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg)
     rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl);
     if (rc)
         return rc;
-    if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-        /* workaround for DD1 - nbwind = capiind */
-        cfg->dsnctl |= ((u64)0x02 << (63-47));
-    }
 
     cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION;
     cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE;
...
@@ -465,7 +465,6 @@ int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg)
     /* nMMU_ID Defaults to: b’000001001’*/
     xsl_dsnctl |= ((u64)0x09 << (63-28));
 
-    if (!(cxl_is_power9_dd1())) {
     /*
      * Used to identify CAPI packets which should be sorted into
      * the Non-Blocking queues by the PHB. This field should match
@@ -481,7 +480,6 @@ int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg)
      * Not supported on P9 DD1.
      */
     xsl_dsnctl |= asnind;
-    }
 
     *reg = xsl_dsnctl;
     return 0;
@@ -539,15 +537,8 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
     /* Snoop machines */
     cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);
 
-    if (cxl_is_power9_dd1()) {
-        /* Disabling deadlock counter CAR */
-        cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0020000000000001ULL);
-        /* Enable NORST */
-        cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x8000000000000000ULL);
-    } else {
     /* Enable NORST and DD2 features */
     cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);
-    }
 
     /*
      * Check if PSL has data-cache. We need to flush adapter datacache
...