Commit 1b948d6c authored by Martin Schwidefsky

s390/mm,tlb: optimize TLB flushing for zEC12

The zEC12 machines introduced the local-clearing control for the IDTE
and IPTE instructions. If the control is set, only the TLB of the local
CPU is cleared of entries: either all entries for a single address
space in the case of IDTE, or a single page-table entry in the case of
IPTE. Without the local-clearing control the TLB flush is broadcast to
all CPUs in the configuration, which is expensive.
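
For illustration, the control is carried in the instruction encoding itself;
the sketch below contrasts the two IPTE forms, mirroring the __ptep_ipte()
and __ptep_ipte_local() hunks further down. The function names here are
illustrative; only the trailing operand of the .insn directive (the M4
field) differs:

    /*
     * Illustrative sketch, not part of the patch: IPTE without and with
     * the local-clearing control. The trailing 0/1 is the M4 field of
     * the instruction; setting it to 1 limits the flush to the local TLB.
     */
    static inline void ipte_global(unsigned long address, pte_t *ptep)
    {
            unsigned long pto = (unsigned long) ptep;

            /* Invalidate the pte and broadcast the TLB flush to all CPUs */
            asm volatile(
                    " .insn rrf,0xb2210000,%2,%3,0,0"
                    : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
    }

    static inline void ipte_local(unsigned long address, pte_t *ptep)
    {
            unsigned long pto = (unsigned long) ptep;

            /* Invalidate the pte and flush only the local TLB */
            asm volatile(
                    " .insn rrf,0xb2210000,%2,%3,0,1"
                    : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
    }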

Resetting the bit mask of the CPUs that need flushing after a
non-local IDTE is tricky. As TLB entries for an address space remain
in the TLB even after the address space has been detached, a new bit
field is required to keep track of attached CPUs vs. CPUs in need of a
flush. After a non-local flush with IDTE the bit field of attached CPUs
is copied to the bit field of CPUs in need of a flush. The ordering of
operations on cpu_attach_mask, attach_count and mm_cpumask(mm) is such
that an under-indication in mm_cpumask(mm) is prevented but an
over-indication in mm_cpumask(mm) is possible.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 02a8f3ab
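
The decision between a local and a broadcast flush follows the same pattern
everywhere the patch applies it (ptep_flush_direct(), pmdp_flush_direct(),
__tlb_flush_asce()). A condensed sketch, with flush_local() and
flush_global() as placeholders for the respective IPTE/IDTE variants in the
real code:

    /*
     * Condensed sketch of the local-vs-global decision; flush_local() and
     * flush_global() are placeholders for __ptep_ipte_local()/__ptep_ipte(),
     * __pmdp_idte_local()/__pmdp_idte() or __tlb_flush_idte_local()/
     * __tlb_flush_idte() in the actual patch.
     */
    static inline void flush_one(struct mm_struct *mm)
    {
            int active, count;

            /* Is the address space active on this CPU? */
            active = (mm == current->active_mm) ? 1 : 0;
            /* Upper 16 bits: flushes in flight; lower 16 bits: attached CPUs */
            count = atomic_add_return(0x10000, &mm->context.attach_count);
            if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
                cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                    /* Only this CPU is attached: use the local-clearing form */
                    flush_local(mm);
            else
                    /*
                     * Broadcast flush; afterwards the real code resets the
                     * (possibly over-indicating) mm_cpumask(mm) from
                     * cpu_attach_mask.
                     */
                    flush_global(mm);
            atomic_sub(0x10000, &mm->context.attach_count);
    }
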
#ifndef __MMU_H
#define __MMU_H
+#include <linux/cpumask.h>
#include <linux/errno.h>
typedef struct {
+        cpumask_t cpu_attach_mask;
        atomic_t attach_count;
        unsigned int flush_mm;
        spinlock_t list_lock;
...
@@ -15,6 +15,7 @@
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
+        cpumask_clear(&mm->context.cpu_attach_mask);
        atomic_set(&mm->context.attach_count, 0);
        mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
@@ -59,6 +60,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        if (prev == next)
                return;
+        if (MACHINE_HAS_TLB_LC)
+                cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
        if (atomic_inc_return(&next->context.attach_count) >> 16) {
                /* Delay update_user_asce until all TLB flushes are done. */
                set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
@@ -73,6 +76,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        }
        atomic_dec(&prev->context.attach_count);
        WARN_ON(atomic_read(&prev->context.attach_count) < 0);
+        if (MACHINE_HAS_TLB_LC)
+                cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
...
@@ -1068,12 +1068,35 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
                : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}
+static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
+{
+        unsigned long pto = (unsigned long) ptep;
+
+#ifndef CONFIG_64BIT
+        /* pto in ESA mode must point to the start of the segment table */
+        pto &= 0x7ffffc00;
+#endif
+        /* Invalidation + local TLB flush for the pte */
+        asm volatile(
+                " .insn rrf,0xb2210000,%2,%3,0,1"
+                : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
static inline void ptep_flush_direct(struct mm_struct *mm,
                                     unsigned long address, pte_t *ptep)
{
+        int active, count;
+
        if (pte_val(*ptep) & _PAGE_INVALID)
                return;
+        active = (mm == current->active_mm) ? 1 : 0;
+        count = atomic_add_return(0x10000, &mm->context.attach_count);
+        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+                __ptep_ipte_local(address, ptep);
+        else
                __ptep_ipte(address, ptep);
+        atomic_sub(0x10000, &mm->context.attach_count);
}
static inline void ptep_flush_lazy(struct mm_struct *mm,
@@ -1382,35 +1405,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
-static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
-{
-        unsigned long sto = (unsigned long) pmdp -
-                pmd_index(address) * sizeof(pmd_t);
-
-        if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
-                asm volatile(
-                        " .insn rrf,0xb98e0000,%2,%3,0,0"
-                        : "=m" (*pmdp)
-                        : "m" (*pmdp), "a" (sto),
-                          "a" ((address & HPAGE_MASK))
-                        : "cc"
-                );
-        }
-}
-
-static inline void __pmd_csp(pmd_t *pmdp)
-{
-        register unsigned long reg2 asm("2") = pmd_val(*pmdp);
-        register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
-                                               _SEGMENT_ENTRY_INVALID;
-        register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
-        asm volatile(
-                " csp %1,%3"
-                : "=m" (*pmdp)
-                : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
-}
-
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
@@ -1479,18 +1473,80 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
+static inline void __pmdp_csp(pmd_t *pmdp)
+{
+        register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+        register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+                                               _SEGMENT_ENTRY_INVALID;
+        register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+        asm volatile(
+                " csp %1,%3"
+                : "=m" (*pmdp)
+                : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
+static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
+{
+        unsigned long sto;
+
+        sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+        asm volatile(
+                " .insn rrf,0xb98e0000,%2,%3,0,0"
+                : "=m" (*pmdp)
+                : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+                : "cc" );
+}
+
+static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
+{
+        unsigned long sto;
+
+        sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+        asm volatile(
+                " .insn rrf,0xb98e0000,%2,%3,0,1"
+                : "=m" (*pmdp)
+                : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+                : "cc" );
+}
+
+static inline void pmdp_flush_direct(struct mm_struct *mm,
+                                     unsigned long address, pmd_t *pmdp)
+{
+        int active, count;
+
+        if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+                return;
+        if (!MACHINE_HAS_IDTE) {
+                __pmdp_csp(pmdp);
+                return;
+        }
+        active = (mm == current->active_mm) ? 1 : 0;
+        count = atomic_add_return(0x10000, &mm->context.attach_count);
+        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+                __pmdp_idte_local(address, pmdp);
+        else
+                __pmdp_idte(address, pmdp);
+        atomic_sub(0x10000, &mm->context.attach_count);
+}
+
static inline void pmdp_flush_lazy(struct mm_struct *mm,
                                   unsigned long address, pmd_t *pmdp)
{
        int active, count;
+        if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+                return;
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if ((count & 0xffff) <= active) {
                pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                mm->context.flush_mm = 1;
-        } else
-                __pmd_idte(address, pmdp);
+        } else if (MACHINE_HAS_IDTE)
+                __pmdp_idte(address, pmdp);
+        else
+                __pmdp_csp(pmdp);
        atomic_sub(0x10000, &mm->context.attach_count);
}
@@ -1543,7 +1599,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
        pmd_t pmd;
        pmd = *pmdp;
-        __pmd_idte(address, pmdp);
+        pmdp_flush_direct(vma->vm_mm, address, pmdp);
        *pmdp = pmd_mkold(pmd);
        return pmd_young(pmd);
}
@@ -1554,7 +1610,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
{
        pmd_t pmd = *pmdp;
-        __pmd_idte(address, pmdp);
+        pmdp_flush_direct(mm, address, pmdp);
        pmd_clear(pmdp);
        return pmd;
}
@@ -1570,7 +1626,7 @@ static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
static inline void pmdp_invalidate(struct vm_area_struct *vma,
                                   unsigned long address, pmd_t *pmdp)
{
-        __pmd_idte(address, pmdp);
+        pmdp_flush_direct(vma->vm_mm, address, pmdp);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -1580,7 +1636,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
        pmd_t pmd = *pmdp;
        if (pmd_write(pmd)) {
-                __pmd_idte(address, pmdp);
+                pmdp_flush_direct(mm, address, pmdp);
                set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
        }
}
...
@@ -68,6 +68,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
#define MACHINE_FLAG_TE (1UL << 15)
#define MACHINE_FLAG_RRBM (1UL << 16)
+#define MACHINE_FLAG_TLB_LC (1UL << 17)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -90,6 +91,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_HAS_TOPOLOGY (0)
#define MACHINE_HAS_TE (0)
#define MACHINE_HAS_RRBM (0)
+#define MACHINE_HAS_TLB_LC (0)
#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
@@ -102,6 +104,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
+#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#endif /* CONFIG_64BIT */
/*
...
@@ -7,19 +7,41 @@
#include <asm/pgalloc.h>
/*
- * Flush all tlb entries on the local cpu.
+ * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
        asm volatile("ptlb" : : : "memory");
}
-#ifdef CONFIG_SMP
/*
- * Flush all tlb entries on all cpus.
+ * Flush TLB entries for a specific ASCE on all CPUs
 */
+static inline void __tlb_flush_idte(unsigned long asce)
+{
+        /* Global TLB flush for the mm */
+        asm volatile(
+                " .insn rrf,0xb98e0000,0,%0,%1,0"
+                : : "a" (2048), "a" (asce) : "cc");
+}
+
+/*
+ * Flush TLB entries for a specific ASCE on the local CPU
+ */
+static inline void __tlb_flush_idte_local(unsigned long asce)
+{
+        /* Local TLB flush for the mm */
+        asm volatile(
+                " .insn rrf,0xb98e0000,0,%0,%1,1"
+                : : "a" (2048), "a" (asce) : "cc");
+}
+
+#ifdef CONFIG_SMP
void smp_ptlb_all(void);
+
+/*
+ * Flush all TLB entries on all CPUs.
+ */
static inline void __tlb_flush_global(void)
{
        register unsigned long reg2 asm("2");
@@ -42,36 +64,89 @@ static inline void __tlb_flush_global(void)
                : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
}
+/*
+ * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
+ * this implicates multiple ASCEs!).
+ */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
-        cpumask_t local_cpumask;
-
        preempt_disable();
-        /*
-         * If the process only ran on the local cpu, do a local flush.
-         */
-        cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
-        if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
+        atomic_add(0x10000, &mm->context.attach_count);
+        if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+                /* Local TLB flush */
                __tlb_flush_local();
+        } else {
+                /* Global TLB flush */
+                __tlb_flush_global();
+                /* Reset TLB flush mask */
+                if (MACHINE_HAS_TLB_LC)
+                        cpumask_copy(mm_cpumask(mm),
+                                     &mm->context.cpu_attach_mask);
+        }
+        atomic_sub(0x10000, &mm->context.attach_count);
+        preempt_enable();
+}
+
+/*
+ * Flush TLB entries for a specific ASCE on all CPUs.
+ */
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+{
+        int active, count;
+
+        preempt_disable();
+        active = (mm == current->active_mm) ? 1 : 0;
+        count = atomic_add_return(0x10000, &mm->context.attach_count);
+        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+                __tlb_flush_idte_local(asce);
+        } else {
+                if (MACHINE_HAS_IDTE)
+                        __tlb_flush_idte(asce);
                else
                        __tlb_flush_global();
+                /* Reset TLB flush mask */
+                if (MACHINE_HAS_TLB_LC)
+                        cpumask_copy(mm_cpumask(mm),
+                                     &mm->context.cpu_attach_mask);
+        }
+        atomic_sub(0x10000, &mm->context.attach_count);
        preempt_enable();
}
+
+static inline void __tlb_flush_kernel(void)
+{
+        if (MACHINE_HAS_IDTE)
+                __tlb_flush_idte((unsigned long) init_mm.pgd |
+                                 init_mm.context.asce_bits);
+        else
+                __tlb_flush_global();
+}
#else
-#define __tlb_flush_full(mm) __tlb_flush_local()
#define __tlb_flush_global() __tlb_flush_local()
-#endif
+#define __tlb_flush_full(mm) __tlb_flush_local()
+
/*
- * Flush all tlb entries of a page table on all cpus.
+ * Flush TLB entries for a specific ASCE on all CPUs.
 */
-static inline void __tlb_flush_idte(unsigned long asce)
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
-        asm volatile(
-                " .insn rrf,0xb98e0000,0,%0,%1,0"
-                : : "a" (2048), "a" (asce) : "cc" );
+        if (MACHINE_HAS_TLB_LC)
+                __tlb_flush_idte_local(asce);
+        else
+                __tlb_flush_local();
}
+
+static inline void __tlb_flush_kernel(void)
+{
+        if (MACHINE_HAS_TLB_LC)
+                __tlb_flush_idte_local((unsigned long) init_mm.pgd |
+                                       init_mm.context.asce_bits);
+        else
+                __tlb_flush_local();
+}
+#endif
static inline void __tlb_flush_mm(struct mm_struct * mm)
{
        /*
@@ -80,7 +155,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
         * only ran on the local cpu.
         */
        if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-                __tlb_flush_idte((unsigned long) mm->pgd |
+                __tlb_flush_asce(mm, (unsigned long) mm->pgd |
                                 mm->context.asce_bits);
        else
                __tlb_flush_full(mm);
@@ -130,7 +205,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
-        __tlb_flush_mm(&init_mm);
+        __tlb_flush_kernel();
}
#endif /* _S390_TLBFLUSH_H */
@@ -386,6 +386,8 @@ static __init void detect_machine_facilities(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
        if (test_facility(66))
                S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
+        if (test_facility(51))
+                S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
#endif
}
...
@@ -236,6 +236,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
        struct _lowcore *lc = pcpu->lowcore;
+        if (MACHINE_HAS_TLB_LC)
+                cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
+        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        atomic_inc(&init_mm.context.attach_count);
        lc->cpu_nr = cpu;
        lc->percpu_offset = __per_cpu_offset[cpu];
@@ -760,6 +763,9 @@ void __cpu_die(unsigned int cpu)
                cpu_relax();
        pcpu_free_lowcore(pcpu);
        atomic_dec(&init_mm.context.attach_count);
+        cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
+        if (MACHINE_HAS_TLB_LC)
+                cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}
void __noreturn cpu_die(void)
...
@@ -123,10 +123,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
        pmd_t *pmdp = (pmd_t *) ptep;
        pte_t pte = huge_ptep_get(ptep);
-        if (MACHINE_HAS_IDTE)
-                __pmd_idte(addr, pmdp);
-        else
-                __pmd_csp(pmdp);
+        pmdp_flush_direct(mm, addr, pmdp);
        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
        return pte;
}
...
@@ -124,8 +124,6 @@ void __init paging_init(void)
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
-        atomic_set(&init_mm.context.attach_count, 1);
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
@@ -136,6 +134,11 @@ void __init paging_init(void)
void __init mem_init(void)
{
+        if (MACHINE_HAS_TLB_LC)
+                cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
+        cpumask_set_cpu(0, mm_cpumask(&init_mm));
+        atomic_set(&init_mm.context.attach_count, 1);
+
        max_mapnr = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
...
@@ -200,7 +200,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
-                __tlb_flush_idte((unsigned long) gmap->table |
+                __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
@@ -219,7 +219,7 @@ void gmap_free(struct gmap *gmap)
        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
-                __tlb_flush_idte((unsigned long) gmap->table |
+                __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
...
@@ -138,7 +138,6 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
        }
        ret = 0;
out:
-        flush_tlb_kernel_range(start, end);
        return ret;
}
@@ -265,7 +264,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
        memset((void *)start, 0, end - start);
        ret = 0;
out:
-        flush_tlb_kernel_range(start, end);
        return ret;
}
...