Commit 44b6cc81 authored by Martin Schwidefsky

s390/mm,kvm: flush gmap address space with IDTE

The __tlb_flush_mm() helper uses a global flush if the mm struct
has a gmap structure attached to it. Replace the global flush with
two individual flushes by means of the IDTE instruction if only a
single gmap is attached to the mm.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent d5dcafee
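
The change pivots on a new tri-state field, mm->context.gmap_asce: 0 while
no gmap is attached, the asce of the attached gmap while there is exactly
one, and -1UL as soon as there are several. For illustration only, here is
a minimal user-space C sketch of the resulting flush decision; flush_mm(),
tlb_flush_idte() and tlb_flush_full() are made-up stand-ins for the
kernel's __tlb_flush_mm(), __tlb_flush_idte() and __tlb_flush_full(), and
the IDTE instruction itself is stubbed out with printf():

#include <stdio.h>

#define GMAP_ASCE_NONE		0UL	/* no gmap attached to this mm */
#define GMAP_ASCE_MULTIPLE	(~0UL)	/* -1UL: more than one gmap attached */

/* Stand-in for __tlb_flush_idte(): flush one address space by asce. */
static void tlb_flush_idte(unsigned long asce)
{
	printf("IDTE flush of asce %#lx\n", asce);
}

/* Stand-in for __tlb_flush_full(): flush everything on all CPUs. */
static void tlb_flush_full(void)
{
	printf("full TLB flush\n");
}

/* Mirrors the rewritten __tlb_flush_mm(): with at most one gmap attached,
 * do one or two targeted IDTE flushes; otherwise fall back to a full
 * flush (preempt and flush_count handling omitted in this sketch).
 */
static void flush_mm(unsigned long mm_asce, unsigned long gmap_asce,
		     int machine_has_idte)
{
	if (machine_has_idte && gmap_asce != GMAP_ASCE_MULTIPLE) {
		if (gmap_asce != GMAP_ASCE_NONE)
			tlb_flush_idte(gmap_asce);	/* guest space */
		tlb_flush_idte(mm_asce);		/* host space */
	} else {
		tlb_flush_full();
	}
}

int main(void)
{
	unsigned long mm_asce = 0x10000UL;	/* made-up host asce */

	flush_mm(mm_asce, GMAP_ASCE_NONE, 1);	  /* no gmap: one IDTE */
	flush_mm(mm_asce, 0x20000UL, 1);	  /* one gmap: two IDTEs */
	flush_mm(mm_asce, GMAP_ASCE_MULTIPLE, 1); /* many gmaps: full flush */
	return 0;
}

With a single gmap the mm can thus be flushed with two IDTE operations,
one per address space, instead of flushing every TLB entry on every CPU.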
arch/s390/include/asm/mmu.h
@@ -12,6 +12,7 @@ typedef struct {
 	struct list_head pgtable_list;
 	spinlock_t gmap_lock;
 	struct list_head gmap_list;
+	unsigned long gmap_asce;
 	unsigned long asce;
 	unsigned long asce_limit;
 	unsigned long vdso_base;

arch/s390/include/asm/mmu_context.h
@@ -21,6 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.flush_count, 0);
+	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste;

arch/s390/include/asm/tlbflush.h
@@ -60,18 +60,25 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
 	preempt_enable();
 }
 
-/*
- * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
- * when more than one asce (e.g. gmap) ran on this mm.
- */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
+	unsigned long gmap_asce;
+
+	/*
+	 * If the machine has IDTE we prefer to do a per mm flush
+	 * on all cpus instead of doing a local flush if the mm
+	 * only ran on the local cpu.
+	 */
 	preempt_disable();
 	atomic_inc(&mm->context.flush_count);
-	if (MACHINE_HAS_IDTE)
-		__tlb_flush_idte(asce);
-	else
-		__tlb_flush_global();
+	gmap_asce = READ_ONCE(mm->context.gmap_asce);
+	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+		if (gmap_asce)
+			__tlb_flush_idte(gmap_asce);
+		__tlb_flush_idte(mm->context.asce);
+	} else {
+		__tlb_flush_full(mm);
+	}
 	/* Reset TLB flush mask */
 	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	atomic_dec(&mm->context.flush_count);
@@ -92,7 +99,7 @@ static inline void __tlb_flush_kernel(void)
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.
  */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
 	__tlb_flush_local();
 }
@@ -103,19 +110,6 @@ static inline void __tlb_flush_kernel(void)
 }
 #endif
 
-static inline void __tlb_flush_mm(struct mm_struct * mm)
-{
-	/*
-	 * If the machine has IDTE we prefer to do a per mm flush
-	 * on all cpus instead of doing a local flush if the mm
-	 * only ran on the local cpu.
-	 */
-	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_asce(mm, mm->context.asce);
-	else
-		__tlb_flush_full(mm);
-}
-
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
 	if (mm->context.flush_mm) {

arch/s390/mm/gmap.c
@@ -94,6 +94,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
 struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
 {
 	struct gmap *gmap;
+	unsigned long gmap_asce;
 
 	gmap = gmap_alloc(limit);
 	if (!gmap)
@@ -101,6 +102,11 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
 	gmap->mm = mm;
 	spin_lock(&mm->context.gmap_lock);
 	list_add_rcu(&gmap->list, &mm->context.gmap_list);
+	if (list_is_singular(&mm->context.gmap_list))
+		gmap_asce = gmap->asce;
+	else
+		gmap_asce = -1UL;
+	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
 	spin_unlock(&mm->context.gmap_lock);
 	return gmap;
 }
@@ -230,6 +236,7 @@ EXPORT_SYMBOL_GPL(gmap_put);
 void gmap_remove(struct gmap *gmap)
 {
 	struct gmap *sg, *next;
+	unsigned long gmap_asce;
 
 	/* Remove all shadow gmaps linked to this gmap */
 	if (!list_empty(&gmap->children)) {
@@ -243,6 +250,14 @@ void gmap_remove(struct gmap *gmap)
 	/* Remove gmap from the pre-mm list */
 	spin_lock(&gmap->mm->context.gmap_lock);
 	list_del_rcu(&gmap->list);
+	if (list_empty(&gmap->mm->context.gmap_list))
+		gmap_asce = 0;
+	else if (list_is_singular(&gmap->mm->context.gmap_list))
+		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
+					     struct gmap, list)->asce;
+	else
+		gmap_asce = -1UL;
+	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
 	spin_unlock(&gmap->mm->context.gmap_lock);
 	synchronize_rcu();
 	/* Put reference */
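
The gmap.c hunks keep gmap_asce in step with gmap_list under gmap_lock.
Condensed, the recomputation on attach and detach looks like the
hypothetical helper below; 'nattached' and 'only_asce' stand in for the
real list inspection via list_empty(), list_is_singular() and
list_first_entry(), so this is a model of the bookkeeping, not kernel code:

/* Hypothetical stand-alone model of the gmap.c bookkeeping: recompute
 * the tri-state gmap_asce whenever a gmap is attached or detached.
 */
static unsigned long recompute_gmap_asce(unsigned int nattached,
					 unsigned long only_asce)
{
	if (nattached == 0)
		return 0;	  /* no gmap: host flush alone suffices */
	if (nattached == 1)
		return only_asce; /* single gmap: its asce can be used */
	return -1UL;		  /* several gmaps: force a full flush */
}

The value is published with WRITE_ONCE() while gmap_lock is held and
consumed with READ_ONCE() in __tlb_flush_mm(), so the flush path can make
its decision without taking the lock.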