Commit fa4531f7 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Don't send IPI to all cpus on THP updates

Now that we have made sure that lockless walks of the Linux page tables are
mostly limited to the current task (current->mm->pgd), we can update the THP
update sequence to only send IPIs to the CPUs on which this task has run.
This helps reduce the IPI overload on systems with a large number of CPUs.

WRT KVM: even though KVM walks the page tables with vcpu->arch.pgdir, it does
so only on secondary CPUs, and in that case the primary CPU is already in the
task's mm cpumask. Sending an IPI to the primary will force the secondary to
do a VM exit, hence this use of the mm cpumask is safe here.

WRT CAPI: we still end up walking the Linux page tables with the CAPI
context's mm. For now, the PTE lookup serialization sends an IPI to all CPUs
if CAPI is in use. We can further improve this by adding the CAPI interrupt
handling CPU to the task's mm cpumask; that will be done in a later patch.
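
As an illustrative sketch (with an assumed helper name, not the exact kernel
code), the pattern being serialized looks roughly like this: the lockless
reader walks the page table with local interrupts disabled, so the dummy IPI
sent by serialize_against_pte_lookup() cannot be serviced on that CPU until
the walk has finished; and since such walks now use the current task's mm,
sending the IPI to mm_cpumask(mm) alone is sufficient.

/*
 * Illustrative reader/writer sketch only.  lockless_lookup_pte() is a
 * hypothetical stand-in for the real walker (find_current_mm_pte);
 * only the interrupt-disable vs IPI interplay matters here.
 */
static int pte_present_lockless(struct mm_struct *mm, unsigned long ea)
{
	pte_t *ptep;
	unsigned long flags;
	int present = 0;

	local_irq_save(flags);		/* blocks the do_nothing() IPI */
	ptep = lockless_lookup_pte(mm->pgd, ea);	/* hypothetical walker */
	if (ptep)
		present = pte_present(READ_ONCE(*ptep));
	local_irq_restore(flags);	/* IPI can now be serviced */
	return present;
}

static void thp_update_sketch(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_clear(pmdp);
	/* wait for any walker above on the CPUs this mm has run on */
	serialize_against_pte_lookup(mm);
	/* now safe to repopulate the pmd, e.g. with a page table pointer */
}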
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 8434f089
@@ -1165,6 +1165,7 @@ static inline bool arch_needs_pgtable_deposit(void)
 		return false;
 	return true;
 }
+extern void serialize_against_pte_lookup(struct mm_struct *mm);
 
 static inline pmd_t pmd_mkdevmap(pmd_t pmd)
...
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <misc/cxl-base.h>
 
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
@@ -64,6 +65,35 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	trace_hugepage_set_pmd(addr, pmd_val(pmd));
 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
+
+static void do_nothing(void *unused)
+{
+
+}
+/*
+ * Serialize against find_current_mm_pte which does lock-less
+ * lookup in page tables with local interrupts disabled. For huge pages
+ * it casts pmd_t to pte_t. Since format of pte_t is different from
+ * pmd_t we want to prevent transit from pmd pointing to page table
+ * to pmd pointing to huge page (and back) while interrupts are disabled.
+ * We clear pmd to possibly replace it with page table pointer in
+ * different code paths. So make sure we wait for the parallel
+ * find_current_mm_pte to finish.
+ */
+void serialize_against_pte_lookup(struct mm_struct *mm)
+{
+	smp_mb();
+	/*
+	 * Cxl fault handling requires us to do a lockless page table
+	 * walk while inserting hash page table entry with mm tracked
+	 * in cxl context. Hence we need to do a global flush.
+	 */
+	if (cxl_ctx_in_use())
+		smp_call_function(do_nothing, NULL, 1);
+	else
+		smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
+}
+
 /*
  * We use this to invalidate a pmdp entry before switching from a
  * hugepte to regular pmd entry.
@@ -77,7 +107,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 	 * This ensures that generic code that rely on IRQ disabling
 	 * to prevent a parallel THP split work as expected.
 	 */
-	kick_all_cpus_sync();
+	serialize_against_pte_lookup(vma->vm_mm);
 }
 
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
...
@@ -239,7 +239,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 	 * by sending an IPI to all the cpus and executing a dummy
 	 * function there.
 	 */
-	kick_all_cpus_sync();
+	serialize_against_pte_lookup(vma->vm_mm);
 	/*
 	 * Now invalidate the hpte entries in the range
 	 * covered by pmd. This make sure we take a
@@ -380,16 +380,16 @@ pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
 	 */
 	memset(pgtable, 0, PTE_FRAG_SIZE);
 	/*
-	 * Serialize against find_linux_pte_or_hugepte which does lock-less
+	 * Serialize against find_current_mm_pte variants which does lock-less
 	 * lookup in page tables with local interrupts disabled. For huge pages
 	 * it casts pmd_t to pte_t. Since format of pte_t is different from
 	 * pmd_t we want to prevent transit from pmd pointing to page table
 	 * to pmd pointing to huge page (and back) while interrupts are disabled.
 	 * We clear pmd to possibly replace it with page table pointer in
 	 * different code paths. So make sure we wait for the parallel
-	 * find_linux_pte_or_hugepage to finish.
+	 * find_current_mm_pte to finish.
 	 */
-	kick_all_cpus_sync();
+	serialize_against_pte_lookup(mm);
 	return old_pmd;
 }
...
@@ -811,7 +811,7 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 	pmd_clear(pmdp);
 
 	/*FIXME!! Verify whether we need this kick below */
-	kick_all_cpus_sync();
+	serialize_against_pte_lookup(vma->vm_mm);
 
 	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
@@ -873,16 +873,16 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
 	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
 	old_pmd = __pmd(old);
 	/*
-	 * Serialize against find_linux_pte_or_hugepte which does lock-less
+	 * Serialize against find_current_mm_pte which does lock-less
 	 * lookup in page tables with local interrupts disabled. For huge pages
 	 * it casts pmd_t to pte_t. Since format of pte_t is different from
 	 * pmd_t we want to prevent transit from pmd pointing to page table
 	 * to pmd pointing to huge page (and back) while interrupts are disabled.
 	 * We clear pmd to possibly replace it with page table pointer in
 	 * different code paths. So make sure we wait for the parallel
-	 * find_linux_pte_or_hugepage to finish.
+	 * find_current_mm_pte to finish.
 	 */
-	kick_all_cpus_sync();
+	serialize_against_pte_lookup(mm);
 	return old_pmd;
 }
...