Commit b2f58941 authored by Hugh Dickins, committed by Andrew Morton

s390: gmap use pte_unmap_unlock() not spin_unlock()

pte_alloc_map_lock() expects to be followed by pte_unmap_unlock(): to
keep balance in future, pass ptep as well as ptl to gmap_pte_op_end(),
and use pte_unmap_unlock() instead of direct spin_unlock() (even though
ptep ends up unused inside the macro).

Link: https://lkml.kernel.org/r/78873af-e1ec-4f9-47ac-483940ac6daa@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: John David Anglin <dave.anglin@bell.net>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5c7f3bf0
arch/s390/mm/gmap.c

--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -895,12 +895,12 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
 
 /**
  * gmap_pte_op_end - release the page table lock
- * @ptl: pointer to the spinlock pointer
+ * @ptep: pointer to the locked pte
+ * @ptl: pointer to the page table spinlock
  */
-static void gmap_pte_op_end(spinlock_t *ptl)
+static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
 {
-	if (ptl)
-		spin_unlock(ptl);
+	pte_unmap_unlock(ptep, ptl);
 }
 
 /**
@@ -1011,7 +1011,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 {
 	int rc;
 	pte_t *ptep;
-	spinlock_t *ptl = NULL;
+	spinlock_t *ptl;
 	unsigned long pbits = 0;
 
 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
@@ -1025,7 +1025,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
 	/* Protect and unlock. */
 	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
-	gmap_pte_op_end(ptl);
+	gmap_pte_op_end(ptep, ptl);
 	return rc;
 }
 
@@ -1154,7 +1154,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 				/* Do *NOT* clear the _PAGE_INVALID bit! */
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		if (!rc)
 			break;
@@ -1248,7 +1248,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 			if (!rc)
 				gmap_insert_rmap(sg, vmaddr, rmap);
 			spin_unlock(&sg->guest_table_lock);
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		radix_tree_preload_end();
 		if (rc) {
@@ -2156,7 +2156,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
 			if (!tptep) {
 				spin_unlock(&sg->guest_table_lock);
-				gmap_pte_op_end(ptl);
+				gmap_pte_op_end(sptep, ptl);
 				radix_tree_preload_end();
 				break;
 			}
@@ -2167,7 +2167,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 				rmap = NULL;
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(sptep, ptl);
 			spin_unlock(&sg->guest_table_lock);
 		}
 		radix_tree_preload_end();
@@ -2495,7 +2495,7 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
 				continue;
 			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
 				set_bit(i, bitmap);
-			spin_unlock(ptl);
+			pte_unmap_unlock(ptep, ptl);
 		}
 	}
 	gmap_pmd_op_end(gmap, pmdp);
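
For context, a minimal sketch (not part of the commit) of the lock/unlock pairing the message relies on: a pte mapped and locked with pte_alloc_map_lock() must be released with pte_unmap_unlock(), which unmaps the pte and drops the page table lock together, rather than with a bare spin_unlock(). The helper name walk_one_pte() below is hypothetical and used only for illustration.

#include <linux/mm.h>

/* Hypothetical helper, shown only to illustrate the pairing. */
static int walk_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep;

	/* Maps the pte page and takes its page table lock. */
	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		return -ENOMEM;

	/* ... inspect or modify *ptep while holding ptl ... */

	/* Balanced release: unmaps the pte and drops ptl in one call. */
	pte_unmap_unlock(ptep, ptl);
	return 0;
}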