Commit 2cb7c9cb authored by David Hildenbrand, committed by Ingo Molnar

sched/preempt, mm/kmap: Explicitly disable/enable preemption in kmap_atomic_*

The existing code relies on pagefault_disable() implicitly disabling
preemption, so that no schedule will happen between kmap_atomic() and
kunmap_atomic().

Let's make this explicit, to prepare for pagefault_disable() not
touching preemption anymore.
Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: benh@kernel.crashing.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-5-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b3c395ef
...@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page) ...@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
void *kmap; void *kmap;
int type; int type;
preempt_disable();
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
} }
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
...@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn) ...@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
int idx, type; int idx, type;
struct page *page = pfn_to_page(pfn); struct page *page = pfn_to_page(pfn);
preempt_disable();
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
......
...@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page) ...@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
unsigned long paddr; unsigned long paddr;
int type; int type;
preempt_disable();
pagefault_disable(); pagefault_disable();
type = kmap_atomic_idx_push(); type = kmap_atomic_idx_push();
paddr = page_to_phys(page); paddr = page_to_phys(page);
...@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr) ...@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
} }
kmap_atomic_idx_pop(); kmap_atomic_idx_pop();
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
...@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page) ...@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
unsigned long vaddr; unsigned long vaddr;
int type; int type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ preempt_disable();
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
} }
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
...@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn) ...@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
unsigned long vaddr; unsigned long vaddr;
int type; int type;
preempt_disable();
pagefault_disable(); pagefault_disable();
type = kmap_atomic_idx_push(); type = kmap_atomic_idx_push();
......
...@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) ...@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
unsigned long vaddr; unsigned long vaddr;
int idx, type; int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ preempt_disable();
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)
if (vaddr < __fix_to_virt(FIX_KMAP_END)) { if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable(); pagefault_enable();
preempt_enable();
return; return;
} }
...@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr) ...@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
#endif #endif
kmap_atomic_idx_pop(); kmap_atomic_idx_pop();
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
...@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page) ...@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
unsigned long vaddr; unsigned long vaddr;
int idx, type; int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ preempt_disable();
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)
if (vaddr < FIXADDR_START) { // FIXME if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable(); pagefault_enable();
preempt_enable();
return; return;
} }
...@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
#endif #endif
kmap_atomic_idx_pop(); kmap_atomic_idx_pop();
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
...@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn) ...@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
unsigned long vaddr; unsigned long vaddr;
int idx, type; int idx, type;
preempt_disable();
pagefault_disable(); pagefault_disable();
type = kmap_atomic_idx_push(); type = kmap_atomic_idx_push();
......
...@@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct page *page) ...@@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct page *page)
unsigned long vaddr; unsigned long vaddr;
int idx, type; int idx, type;
preempt_disable();
pagefault_disable(); pagefault_disable();
if (page < highmem_start_page) if (page < highmem_start_page)
return page_address(page); return page_address(page);
...@@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsigned long vaddr) ...@@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
if (vaddr < FIXADDR_START) { /* FIXME */ if (vaddr < FIXADDR_START) { /* FIXME */
pagefault_enable(); pagefault_enable();
preempt_enable();
return; return;
} }
...@@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsigned long vaddr) ...@@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
kmap_atomic_idx_pop(); kmap_atomic_idx_pop();
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -142,6 +142,7 @@ static inline void kunmap(struct page *page) ...@@ -142,6 +142,7 @@ static inline void kunmap(struct page *page)
static inline void *kmap_atomic(struct page *page) static inline void *kmap_atomic(struct page *page)
{ {
preempt_disable();
pagefault_disable(); pagefault_disable();
return page_address(page); return page_address(page);
} }
...@@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void *addr) ...@@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void *addr)
{ {
flush_kernel_dcache_page_addr(addr); flush_kernel_dcache_page_addr(addr);
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
#define kmap_atomic_prot(page, prot) kmap_atomic(page) #define kmap_atomic_prot(page, prot) kmap_atomic(page)
......
...@@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) ...@@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
unsigned long vaddr; unsigned long vaddr;
int idx, type; int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ preempt_disable();
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr)
if (vaddr < __fix_to_virt(FIX_KMAP_END)) { if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable(); pagefault_enable();
preempt_enable();
return; return;
} }
...@@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr) ...@@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr)
kmap_atomic_idx_pop(); kmap_atomic_idx_pop();
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
...@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page) ...@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page)
unsigned long vaddr; unsigned long vaddr;
long idx, type; long idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ preempt_disable();
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr)
if (vaddr < FIXADDR_START) { // FIXME if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable(); pagefault_enable();
preempt_enable();
return; return;
} }
...@@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr) ...@@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr)
kmap_atomic_idx_pop(); kmap_atomic_idx_pop();
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
...@@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) ...@@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
int idx, type; int idx, type;
pte_t *pte; pte_t *pte;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ preempt_disable();
pagefault_disable(); pagefault_disable();
/* Avoid icache flushes by disallowing atomic executable mappings. */ /* Avoid icache flushes by disallowing atomic executable mappings. */
...@@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
} }
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
......
...@@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) ...@@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
unsigned long vaddr; unsigned long vaddr;
int idx, type; int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ preempt_disable();
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
...@@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr)
#endif #endif
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
......
...@@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) ...@@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
unsigned long vaddr; unsigned long vaddr;
int idx, type; int idx, type;
preempt_disable();
pagefault_disable(); pagefault_disable();
type = kmap_atomic_idx_push(); type = kmap_atomic_idx_push();
...@@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr) ...@@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr)
} }
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL_GPL(iounmap_atomic); EXPORT_SYMBOL_GPL(iounmap_atomic);
...@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page) ...@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
enum fixed_addresses idx; enum fixed_addresses idx;
unsigned long vaddr; unsigned long vaddr;
preempt_disable();
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr)
} }
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
......
...@@ -65,6 +65,7 @@ static inline void kunmap(struct page *page) ...@@ -65,6 +65,7 @@ static inline void kunmap(struct page *page)
static inline void *kmap_atomic(struct page *page) static inline void *kmap_atomic(struct page *page)
{ {
preempt_disable();
pagefault_disable(); pagefault_disable();
return page_address(page); return page_address(page);
} }
...@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct page *page) ...@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct page *page)
static inline void __kunmap_atomic(void *addr) static inline void __kunmap_atomic(void *addr)
{ {
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
......
...@@ -141,6 +141,7 @@ static inline void __iomem * ...@@ -141,6 +141,7 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping, io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset) unsigned long offset)
{ {
preempt_disable();
pagefault_disable(); pagefault_disable();
return ((char __force __iomem *) mapping) + offset; return ((char __force __iomem *) mapping) + offset;
} }
...@@ -149,6 +150,7 @@ static inline void ...@@ -149,6 +150,7 @@ static inline void
io_mapping_unmap_atomic(void __iomem *vaddr) io_mapping_unmap_atomic(void __iomem *vaddr)
{ {
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
/* Non-atomic map/unmap */ /* Non-atomic map/unmap */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment