Commit f3ba3c71 authored by Thomas Gleixner

mm/highmem: Provide kmap_local*

Now that the kmap atomic index is stored in task_struct, provide a
preemptible variant. On context switch the maps of an outgoing task are
removed and the maps of the incoming task are restored. That's obviously
slow, but highmem is slow anyway.

The kmap_local.*() functions can be invoked from both preemptible and
atomic context. kmap_local sections disable migration to keep the resulting
virtual mapping address stable across preemption, but disable neither
pagefaults nor preemption.
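
For illustration, a minimal usage sketch (a hypothetical helper, not part
of this patch), showing that nested local mappings must be unmapped in
reverse order because the map management is stack based:

	/* Hypothetical example: copy one (possibly highmem) page to
	 * another using the new preemptible interface.
	 */
	static void copy_page_local(struct page *dst, struct page *src)
	{
		void *vsrc = kmap_local_page(src);
		void *vdst = kmap_local_page(dst);

		memcpy(vdst, vsrc, PAGE_SIZE);

		kunmap_local(vdst);	/* LIFO: unmap the last map first */
		kunmap_local(vsrc);
	}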

A wholesale conversion of kmap_atomic() to be fully preemptible is not
possible because some of the usage sites might rely on the preemption
disable for serialization or on the implicit pagefault disable. The
conversion needs to be done on a case-by-case basis.
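
As a sketch of such a case-by-case conversion (hypothetical usage site,
assuming it relies on neither the implicit preemption disable nor the
pagefault disable):

	/* Before: implicitly disables pagefaults and preemption */
	static void zero_page_before(struct page *page)
	{
		void *addr = kmap_atomic(page);

		memset(addr, 0, PAGE_SIZE);
		kunmap_atomic(addr);
	}

	/* After: preemptible; only migration is disabled internally */
	static void zero_page_after(struct page *page)
	{
		void *addr = kmap_local_page(page);

		memset(addr, 0, PAGE_SIZE);
		kunmap_local(addr);
	}
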
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20201118204007.468533059@linutronix.de
parent 5fbda3ec
@@ -68,6 +68,26 @@ static inline void kmap_flush_unused(void)
 	__kmap_flush_unused();
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(void *vaddr)
+{
+	kunmap_local_indexed(vaddr);
+}
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -140,6 +160,28 @@ static inline void kunmap(struct page *page)
 #endif
 }
 
+static inline void *kmap_local_page(struct page *page)
+{
+	return page_address(page);
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+	return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+}
+
 static inline void *kmap_atomic(struct page *page)
 {
 	preempt_disable();
@@ -181,4 +223,10 @@ do {						\
 	__kunmap_atomic(__addr);			\
 } while (0)
 
+#define kunmap_local(__addr)				\
+do {							\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_local(__addr);				\
+} while (0)
+
 #endif
@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(void *addr);
 static inline void kmap_flush_unused(void);
 
 /**
- * kmap_atomic - Atomically map a page for temporary usage
+ * kmap_local_page - Map a page for temporary usage
  * @page: Pointer to the page to be mapped
  *
  * Returns: The virtual address of the mapping
  *
- * Side effect: On return pagefaults and preemption are disabled.
- *
  * Can be invoked from any context.
  *
  * Requires careful handling when nesting multiple mappings because the map
  * management is stack based. The unmap has to be in the reverse order of
  * the map operation:
  *
- * addr1 = kmap_atomic(page1);
- * addr2 = kmap_atomic(page2);
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
  * ...
- * kunmap_atomic(addr2);
- * kunmap_atomic(addr1);
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
  *
  * Unmapping addr1 before addr2 is invalid and causes malfunction.
  *
@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(void);
  * virtual address of the direct mapping. Only real highmem pages are
  * temporarily mapped.
  *
- * While it is significantly faster than kmap() it comes with restrictions
- * about the pointer validity and the side effects of disabling page faults
- * and preemption. Use it only when absolutely necessary, e.g. from non
- * preemptible contexts.
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Effectively a wrapper around kmap_local_page() which disables pagefaults
+ * and preemption.
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
  */
 static inline void *kmap_atomic(struct page *page);
@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct page *page);
  *
  * Counterpart to kmap_atomic().
  *
- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * Effectively a wrapper around kunmap_local() which additionally undoes
+ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
  * preemption.
- *
- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
- * in the low memory area. For real highmen pages the mapping which was
- * established with kmap_atomic() is destroyed.
  */
 
 /* Highmem related interfaces for management code */
@@ -453,6 +453,11 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 	unsigned long vaddr;
 	int idx;
 
+	/*
+	 * Disable migration so resulting virtual address is stable
+	 * across preemption.
+	 */
+	migrate_disable();
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -522,6 +527,7 @@ void kunmap_local_indexed(void *vaddr)
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
 	preempt_enable();
+	migrate_enable();
 }
 EXPORT_SYMBOL(kunmap_local_indexed);