Commit 3e4d3af5 authored by Peter Zijlstra, committed by Linus Torvalds

mm: stack based kmap_atomic()

Keep the current interface but ignore the km_type argument and use a stack-based
approach instead.

The advantage is that we get rid of crappy code like:

	#define __KM_PTE			\
		(in_nmi() ? KM_NMI_PTE : 	\
		 in_irq() ? KM_IRQ_PTE :	\
		 KM_PTE0)

and in general can stop worrying about what context we're in and what kmap
slots might be appropriate for that.
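
The replacement is a small per-CPU stack of fixmap slots: kmap_atomic() pushes
the next free index and kunmap_atomic() pops it, so mappings nest naturally in
any context.  A rough userspace model of the kmap_atomic_idx_push()/pop()
helpers this patch introduces (the kernel uses a per-CPU variable rather than a
single static, and KM_TYPE_NR is arch-defined; the value below is only
illustrative):

	#include <assert.h>

	#define KM_TYPE_NR 20		/* illustrative; per-arch in the kernel */

	static int kmap_atomic_idx;	/* DEFINE_PER_CPU(int, __kmap_atomic_idx) in the patch */

	static int kmap_atomic_idx_push(void)
	{
		int idx = kmap_atomic_idx++;	/* claim the next free slot */

		assert(idx < KM_TYPE_NR);	/* kernel: BUG_ON(idx > KM_TYPE_NR) */
		return idx;
	}

	static int kmap_atomic_idx_pop(void)
	{
		int idx = --kmap_atomic_idx;	/* release the most recent slot */

		assert(idx >= 0);		/* kernel: BUG_ON(idx < 0) */
		return idx;
	}

Since every user unmaps in LIFO order, an interrupt or NMI that takes its own
atomic kmap simply pushes on top of whatever the interrupted code had mapped,
which is what the old per-context KM_* slots were trying to guarantee by hand.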

The downside is that FRV kmap_atomic() gets more expensive.

For now we use a CPP trick suggested by Andrew:

  #define kmap_atomic(page, args...) __kmap_atomic(page)

to avoid having to touch all kmap_atomic() users in a single patch.
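
The variadic parameter swallows the now-meaningless slot argument, so old and
new call sites both compile and both expand to the one-argument form.  A
hypothetical caller (not a line from this patch) before and after conversion:

	vaddr = kmap_atomic(page, KM_USER0);	/* legacy two-argument call site */
	memcpy(vaddr, buf, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);		/* extra argument is simply ignored */

	vaddr = kmap_atomic(page);		/* converted call site */
	memcpy(vaddr, buf, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* still takes the returned address, not the page */

The remaining callers can then be converted tree-wide in follow-up patches.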

[ not compiled on:
  - mn10300: the arch doesn't actually build with highmem to begin with ]

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c]
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 61ecdb80
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
 #endif
...
...@@ -36,18 +36,17 @@ void kunmap(struct page *page) ...@@ -36,18 +36,17 @@ void kunmap(struct page *page)
} }
EXPORT_SYMBOL(kunmap); EXPORT_SYMBOL(kunmap);
void *kmap_atomic(struct page *page, enum km_type type) void *__kmap_atomic(struct page *page)
{ {
unsigned int idx; unsigned int idx;
unsigned long vaddr; unsigned long vaddr;
void *kmap; void *kmap;
int type;
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
debug_kmap_atomic(type);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
/* /*
* There is no cache coherency issue when non VIVT, so force the * There is no cache coherency issue when non VIVT, so force the
...@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type) ...@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
if (kmap) if (kmap)
return kmap; return kmap;
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id(); idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
...@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type) ...@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
return (void *)vaddr; return (void *)vaddr;
} }
EXPORT_SYMBOL(kmap_atomic); EXPORT_SYMBOL(__kmap_atomic);
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) void __kunmap_atomic(void *kvaddr)
{ {
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); int idx, type;
if (kvaddr >= (void *)FIXADDR_START) { if (kvaddr >= (void *)FIXADDR_START) {
type = kmap_atomic_idx_pop();
idx = type + KM_TYPE_NR * smp_processor_id();
if (cache_is_vivt()) if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
...@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) ...@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
} }
pagefault_enable(); pagefault_enable();
} }
EXPORT_SYMBOL(kunmap_atomic_notypecheck); EXPORT_SYMBOL(__kunmap_atomic);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) void *kmap_atomic_pfn(unsigned long pfn)
{ {
unsigned int idx;
unsigned long vaddr; unsigned long vaddr;
int idx, type;
pagefault_disable(); pagefault_disable();
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id(); idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
......
...@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr); ...@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
(void *) damlr; \ (void *) damlr; \
}) })
static inline void *kmap_atomic(struct page *page, enum km_type type) static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
{ {
unsigned long paddr; unsigned long paddr;
pagefault_disable(); pagefault_disable();
debug_kmap_atomic(type);
paddr = page_to_phys(page); paddr = page_to_phys(page);
switch (type) { switch (type) {
...@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type) ...@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
case 1: return __kmap_atomic_primary(1, paddr, 3); case 1: return __kmap_atomic_primary(1, paddr, 3);
case 2: return __kmap_atomic_primary(2, paddr, 4); case 2: return __kmap_atomic_primary(2, paddr, 4);
case 3: return __kmap_atomic_primary(3, paddr, 5); case 3: return __kmap_atomic_primary(3, paddr, 5);
case 4: return __kmap_atomic_primary(4, paddr, 6);
case 5: return __kmap_atomic_primary(5, paddr, 7);
case 6: return __kmap_atomic_primary(6, paddr, 8);
case 7: return __kmap_atomic_primary(7, paddr, 9);
case 8: return __kmap_atomic_primary(8, paddr, 10);
case 9 ... 9 + NR_TLB_LINES - 1:
return __kmap_atomic_secondary(type - 9, paddr);
default: default:
BUG(); BUG();
...@@ -152,22 +143,13 @@ do { \ ...@@ -152,22 +143,13 @@ do { \
asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \ asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
} while(0) } while(0)
static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
{ {
switch (type) { switch (type) {
case 0: __kunmap_atomic_primary(0, 2); break; case 0: __kunmap_atomic_primary(0, 2); break;
case 1: __kunmap_atomic_primary(1, 3); break; case 1: __kunmap_atomic_primary(1, 3); break;
case 2: __kunmap_atomic_primary(2, 4); break; case 2: __kunmap_atomic_primary(2, 4); break;
case 3: __kunmap_atomic_primary(3, 5); break; case 3: __kunmap_atomic_primary(3, 5); break;
case 4: __kunmap_atomic_primary(4, 6); break;
case 5: __kunmap_atomic_primary(5, 7); break;
case 6: __kunmap_atomic_primary(6, 8); break;
case 7: __kunmap_atomic_primary(7, 9); break;
case 8: __kunmap_atomic_primary(8, 10); break;
case 9 ... 9 + NR_TLB_LINES - 1:
__kunmap_atomic_secondary(type - 9, kvaddr);
break;
default: default:
BUG(); BUG();
...@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) ...@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
pagefault_enable(); pagefault_enable();
} }
void *__kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
dampr2 = __get_DAMPR(2); dampr2 = __get_DAMPR(2);
for (i = 0; i < nents; i++) { for (i = 0; i < nents; i++) {
vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE); vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
frv_dcache_writeback((unsigned long) vaddr, frv_dcache_writeback((unsigned long) vaddr,
(unsigned long) vaddr + PAGE_SIZE); (unsigned long) vaddr + PAGE_SIZE);
} }
kunmap_atomic(vaddr, __KM_CACHE); kunmap_atomic_primary(vaddr, __KM_CACHE);
if (dampr2) { if (dampr2) {
__set_DAMPR(2, dampr2); __set_DAMPR(2, dampr2);
__set_IAMPR(2, dampr2); __set_IAMPR(2, dampr2);
......
...@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page) ...@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
dampr2 = __get_DAMPR(2); dampr2 = __get_DAMPR(2);
vaddr = kmap_atomic(page, __KM_CACHE); vaddr = kmap_atomic_primary(page, __KM_CACHE);
frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE); frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
kunmap_atomic(vaddr, __KM_CACHE); kunmap_atomic_primary(vaddr, __KM_CACHE);
if (dampr2) { if (dampr2) {
__set_DAMPR(2, dampr2); __set_DAMPR(2, dampr2);
...@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, ...@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
dampr2 = __get_DAMPR(2); dampr2 = __get_DAMPR(2);
vaddr = kmap_atomic(page, __KM_CACHE); vaddr = kmap_atomic_primary(page, __KM_CACHE);
start = (start & ~PAGE_MASK) | (unsigned long) vaddr; start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
frv_cache_wback_inv(start, start + len); frv_cache_wback_inv(start, start + len);
kunmap_atomic(vaddr, __KM_CACHE); kunmap_atomic_primary(vaddr, __KM_CACHE);
if (dampr2) { if (dampr2) {
__set_DAMPR(2, dampr2); __set_DAMPR(2, dampr2);
......
...@@ -36,3 +36,53 @@ struct page *kmap_atomic_to_page(void *ptr) ...@@ -36,3 +36,53 @@ struct page *kmap_atomic_to_page(void *ptr)
{ {
return virt_to_page(ptr); return virt_to_page(ptr);
} }
void *__kmap_atomic(struct page *page)
{
unsigned long paddr;
int type;
pagefault_disable();
type = kmap_atomic_idx_push();
paddr = page_to_phys(page);
switch (type) {
/*
* The first 4 primary maps are reserved for architecture code
*/
case 0: return __kmap_atomic_primary(4, paddr, 6);
case 1: return __kmap_atomic_primary(5, paddr, 7);
case 2: return __kmap_atomic_primary(6, paddr, 8);
case 3: return __kmap_atomic_primary(7, paddr, 9);
case 4: return __kmap_atomic_primary(8, paddr, 10);
case 5 ... 5 + NR_TLB_LINES - 1:
return __kmap_atomic_secondary(type - 5, paddr);
default:
BUG();
return NULL;
}
}
EXPORT_SYMBOL(__kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
int type = kmap_atomic_idx_pop();
switch (type) {
case 0: __kunmap_atomic_primary(4, 6); break;
case 1: __kunmap_atomic_primary(5, 7); break;
case 2: __kunmap_atomic_primary(6, 8); break;
case 3: __kunmap_atomic_primary(7, 9); break;
case 4: __kunmap_atomic_primary(8, 10); break;
case 5 ... 5 + NR_TLB_LINES - 1:
__kunmap_atomic_secondary(type - 5, kvaddr);
break;
default:
BUG();
}
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
...@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table; ...@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
extern void * kmap_high(struct page *page); extern void * kmap_high(struct page *page);
extern void kunmap_high(struct page *page); extern void kunmap_high(struct page *page);
extern void *__kmap(struct page *page); extern void *kmap(struct page *page);
extern void __kunmap(struct page *page); extern void kunmap(struct page *page);
extern void *__kmap_atomic(struct page *page, enum km_type type); extern void *__kmap_atomic(struct page *page);
extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type); extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *__kmap_atomic_to_page(void *ptr); extern struct page *kmap_atomic_to_page(void *ptr);
#define kmap __kmap
#define kunmap __kunmap
#define kmap_atomic __kmap_atomic
#define kunmap_atomic_notypecheck __kunmap_atomic_notypecheck
#define kmap_atomic_to_page __kmap_atomic_to_page
#define flush_cache_kmaps() flush_cache_all() #define flush_cache_kmaps() flush_cache_all()
......
...@@ -9,7 +9,7 @@ static pte_t *kmap_pte; ...@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn; unsigned long highstart_pfn, highend_pfn;
void *__kmap(struct page *page) void *kmap(struct page *page)
{ {
void *addr; void *addr;
...@@ -21,16 +21,16 @@ void *__kmap(struct page *page) ...@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
return addr; return addr;
} }
EXPORT_SYMBOL(__kmap); EXPORT_SYMBOL(kmap);
void __kunmap(struct page *page) void kunmap(struct page *page)
{ {
BUG_ON(in_interrupt()); BUG_ON(in_interrupt());
if (!PageHighMem(page)) if (!PageHighMem(page))
return; return;
kunmap_high(page); kunmap_high(page);
} }
EXPORT_SYMBOL(__kunmap); EXPORT_SYMBOL(kunmap);
/* /*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
...@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap); ...@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
* kmaps are appropriate for short, tight code paths only. * kmaps are appropriate for short, tight code paths only.
*/ */
void *__kmap_atomic(struct page *page, enum km_type type) void *__kmap_atomic(struct page *page)
{ {
enum fixed_addresses idx;
unsigned long vaddr; unsigned long vaddr;
int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
debug_kmap_atomic(type); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
...@@ -64,17 +64,21 @@ void *__kmap_atomic(struct page *page, enum km_type type) ...@@ -64,17 +64,21 @@ void *__kmap_atomic(struct page *page, enum km_type type)
} }
EXPORT_SYMBOL(__kmap_atomic); EXPORT_SYMBOL(__kmap_atomic);
void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) void __kunmap_atomic(void *kvaddr)
{ {
#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); int type;
if (vaddr < FIXADDR_START) { // FIXME if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable(); pagefault_enable();
return; return;
} }
type = kmap_atomic_idx_pop();
#ifdef CONFIG_DEBUG_HIGHMEM
{
int idx = type + KM_TYPE_NR * smp_processor_id();
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
/* /*
...@@ -83,24 +87,24 @@ void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) ...@@ -83,24 +87,24 @@ void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
*/ */
pte_clear(&init_mm, vaddr, kmap_pte-idx); pte_clear(&init_mm, vaddr, kmap_pte-idx);
local_flush_tlb_one(vaddr); local_flush_tlb_one(vaddr);
}
#endif #endif
pagefault_enable(); pagefault_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic_notypecheck); EXPORT_SYMBOL(__kunmap_atomic);
/* /*
* This is the same as kmap_atomic() but can map memory that doesn't * This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it. * have a struct page associated with it.
*/ */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) void *kmap_atomic_pfn(unsigned long pfn)
{ {
enum fixed_addresses idx;
unsigned long vaddr; unsigned long vaddr;
int idx, type;
pagefault_disable(); pagefault_disable();
debug_kmap_atomic(type); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL)); set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
...@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) ...@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
return (void*) vaddr; return (void*) vaddr;
} }
struct page *__kmap_atomic_to_page(void *ptr) struct page *kmap_atomic_to_page(void *ptr)
{ {
unsigned long idx, vaddr = (unsigned long)ptr; unsigned long idx, vaddr = (unsigned long)ptr;
pte_t *pte; pte_t *pte;
......
...@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page) ...@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
* be used in IRQ contexts, so in some (very limited) cases we need * be used in IRQ contexts, so in some (very limited) cases we need
* it. * it.
*/ */
static inline unsigned long kmap_atomic(struct page *page, enum km_type type) static inline unsigned long __kmap_atomic(struct page *page)
{ {
enum fixed_addresses idx;
unsigned long vaddr; unsigned long vaddr;
int idx, type;
pagefault_disable();
if (page < highmem_start_page) if (page < highmem_start_page)
return page_address(page); return page_address(page);
debug_kmap_atomic(type); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id(); idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG #if HIGHMEM_DEBUG
...@@ -91,13 +92,21 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type) ...@@ -91,13 +92,21 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
return vaddr; return vaddr;
} }
static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type) static inline void __kunmap_atomic(unsigned long vaddr)
{ {
#if HIGHMEM_DEBUG int type;
enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
if (vaddr < FIXADDR_START) /* FIXME */ if (vaddr < FIXADDR_START) { /* FIXME */
pagefault_enable();
return; return;
}
type = kmap_atomic_idx_pop();
#if HIGHMEM_DEBUG
{
unsigned int idx;
idx = type + KM_TYPE_NR * smp_processor_id();
if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)) if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
BUG(); BUG();
...@@ -108,9 +117,10 @@ static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type t ...@@ -108,9 +117,10 @@ static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type t
*/ */
pte_clear(kmap_pte - idx); pte_clear(kmap_pte - idx);
__flush_tlb_one(vaddr); __flush_tlb_one(vaddr);
}
#endif #endif
pagefault_enable();
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */ #endif /* _ASM_HIGHMEM_H */
...@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table; ...@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table;
extern void *kmap_high(struct page *page); extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page); extern void kunmap_high(struct page *page);
extern void *kmap_atomic_prot(struct page *page, enum km_type type, extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
pgprot_t prot); extern void __kunmap_atomic(void *kvaddr);
extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
static inline void *kmap(struct page *page) static inline void *kmap(struct page *page)
{ {
...@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page) ...@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page)
kunmap_high(page); kunmap_high(page);
} }
static inline void *kmap_atomic(struct page *page, enum km_type type) static inline void *__kmap_atomic(struct page *page)
{ {
return kmap_atomic_prot(page, type, kmap_prot); return kmap_atomic_prot(page, kmap_prot);
} }
static inline struct page *kmap_atomic_to_page(void *ptr) static inline struct page *kmap_atomic_to_page(void *ptr)
......
...@@ -29,17 +29,17 @@ ...@@ -29,17 +29,17 @@
* be used in IRQ contexts, so in some (very limited) cases we need * be used in IRQ contexts, so in some (very limited) cases we need
* it. * it.
*/ */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{ {
unsigned int idx;
unsigned long vaddr; unsigned long vaddr;
int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
debug_kmap_atomic(type); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
...@@ -52,17 +52,23 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) ...@@ -52,17 +52,23 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
} }
EXPORT_SYMBOL(kmap_atomic_prot); EXPORT_SYMBOL(kmap_atomic_prot);
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) void __kunmap_atomic(void *kvaddr)
{ {
#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); int type;
if (vaddr < __fix_to_virt(FIX_KMAP_END)) { if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable(); pagefault_enable();
return; return;
} }
type = kmap_atomic_idx_pop();
#ifdef CONFIG_DEBUG_HIGHMEM
{
unsigned int idx;
idx = type + KM_TYPE_NR * smp_processor_id();
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
/* /*
...@@ -71,7 +77,8 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) ...@@ -71,7 +77,8 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
*/ */
pte_clear(&init_mm, vaddr, kmap_pte-idx); pte_clear(&init_mm, vaddr, kmap_pte-idx);
local_flush_tlb_page(NULL, vaddr); local_flush_tlb_page(NULL, vaddr);
}
#endif #endif
pagefault_enable(); pagefault_enable();
} }
EXPORT_SYMBOL(kunmap_atomic_notypecheck); EXPORT_SYMBOL(__kunmap_atomic);
...@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page) ...@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page)
kunmap_high(page); kunmap_high(page);
} }
extern void *kmap_atomic(struct page *page, enum km_type type); extern void *__kmap_atomic(struct page *page);
extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type); extern void __kunmap_atomic(void *kvaddr);
extern struct page *kmap_atomic_to_page(void *vaddr); extern struct page *kmap_atomic_to_page(void *vaddr);
#define flush_cache_kmaps() flush_cache_all() #define flush_cache_kmaps() flush_cache_all()
......
...@@ -29,17 +29,17 @@ ...@@ -29,17 +29,17 @@
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
void *kmap_atomic(struct page *page, enum km_type type) void *__kmap_atomic(struct page *page)
{ {
unsigned long idx;
unsigned long vaddr; unsigned long vaddr;
long idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
debug_kmap_atomic(type); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
...@@ -63,22 +63,28 @@ void *kmap_atomic(struct page *page, enum km_type type) ...@@ -63,22 +63,28 @@ void *kmap_atomic(struct page *page, enum km_type type)
return (void*) vaddr; return (void*) vaddr;
} }
EXPORT_SYMBOL(kmap_atomic); EXPORT_SYMBOL(__kmap_atomic);
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) void __kunmap_atomic(void *kvaddr)
{ {
#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
unsigned long idx = type + KM_TYPE_NR*smp_processor_id(); int type;
if (vaddr < FIXADDR_START) { // FIXME if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable(); pagefault_enable();
return; return;
} }
type = kmap_atomic_idx_pop();
#ifdef CONFIG_DEBUG_HIGHMEM
{
unsigned long idx;
idx = type + KM_TYPE_NR * smp_processor_id();
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx)); BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
/* XXX Fix - Anton */ /* XXX Fix - Anton */
#if 0 #if 0
__flush_cache_one(vaddr); __flush_cache_one(vaddr);
#else #else
...@@ -90,17 +96,17 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) ...@@ -90,17 +96,17 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
* this pte without first remap it * this pte without first remap it
*/ */
pte_clear(&init_mm, vaddr, kmap_pte-idx); pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */ /* XXX Fix - Anton */
#if 0 #if 0
__flush_tlb_one(vaddr); __flush_tlb_one(vaddr);
#else #else
flush_tlb_all(); flush_tlb_all();
#endif #endif
}
#endif #endif
pagefault_enable(); pagefault_enable();
} }
EXPORT_SYMBOL(kunmap_atomic_notypecheck); EXPORT_SYMBOL(__kunmap_atomic);
/* We may be fed a pagetable here by ptep_to_xxx and others. */ /* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr) struct page *kmap_atomic_to_page(void *ptr)
......
...@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished); ...@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
/* This macro is used only in map_new_virtual() to map "page". */ /* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page) #define kmap_prot page_to_kpgprot(page)
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type); void *__kmap_atomic(struct page *page);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr); struct page *kmap_atomic_to_page(void *ptr);
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void *kmap_atomic(struct page *page, enum km_type type);
void kmap_atomic_fix_kpte(struct page *page, int finished); void kmap_atomic_fix_kpte(struct page *page, int finished);
#define flush_cache_kmaps() do { } while (0) #define flush_cache_kmaps() do { } while (0)
......
...@@ -56,50 +56,6 @@ void kunmap(struct page *page) ...@@ -56,50 +56,6 @@ void kunmap(struct page *page)
} }
EXPORT_SYMBOL(kunmap); EXPORT_SYMBOL(kunmap);
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
static unsigned warn_count = 10;
if (unlikely(warn_count == 0))
return;
if (unlikely(in_interrupt())) {
if (in_irq()) {
if (type != KM_IRQ0 && type != KM_IRQ1 &&
type != KM_BIO_SRC_IRQ &&
/* type != KM_BIO_DST_IRQ && */
type != KM_BOUNCE_READ) {
WARN_ON(1);
warn_count--;
}
} else if (!irqs_disabled()) { /* softirq */
if (type != KM_IRQ0 && type != KM_IRQ1 &&
type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
type != KM_SKB_SUNRPC_DATA &&
type != KM_SKB_DATA_SOFTIRQ &&
type != KM_BOUNCE_READ) {
WARN_ON(1);
warn_count--;
}
}
}
if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
if (!irqs_disabled()) {
WARN_ON(1);
warn_count--;
}
} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
if (irq_count() == 0 && !irqs_disabled()) {
WARN_ON(1);
warn_count--;
}
}
#endif
}
/* /*
* Describe a single atomic mapping of a page on a given cpu at a * Describe a single atomic mapping of a page on a given cpu at a
* given address, and allow it to be linked into a list. * given address, and allow it to be linked into a list.
...@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished) ...@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
* When holding an atomic kmap is is not legal to sleep, so atomic * When holding an atomic kmap is is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only. * kmaps are appropriate for short, tight code paths only.
*/ */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{ {
enum fixed_addresses idx;
unsigned long vaddr; unsigned long vaddr;
int idx, type;
pte_t *pte; pte_t *pte;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
...@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) ...@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
debug_kmap_atomic_prot(type); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
pte = kmap_get_pte(vaddr); pte = kmap_get_pte(vaddr);
...@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) ...@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
} }
EXPORT_SYMBOL(kmap_atomic_prot); EXPORT_SYMBOL(kmap_atomic_prot);
void *kmap_atomic(struct page *page, enum km_type type) void *__kmap_atomic(struct page *page)
{ {
/* PAGE_NONE is a magic value that tells us to check immutability. */ /* PAGE_NONE is a magic value that tells us to check immutability. */
return kmap_atomic_prot(page, type, PAGE_NONE); return kmap_atomic_prot(page, type, PAGE_NONE);
} }
EXPORT_SYMBOL(kmap_atomic); EXPORT_SYMBOL(__kmap_atomic);
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) void __kunmap_atomic(void *kvaddr)
{ {
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
/* if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
* Force other mappings to Oops if they try to access this pte without vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
* first remapping it. Keeping stale mappings around is a bad idea.
*/
if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
pte_t *pte = kmap_get_pte(vaddr); pte_t *pte = kmap_get_pte(vaddr);
pte_t pteval = *pte; pte_t pteval = *pte;
int idx, type;
type = kmap_atomic_idx_pop();
idx = type + KM_TYPE_NR*smp_processor_id();
/*
* Force other mappings to Oops if they try to access this pte
* without first remapping it. Keeping stale mappings around
* is a bad idea.
*/
BUG_ON(!pte_present(pteval) && !pte_migrating(pteval)); BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
kmap_atomic_unregister(pte_page(pteval), vaddr); kmap_atomic_unregister(pte_page(pteval), vaddr);
kpte_clear_flush(pte, vaddr); kpte_clear_flush(pte, vaddr);
...@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) ...@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
arch_flush_lazy_mmu_mode(); arch_flush_lazy_mmu_mode();
pagefault_enable(); pagefault_enable();
} }
EXPORT_SYMBOL(kunmap_atomic_notypecheck); EXPORT_SYMBOL(__kunmap_atomic);
/* /*
* This API is supposed to allow us to map memory without a "struct page". * This API is supposed to allow us to map memory without a "struct page".
* Currently we don't support this, though this may change in the future. * Currently we don't support this, though this may change in the future.
*/ */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) void *kmap_atomic_pfn(unsigned long pfn)
{ {
return kmap_atomic(pfn_to_page(pfn), type); return kmap_atomic(pfn_to_page(pfn));
} }
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{ {
return kmap_atomic_prot(pfn_to_page(pfn), type, prot); return kmap_atomic_prot(pfn_to_page(pfn), prot);
} }
struct page *kmap_atomic_to_page(void *ptr) struct page *kmap_atomic_to_page(void *ptr)
......
...@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page); ...@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);
void *kmap(struct page *page); void *kmap(struct page *page);
void kunmap(struct page *page); void kunmap(struct page *page);
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
void *kmap_atomic(struct page *page, enum km_type type); void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type); void *__kmap_atomic(struct page *page);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr); struct page *kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() do { } while (0) #define flush_cache_kmaps() do { } while (0)
......
...@@ -27,10 +27,10 @@ ...@@ -27,10 +27,10 @@
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
void __iomem * void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
void void
iounmap_atomic(void __iomem *kvaddr, enum km_type type); iounmap_atomic(void __iomem *kvaddr);
int int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot); iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
......
...@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, ...@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!is_crashed_pfn_valid(pfn)) if (!is_crashed_pfn_valid(pfn))
return -EFAULT; return -EFAULT;
vaddr = kmap_atomic_pfn(pfn, KM_PTE0); vaddr = kmap_atomic_pfn(pfn);
if (!userbuf) { if (!userbuf) {
memcpy(buf, (vaddr + offset), csize); memcpy(buf, (vaddr + offset), csize);
......
...@@ -9,6 +9,7 @@ void *kmap(struct page *page) ...@@ -9,6 +9,7 @@ void *kmap(struct page *page)
return page_address(page); return page_address(page);
return kmap_high(page); return kmap_high(page);
} }
EXPORT_SYMBOL(kmap);
void kunmap(struct page *page) void kunmap(struct page *page)
{ {
...@@ -18,6 +19,7 @@ void kunmap(struct page *page) ...@@ -18,6 +19,7 @@ void kunmap(struct page *page)
return; return;
kunmap_high(page); kunmap_high(page);
} }
EXPORT_SYMBOL(kunmap);
/* /*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
...@@ -27,10 +29,10 @@ void kunmap(struct page *page) ...@@ -27,10 +29,10 @@ void kunmap(struct page *page)
* However when holding an atomic kmap it is not legal to sleep, so atomic * However when holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only. * kmaps are appropriate for short, tight code paths only.
*/ */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{ {
enum fixed_addresses idx;
unsigned long vaddr; unsigned long vaddr;
int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable(); pagefault_disable();
...@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) ...@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
debug_kmap_atomic(type); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx))); BUG_ON(!pte_none(*(kmap_pte-idx)));
...@@ -47,44 +48,56 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) ...@@ -47,44 +48,56 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
return (void *)vaddr; return (void *)vaddr;
} }
EXPORT_SYMBOL(kmap_atomic_prot);
void *__kmap_atomic(struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(__kmap_atomic);
void *kmap_atomic(struct page *page, enum km_type type) /*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
*/
void *kmap_atomic_pfn(unsigned long pfn)
{ {
return kmap_atomic_prot(page, type, kmap_prot); return kmap_atomic_prot_pfn(pfn, kmap_prot);
} }
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) void __kunmap_atomic(void *kvaddr)
{ {
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
int idx, type;
type = kmap_atomic_idx_pop();
idx = type + KM_TYPE_NR * smp_processor_id();
#ifdef CONFIG_DEBUG_HIGHMEM
WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
/* /*
* Force other mappings to Oops if they'll try to access this pte * Force other mappings to Oops if they'll try to access this
* without first remap it. Keeping stale mappings around is a bad idea * pte without first remap it. Keeping stale mappings around
* also, in case the page changes cacheability attributes or becomes * is a bad idea also, in case the page changes cacheability
* a protected page in a hypervisor. * attributes or becomes a protected page in a hypervisor.
*/ */
if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
kpte_clear_flush(kmap_pte-idx, vaddr); kpte_clear_flush(kmap_pte-idx, vaddr);
else { }
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
else {
BUG_ON(vaddr < PAGE_OFFSET); BUG_ON(vaddr < PAGE_OFFSET);
BUG_ON(vaddr >= (unsigned long)high_memory); BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
} }
#endif
pagefault_enable(); pagefault_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
*/
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
struct page *kmap_atomic_to_page(void *ptr) struct page *kmap_atomic_to_page(void *ptr)
{ {
...@@ -98,12 +111,6 @@ struct page *kmap_atomic_to_page(void *ptr) ...@@ -98,12 +111,6 @@ struct page *kmap_atomic_to_page(void *ptr)
pte = kmap_pte - (idx - FIX_KMAP_BEGIN); pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte); return pte_page(*pte);
} }
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic_notypecheck);
EXPORT_SYMBOL(kmap_atomic_prot);
EXPORT_SYMBOL(kmap_atomic_to_page); EXPORT_SYMBOL(kmap_atomic_to_page);
void __init set_highmem_pages_init(void) void __init set_highmem_pages_init(void)
......
...@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot) ...@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
} }
EXPORT_SYMBOL_GPL(iomap_create_wc); EXPORT_SYMBOL_GPL(iomap_create_wc);
void void iomap_free(resource_size_t base, unsigned long size)
iomap_free(resource_size_t base, unsigned long size)
{ {
io_free_memtype(base, base + size); io_free_memtype(base, base + size);
} }
EXPORT_SYMBOL_GPL(iomap_free); EXPORT_SYMBOL_GPL(iomap_free);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{ {
enum fixed_addresses idx;
unsigned long vaddr; unsigned long vaddr;
int idx, type;
pagefault_disable(); pagefault_disable();
debug_kmap_atomic(type); type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id(); idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
...@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) ...@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
} }
/* /*
* Map 'pfn' using fixed map 'type' and protections 'prot' * Map 'pfn' using protections 'prot'
*/ */
void __iomem * void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{ {
/* /*
* For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS. * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
...@@ -86,24 +85,33 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) ...@@ -86,24 +85,33 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
prot = PAGE_KERNEL_UC_MINUS; prot = PAGE_KERNEL_UC_MINUS;
return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot); return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
} }
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void void
iounmap_atomic(void __iomem *kvaddr, enum km_type type) iounmap_atomic(void __iomem *kvaddr)
{ {
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
int idx, type;
type = kmap_atomic_idx_pop();
idx = type + KM_TYPE_NR * smp_processor_id();
#ifdef CONFIG_DEBUG_HIGHMEM
WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
/* /*
* Force other mappings to Oops if they'll try to access this pte * Force other mappings to Oops if they'll try to access this
* without first remap it. Keeping stale mappings around is a bad idea * pte without first remap it. Keeping stale mappings around
* also, in case the page changes cacheability attributes or becomes * is a bad idea also, in case the page changes cacheability
* a protected page in a hypervisor. * attributes or becomes a protected page in a hypervisor.
*/ */
if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
kpte_clear_flush(kmap_pte-idx, vaddr); kpte_clear_flush(kmap_pte-idx, vaddr);
}
pagefault_enable(); pagefault_enable();
} }
......
...@@ -155,11 +155,11 @@ fast_shmem_read(struct page **pages, ...@@ -155,11 +155,11 @@ fast_shmem_read(struct page **pages,
char __iomem *vaddr; char __iomem *vaddr;
int unwritten; int unwritten;
vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
if (vaddr == NULL) if (vaddr == NULL)
return -ENOMEM; return -ENOMEM;
unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
kunmap_atomic(vaddr, KM_USER0); kunmap_atomic(vaddr);
if (unwritten) if (unwritten)
return -EFAULT; return -EFAULT;
...@@ -509,10 +509,10 @@ fast_user_write(struct io_mapping *mapping, ...@@ -509,10 +509,10 @@ fast_user_write(struct io_mapping *mapping,
char *vaddr_atomic; char *vaddr_atomic;
unsigned long unwritten; unsigned long unwritten;
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0); vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
user_data, length); user_data, length);
io_mapping_unmap_atomic(vaddr_atomic, KM_USER0); io_mapping_unmap_atomic(vaddr_atomic);
if (unwritten) if (unwritten)
return -EFAULT; return -EFAULT;
return 0; return 0;
...@@ -551,11 +551,11 @@ fast_shmem_write(struct page **pages, ...@@ -551,11 +551,11 @@ fast_shmem_write(struct page **pages,
char __iomem *vaddr; char __iomem *vaddr;
unsigned long unwritten; unsigned long unwritten;
vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
if (vaddr == NULL) if (vaddr == NULL)
return -ENOMEM; return -ENOMEM;
unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length); unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
kunmap_atomic(vaddr, KM_USER0); kunmap_atomic(vaddr);
if (unwritten) if (unwritten)
return -EFAULT; return -EFAULT;
...@@ -3346,8 +3346,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, ...@@ -3346,8 +3346,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
reloc_offset = obj_priv->gtt_offset + reloc->offset; reloc_offset = obj_priv->gtt_offset + reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
(reloc_offset & (reloc_offset &
~(PAGE_SIZE - 1)), ~(PAGE_SIZE - 1)));
KM_USER0);
reloc_entry = (uint32_t __iomem *)(reloc_page + reloc_entry = (uint32_t __iomem *)(reloc_page +
(reloc_offset & (PAGE_SIZE - 1))); (reloc_offset & (PAGE_SIZE - 1)));
reloc_val = target_obj_priv->gtt_offset + reloc->delta; reloc_val = target_obj_priv->gtt_offset + reloc->delta;
...@@ -3358,7 +3357,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, ...@@ -3358,7 +3357,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
readl(reloc_entry), reloc_val); readl(reloc_entry), reloc_val);
#endif #endif
writel(reloc_val, reloc_entry); writel(reloc_val, reloc_entry);
io_mapping_unmap_atomic(reloc_page, KM_USER0); io_mapping_unmap_atomic(reloc_page);
/* The updated presumed offset for this entry will be /* The updated presumed offset for this entry will be
* copied back out to the user. * copied back out to the user.
...@@ -4772,11 +4771,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev, ...@@ -4772,11 +4771,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->size / PAGE_SIZE; page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0); char *dst = kmap_atomic(obj_priv->pages[i]);
char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE); memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(dst, KM_USER0); kunmap_atomic(dst);
} }
drm_clflush_pages(obj_priv->pages, page_count); drm_clflush_pages(obj_priv->pages, page_count);
drm_agp_chipset_flush(dev); drm_agp_chipset_flush(dev);
...@@ -4833,11 +4832,11 @@ i915_gem_attach_phys_object(struct drm_device *dev, ...@@ -4833,11 +4832,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
page_count = obj->size / PAGE_SIZE; page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
char *src = kmap_atomic(obj_priv->pages[i], KM_USER0); char *src = kmap_atomic(obj_priv->pages[i]);
char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE); memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src, KM_USER0); kunmap_atomic(src);
} }
i915_gem_object_put_pages(obj); i915_gem_object_put_pages(obj);
......
...@@ -456,10 +456,9 @@ i915_error_object_create(struct drm_device *dev, ...@@ -456,10 +456,9 @@ i915_error_object_create(struct drm_device *dev,
local_irq_save(flags); local_irq_save(flags);
s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
reloc_offset, reloc_offset);
KM_IRQ0);
memcpy_fromio(d, s, PAGE_SIZE); memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s, KM_IRQ0); io_mapping_unmap_atomic(s);
local_irq_restore(flags); local_irq_restore(flags);
dst->pages[page] = d; dst->pages[page] = d;
......
...@@ -187,8 +187,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over ...@@ -187,8 +187,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
if (OVERLAY_NONPHYSICAL(overlay->dev)) { if (OVERLAY_NONPHYSICAL(overlay->dev)) {
regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
overlay->reg_bo->gtt_offset, overlay->reg_bo->gtt_offset);
KM_USER0);
if (!regs) { if (!regs) {
DRM_ERROR("failed to map overlay regs in GTT\n"); DRM_ERROR("failed to map overlay regs in GTT\n");
...@@ -203,7 +202,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over ...@@ -203,7 +202,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
{ {
if (OVERLAY_NONPHYSICAL(overlay->dev)) if (OVERLAY_NONPHYSICAL(overlay->dev))
io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0); io_mapping_unmap_atomic(overlay->virt_addr);
overlay->virt_addr = NULL; overlay->virt_addr = NULL;
......
...@@ -2167,11 +2167,11 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb, ...@@ -2167,11 +2167,11 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
if (off < pci_resource_len(dev->pdev, 1)) { if (off < pci_resource_len(dev->pdev, 1)) {
uint8_t __iomem *p = uint8_t __iomem *p =
io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
val = ioread32(p + (off & ~PAGE_MASK)); val = ioread32(p + (off & ~PAGE_MASK));
io_mapping_unmap_atomic(p, KM_USER0); io_mapping_unmap_atomic(p);
} }
return val; return val;
...@@ -2183,12 +2183,12 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb, ...@@ -2183,12 +2183,12 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
{ {
if (off < pci_resource_len(dev->pdev, 1)) { if (off < pci_resource_len(dev->pdev, 1)) {
uint8_t __iomem *p = uint8_t __iomem *p =
io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
iowrite32(val, p + (off & ~PAGE_MASK)); iowrite32(val, p + (off & ~PAGE_MASK));
wmb(); wmb();
io_mapping_unmap_atomic(p, KM_USER0); io_mapping_unmap_atomic(p);
} }
} }
......
...@@ -170,7 +170,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, ...@@ -170,7 +170,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
#ifdef CONFIG_X86 #ifdef CONFIG_X86
dst = kmap_atomic_prot(d, KM_USER0, prot); dst = kmap_atomic_prot(d, prot);
#else #else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
dst = vmap(&d, 1, 0, prot); dst = vmap(&d, 1, 0, prot);
...@@ -183,7 +183,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, ...@@ -183,7 +183,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
memcpy_fromio(dst, src, PAGE_SIZE); memcpy_fromio(dst, src, PAGE_SIZE);
#ifdef CONFIG_X86 #ifdef CONFIG_X86
kunmap_atomic(dst, KM_USER0); kunmap_atomic(dst);
#else #else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(dst); vunmap(dst);
...@@ -206,7 +206,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, ...@@ -206,7 +206,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86 #ifdef CONFIG_X86
src = kmap_atomic_prot(s, KM_USER0, prot); src = kmap_atomic_prot(s, prot);
#else #else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
src = vmap(&s, 1, 0, prot); src = vmap(&s, 1, 0, prot);
...@@ -219,7 +219,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, ...@@ -219,7 +219,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
memcpy_toio(dst, src, PAGE_SIZE); memcpy_toio(dst, src, PAGE_SIZE);
#ifdef CONFIG_X86 #ifdef CONFIG_X86
kunmap_atomic(src, KM_USER0); kunmap_atomic(src);
#else #else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(src); vunmap(src);
......
@@ -28,18 +28,6 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 #include <asm/kmap_types.h>

-#ifdef CONFIG_DEBUG_HIGHMEM
-void debug_kmap_atomic(enum km_type type);
-#else
-static inline void debug_kmap_atomic(enum km_type type)
-{
-}
-#endif
-
 #ifdef CONFIG_HIGHMEM
 #include <asm/highmem.h>

@@ -49,6 +37,27 @@ extern unsigned long totalhigh_pages;

 void kmap_flush_unused(void);

+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+	int idx = __get_cpu_var(__kmap_atomic_idx)++;
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+	WARN_ON_ONCE(in_irq() && !irqs_disabled());
+	BUG_ON(idx > KM_TYPE_NR);
+#endif
+	return idx;
+}
+
+static inline int kmap_atomic_idx_pop(void)
+{
+	int idx = --__get_cpu_var(__kmap_atomic_idx);
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(idx < 0);
+#endif
+	return idx;
+}
+
 #else /* CONFIG_HIGHMEM */

 static inline unsigned int nr_free_highpages(void) { return 0; }

@@ -66,19 +75,19 @@ static inline void kunmap(struct page *page)
 {
 }

-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
 {
 	pagefault_disable();
 	return page_address(page);
 }
-#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
 {
 	pagefault_enable();
 }

-#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

 #define kmap_flush_unused()	do {} while(0)

@@ -86,12 +95,20 @@ static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
 #endif /* CONFIG_HIGHMEM */

-/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
-/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
-#define kunmap_atomic(addr, idx) do { \
+/*
+ * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
+ */
+#define kmap_atomic(page, args...) __kmap_atomic(page)
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr, args...)				\
+do {								\
 	BUILD_BUG_ON(__same_type((addr), struct page *));	\
-	kunmap_atomic_notypecheck((addr), (idx));		\
+	__kunmap_atomic(addr);					\
 } while (0)

 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
...
...@@ -81,8 +81,7 @@ io_mapping_free(struct io_mapping *mapping) ...@@ -81,8 +81,7 @@ io_mapping_free(struct io_mapping *mapping)
/* Atomic map/unmap */ /* Atomic map/unmap */
static inline void __iomem * static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping, io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset, unsigned long offset)
int slot)
{ {
resource_size_t phys_addr; resource_size_t phys_addr;
unsigned long pfn; unsigned long pfn;
...@@ -90,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, ...@@ -90,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
BUG_ON(offset >= mapping->size); BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset; phys_addr = mapping->base + offset;
pfn = (unsigned long) (phys_addr >> PAGE_SHIFT); pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
return iomap_atomic_prot_pfn(pfn, slot, mapping->prot); return iomap_atomic_prot_pfn(pfn, mapping->prot);
} }
static inline void static inline void
io_mapping_unmap_atomic(void __iomem *vaddr, int slot) io_mapping_unmap_atomic(void __iomem *vaddr)
{ {
iounmap_atomic(vaddr, slot); iounmap_atomic(vaddr);
} }
static inline void __iomem * static inline void __iomem *
...@@ -137,14 +136,13 @@ io_mapping_free(struct io_mapping *mapping) ...@@ -137,14 +136,13 @@ io_mapping_free(struct io_mapping *mapping)
/* Atomic map/unmap */ /* Atomic map/unmap */
static inline void __iomem * static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping, io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset, unsigned long offset)
int slot)
{ {
return ((char __force __iomem *) mapping) + offset; return ((char __force __iomem *) mapping) + offset;
} }
static inline void static inline void
io_mapping_unmap_atomic(void __iomem *vaddr, int slot) io_mapping_unmap_atomic(void __iomem *vaddr)
{ {
} }
......
@@ -42,6 +42,10 @@
 unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);

+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+
 unsigned int nr_free_highpages (void)
 {
 	pg_data_t *pgdat;
@@ -422,61 +426,3 @@ void __init page_address_init(void)
 }

 #endif	/* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type)
-{
-	static int warn_count = 10;
-
-	if (unlikely(warn_count < 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_nmi()) {
-			if (type != KM_NMI && type != KM_NMI_PTE) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-			    type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-			type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
-			type == KM_IRQ_PTE || type == KM_NMI ||
-			type == KM_NMI_PTE ) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#ifdef CONFIG_KGDB_KDB
-	if (unlikely(type == KM_KDB && atomic_read(&kgdb_active) == -1)) {
-		WARN_ON(1);
-		warn_count--;
-	}
-#endif /* CONFIG_KGDB_KDB */
-}
-#endif