Commit 4b19c940 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] atomic copy_*_user infrastructure

The patch implements atomic copy_*_user() functions.

If the kernel takes a pagefault while running copy_*_user() in an
atomic region, the copy_*_user() will fail: it performs a short copy
and returns the number of bytes left uncopied.

With this patch, holding an atomic kmap() puts the CPU into an
atomic region.
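
A minimal sketch of the intended caller-side pattern (illustration
only, not part of this patch; copy_into_page() and its page/offset/
buf/bytes parameters are hypothetical, modelled on the pagecache
write path of this era):

/*
 * Hypothetical caller, for illustration only.  With an atomic kmap
 * held, a faulting __copy_from_user() no longer services the fault:
 * it returns the number of bytes left uncopied, and the caller drops
 * the atomic kmap and retries under a sleeping kmap().
 */
static int copy_into_page(struct page *page, unsigned long offset,
			  const char *buf, unsigned long bytes)
{
	char *kaddr;
	unsigned long left;

	kaddr = kmap_atomic(page, KM_USER0);	/* enter atomic region */
	left = __copy_from_user(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);		/* leave atomic region */

	if (left) {
		/* Atomic copy was refused: retry where faults may sleep. */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return left ? -EFAULT : 0;
}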

- Increment preempt_count() in kmap_atomic() regardless of the
  setting of CONFIG_PREEMPT.  The pagefault handler recognises this as
  an atomic region and refuses to service the fault; copy_*_user() will
  then return a non-zero value (the predicate behind this check is
  sketched after this list).

- Attempts to propagate the in_atomic() predicate to all the other
  highmem-capable architectures' pagefault handlers, but the code is
  only tested on x86.

- Fixed a PPC bug in kunmap_atomic(): it failed to re-enable
  preemption when HIGHMEM_DEBUG is turned on.

- Fixed a sparc bug in kunmap_atomic(): it never re-enabled
  preemption for non-fixmap pages.

- Fixed an error in <linux/highmem.h>: in the CONFIG_HIGHMEM=n case,
  kunmap_atomic() takes an address, not a page *.
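
For reference, the predicate added by this patch (shown verbatim in
the hardirq.h hunks below) reads as follows.  The one subtlety: under
CONFIG_PREEMPT, holding the BKL also raises preempt_count(), yet BKL
holders may legitimately fault and sleep, so the BKL's contribution is
subtracted back out via kernel_locked():

/*
 * Under CONFIG_PREEMPT the BKL contributes to preempt_count(), but
 * BKL holders may fault and sleep, so the BKL depth reported by
 * kernel_locked() is discounted.
 */
#if CONFIG_PREEMPT
# define in_atomic()	(preempt_count() != kernel_locked())
#else
# define in_atomic()	(preempt_count() != 0)
#endif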
parent 5f607d6e
@@ -102,7 +102,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 #endif /* !CONFIG_4xx */
 #endif /* CONFIG_XMON || CONFIG_KGDB */
-	if (in_interrupt() || mm == NULL) {
+	if (in_atomic() || mm == NULL) {
 		bad_page_fault(regs, address, SIGSEGV);
 		return;
 	}
...
@@ -233,7 +233,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
		goto no_context;
 	down_read(&mm->mmap_sem);
...
@@ -81,7 +81,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
-	preempt_disable();
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -104,7 +104,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 	if (vaddr < FIXADDR_START) { // FIXME
-		preempt_enable();
+		dec_preempt_count();
 		return;
 	}
@@ -119,7 +119,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	__flush_tlb_one(vaddr);
 #endif
-	preempt_enable();
+	dec_preempt_count();
 }
 #endif /* __KERNEL__ */
...
@@ -85,8 +85,10 @@ typedef struct {
 #define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
 #if CONFIG_PREEMPT
+# define in_atomic()	(preempt_count() != kernel_locked())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
+# define in_atomic()	(preempt_count() != 0)
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 #define irq_exit()						\
...
@@ -88,6 +88,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned int idx;
 	unsigned long vaddr;
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -109,8 +110,10 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
-	if (vaddr < KMAP_FIX_BEGIN) // FIXME
+	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
+		dec_preempt_count();
 		return;
+	}
 	if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
 		BUG();
@@ -122,6 +125,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	pte_clear(kmap_pte+idx);
 	flush_tlb_page(0, vaddr);
 #endif
+	dec_preempt_count();
 }
 #endif /* __KERNEL__ */
...
@@ -113,6 +113,12 @@ do { \
 #define irq_exit()	br_read_unlock(BR_GLOBALIRQ_LOCK)
 #endif
+#if CONFIG_PREEMPT
+# define in_atomic()	(preempt_count() != kernel_locked())
+#else
+# define in_atomic()	(preempt_count() != 0)
+#endif
 #ifndef CONFIG_SMP
 #define synchronize_irq()	barrier()
...
@@ -83,6 +83,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long idx;
 	unsigned long vaddr;
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -116,8 +117,10 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr;
 	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
-	if (vaddr < FIX_KMAP_BEGIN) // FIXME
+	if (vaddr < FIX_KMAP_BEGIN) { // FIXME
+		dec_preempt_count();
 		return;
+	}
 	if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
 		BUG();
@@ -142,6 +145,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	flush_tlb_all();
 #endif
 #endif
+	dec_preempt_count();
 }
 #endif /* __KERNEL__ */
...
@@ -24,8 +24,8 @@ static inline void *kmap(struct page *page) { return page_address(page); }
 #define kunmap(page) do { (void) (page); } while (0)
-#define kmap_atomic(page,idx)		kmap(page)
-#define kunmap_atomic(page,idx)		kunmap(page)
+#define kmap_atomic(page, idx)		page_address(page)
+#define kunmap_atomic(addr, idx)	do { } while (0)
 #endif /* CONFIG_HIGHMEM */
...