Commit d26e0b89 authored by Tony Luck

Merge agluck-lia64.sc.intel.com:/data/home/aegl/BK/work/ian

into agluck-lia64.sc.intel.com:/data/home/aegl/BK/linux-ia64-release-2.6.9
parents e3c14a19 70e2700a
@@ -28,36 +28,6 @@
 #include <asm/processor.h>
 
-#define MMU_CONTEXT_DEBUG 0
-
-#if MMU_CONTEXT_DEBUG
-
-#include <ia64intrin.h>
-
-extern struct mmu_trace_entry {
-	char op;
-	u8 cpu;
-	u32 context;
-	void *mm;
-} mmu_tbuf[1024];
-
-extern volatile int mmu_tbuf_index;
-
-# define MMU_TRACE(_op,_cpu,_mm,_ctx)						\
-do {										\
-	int i = __sync_fetch_and_add(&mmu_tbuf_index, 1) % ARRAY_SIZE(mmu_tbuf); \
-	struct mmu_trace_entry e;						\
-	e.op = (_op);								\
-	e.cpu = (_cpu);								\
-	e.mm = (_mm);								\
-	e.context = (_ctx);							\
-	mmu_tbuf[i] = e;							\
-} while (0)
-
-#else
-# define MMU_TRACE(op,cpu,mm,ctx)	do { ; } while (0)
-#endif
-
 struct ia64_ctx {
 	spinlock_t lock;
 	unsigned int next;	/* next context number to use */
@@ -123,7 +93,6 @@ get_mmu_context (struct mm_struct *mm)
 static inline int
 init_new_context (struct task_struct *p, struct mm_struct *mm)
 {
-	MMU_TRACE('N', smp_processor_id(), mm, 0);
 	mm->context = 0;
 	return 0;
 }
@@ -132,7 +101,6 @@ static inline void
 destroy_context (struct mm_struct *mm)
 {
 	/* Nothing to do. */
-	MMU_TRACE('D', smp_processor_id(), mm, mm->context);
 }
 
 static inline void
@@ -171,19 +139,14 @@ activate_context (struct mm_struct *mm)
 	do {
 		context = get_mmu_context(mm);
-		MMU_TRACE('A', smp_processor_id(), mm, context);
 		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
 			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 		reload_context(context);
-		MMU_TRACE('a', smp_processor_id(), mm, context);
 		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
 	} while (unlikely(context != mm->context));
 }
 
-#define deactivate_mm(tsk,mm)					\
-do {								\
-	MMU_TRACE('d', smp_processor_id(), mm, mm->context);	\
-} while (0)
+#define deactivate_mm(tsk,mm)	do { } while (0)
 
 /*
  * Switch from address space PREV to address space NEXT.
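Aside for readers skimming the removed debug code: the MMU_TRACE() macro dropped above is a lock-free trace ring, where an atomic fetch-and-add claims a slot and the entry is then written without locking. Below is a minimal user-space sketch of that same pattern; the names, the buffer size and the main() driver are illustrative stand-ins, not the kernel's code.

/*
 * User-space sketch of the fetch-and-add ring-buffer trace pattern
 * used by the removed MMU_TRACE() macro.  Illustrative only.
 */
#include <stdio.h>

struct trace_entry {
	char op;		/* event tag, e.g. 'A' for activate */
	unsigned cpu;		/* CPU that logged the event */
	unsigned context;	/* mmu context number */
	void *mm;		/* address space the event refers to */
};

#define TBUF_SIZE 1024
static struct trace_entry tbuf[TBUF_SIZE];
static volatile int tbuf_index;

#define TRACE(_op, _cpu, _mm, _ctx)					\
do {									\
	/* atomically claim the next slot, wrapping around the ring */	\
	int i = __sync_fetch_and_add(&tbuf_index, 1) % TBUF_SIZE;	\
	tbuf[i] = (struct trace_entry){ .op = (_op), .cpu = (_cpu),	\
					.mm = (_mm), .context = (_ctx) }; \
} while (0)

int main(void)
{
	int dummy_mm;

	TRACE('A', 0, &dummy_mm, 42);	/* log an "activate" event */
	printf("slot 0: op=%c cpu=%u ctx=%u\n",
	       tbuf[0].op, tbuf[0].cpu, tbuf[0].context);
	return 0;
}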
@@ -84,6 +84,13 @@ extern int ia64_pfn_valid (unsigned long pfn);
 #endif
 
 #ifndef CONFIG_DISCONTIGMEM
+# ifdef CONFIG_VIRTUAL_MEM_MAP
+ extern struct page *vmem_map;
+# define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+# define page_to_pfn(page)	((unsigned long) (page - vmem_map))
+# define pfn_to_page(pfn)	(vmem_map + (pfn))
+# endif
+#else /* !CONFIG_VIRTUAL_MEM_MAP */
 #define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
 #define page_to_pfn(page)	((unsigned long) (page - mem_map))
 #define pfn_to_page(pfn)	(mem_map + (pfn))
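The vmem_map-based macros added above are plain pointer arithmetic: pfn_to_page() indexes into the struct page array and page_to_pfn() subtracts the array base back out. A small user-space sketch of that round trip follows; the placeholder struct page and the fake_vmem_map array are stand-ins for the kernel's, not its real definitions.

/*
 * Sketch of the page_to_pfn()/pfn_to_page() pointer arithmetic when a
 * (virtually mapped) struct page array backs the macros.  Not kernel code.
 */
#include <assert.h>
#include <stdio.h>

struct page { unsigned long flags; };	/* placeholder for the kernel's struct page */

static struct page fake_vmem_map[16];	/* stands in for the kernel's vmem_map array */

#define pfn_to_page(pfn)	(fake_vmem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - fake_vmem_map))

int main(void)
{
	unsigned long pfn = 7;
	struct page *pg = pfn_to_page(pfn);

	assert(page_to_pfn(pg) == pfn);	/* the two macros are inverses */
	printf("pfn %lu -> page %p -> pfn %lu\n",
	       pfn, (void *)pg, page_to_pfn(pg));
	return 0;
}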
@@ -48,22 +48,19 @@ local_finish_flush_tlb_mm (struct mm_struct *mm)
 static inline void
 flush_tlb_mm (struct mm_struct *mm)
 {
-	MMU_TRACE('F', smp_processor_id(), mm, mm->context);
 	if (!mm)
-		goto out;
+		return;
 
 	mm->context = 0;
 
 	if (atomic_read(&mm->mm_users) == 0)
-		goto out;		/* happens as a result of exit_mmap() */
+		return;		/* happens as a result of exit_mmap() */
 
 #ifdef CONFIG_SMP
 	smp_flush_tlb_mm(mm);
 #else
 	local_finish_flush_tlb_mm(mm);
 #endif
- out:
-	MMU_TRACE('f', smp_processor_id(), mm, mm->context);
 }
 
 extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
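For context on how this hunk ties back to the mmu_context.h changes: flush_tlb_mm() clears mm->context, and the do/while loop in activate_context() keeps re-fetching until the context it loaded still matches mm->context. The sketch below mimics that handshake in user space; struct fake_mm, get_context() and the sequencing in main() are simplified stand-ins, not the kernel's implementation.

/*
 * User-space sketch of the flush/activate interplay visible in this diff.
 * Single-threaded and simplified; for illustration only.
 */
#include <stdio.h>

struct fake_mm { unsigned long context; };

static unsigned long next_ctx = 1;

/* stand-in for get_mmu_context(): hand out a new number when context is 0 */
static unsigned long get_context(struct fake_mm *mm)
{
	if (mm->context == 0)
		mm->context = next_ctx++;
	return mm->context;
}

static void fake_flush_tlb_mm(struct fake_mm *mm)
{
	mm->context = 0;	/* force a fresh context on the next activation */
}

static void fake_activate_context(struct fake_mm *mm)
{
	unsigned long context;

	do {
		context = get_context(mm);
		/* ... reload_context(context) would happen here ... */
	} while (context != mm->context);	/* redo if flushed meanwhile */

	printf("running with context %lu\n", context);
}

int main(void)
{
	struct fake_mm mm = { .context = 0 };

	fake_activate_context(&mm);	/* allocates context 1 */
	fake_flush_tlb_mm(&mm);		/* invalidates it */
	fake_activate_context(&mm);	/* allocates context 2 */
	return 0;
}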