Commit 78ebfa88 authored by Michal Simek

microblaze: Adding likely macros

Based on GCOV analysis, it is helpful to add likely/unlikely
macros.
Signed-off-by: Michal Simek <monstr@monstr.eu>
parent 13851966
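
For reference, the likely()/unlikely() annotations used throughout this patch are the kernel's standard branch-prediction hints from include/linux/compiler.h. A minimal sketch of what they expand to (simplified; the in-tree definitions may carry additional branch-profiling instrumentation):

    /* Tell GCC which way a branch is expected to go; the double negation
     * normalizes the condition to 0/1 before passing it to __builtin_expect(). */
    #define likely(x)       __builtin_expect(!!(x), 1)
    #define unlikely(x)     __builtin_expect(!!(x), 0)

On most targets this only influences code layout and static branch prediction, so the hints pay off when, as the GCOV data here suggests, one side of a branch strongly dominates.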
@@ -37,7 +37,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-	if (dev)
+	if (likely(dev))
 		return (unsigned long)dev->archdata.dma_data;
 	return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
...
@@ -75,7 +75,10 @@ __setup("hlt", hlt_setup);
 void default_idle(void)
 {
-	if (!hlt_counter) {
+	if (likely(hlt_counter)) {
+		while (!need_resched())
+			cpu_relax();
+	} else {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
 		local_irq_disable();
@@ -83,9 +86,7 @@ void default_idle(void)
 		cpu_sleep();
 		local_irq_enable();
 		set_thread_flag(TIF_POLLING_NRFLAG);
-	} else
-		while (!need_resched())
-			cpu_relax();
+	}
 }
 void cpu_idle(void)
...
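
Taken together, the two hunks above restructure default_idle() so that the hlt_counter != 0 branch, a plain polling loop on need_resched(), becomes the statically predicted path. A rough sketch of the resulting control flow, reconstructed only from the lines visible above (context elided between the two hunks is marked as omitted):

    void default_idle(void)
    {
            if (likely(hlt_counter)) {
                    /* predicted path: poll for a reschedule request */
                    while (!need_resched())
                            cpu_relax();
            } else {
                    /* other path: enter cpu_sleep() with interrupts disabled */
                    clear_thread_flag(TIF_POLLING_NRFLAG);
                    smp_mb__after_clear_bit();
                    local_irq_disable();
                    /* ... context omitted in the excerpt above ... */
                    cpu_sleep();
                    local_irq_enable();
                    set_thread_flag(TIF_POLLING_NRFLAG);
            }
    }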
@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
 	const uint32_t *i_src;
 	uint32_t *i_dst;
-	if (c >= 4) {
+	if (likely(c >= 4)) {
 		unsigned value, buf_hold;
 		/* Align the dstination to a word boundry. */
...
@@ -33,22 +33,23 @@
 #ifdef __HAVE_ARCH_MEMSET
 void *memset(void *v_src, int c, __kernel_size_t n)
 {
 	char *src = v_src;
 #ifdef CONFIG_OPT_LIB_FUNCTION
 	uint32_t *i_src;
-	uint32_t w32;
+	uint32_t w32 = 0;
 #endif
 	/* Truncate c to 8 bits */
 	c = (c & 0xFF);
 #ifdef CONFIG_OPT_LIB_FUNCTION
-	/* Make a repeating word out of it */
-	w32 = c;
-	w32 |= w32 << 8;
-	w32 |= w32 << 16;
+	if (unlikely(c)) {
+		/* Make a repeating word out of it */
+		w32 = c;
+		w32 |= w32 << 8;
+		w32 |= w32 << 16;
+	}
-	if (n >= 4) {
+	if (likely(n >= 4)) {
 		/* Align the destination to a word boundary */
 		/* This is done in an endian independant manner */
 		switch ((unsigned) src & 3) {
...
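
To make the memset() change above concrete: the repeating-word construction turns the 8-bit fill value into a 32-bit pattern so the optimized path can store whole words, and with w32 now initialized to 0 the new unlikely(c) guard lets the common zero-fill case skip that work entirely. A standalone illustration in plain C (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int c = 0xAB;                   /* fill byte, already truncated to 8 bits */
            uint32_t w32 = 0;

            if (c) {                        /* the kernel wraps this test in unlikely() */
                    w32 = c;                /* 0x000000AB */
                    w32 |= w32 << 8;        /* 0x0000ABAB */
                    w32 |= w32 << 16;       /* 0xABABABAB */
            }
            printf("w32 = 0x%08x\n", (unsigned int)w32);    /* prints w32 = 0xabababab */
            return 0;
    }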
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	regs->esr = error_code;
 	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (kernel_mode(regs) && (address >= TASK_SIZE)) {
+	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
 		printk(KERN_WARNING "kernel task_size exceed");
 		_exception(SIGSEGV, regs, code, address);
 	}
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 #endif /* CONFIG_KGDB */
-	if (in_atomic() || !mm) {
+	if (unlikely(in_atomic() || !mm)) {
 		if (kernel_mode(regs))
 			goto bad_area_nosemaphore;
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * source. If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 	vma = find_vma(mm, address);
-	if (!vma)
+	if (unlikely(!vma))
 		goto bad_area;
 	if (vma->vm_start <= address)
 		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
+	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
 		goto bad_area;
-	if (!is_write)
+	if (unlikely(!is_write))
 		goto bad_area;
 	/*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * before setting the user r1. Thus we allow the stack to
 	 * expand to 1MB without further checks.
 	 */
-	if (address + 0x100000 < vma->vm_end) {
+	if (unlikely(address + 0x100000 < vma->vm_end)) {
 		/* get user regs even if this fault is in kernel mode */
 		struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	code = SEGV_ACCERR;
 	/* a write */
-	if (is_write) {
-		if (!(vma->vm_flags & VM_WRITE))
+	if (unlikely(is_write)) {
+		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			goto bad_area;
 	/* a read */
 	} else {
 		/* protection fault */
-		if (error_code & 0x08000000)
+		if (unlikely(error_code & 0x08000000))
 			goto bad_area;
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
 			goto bad_area;
 	}
@@ -235,7 +235,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+	if (unlikely(fault & VM_FAULT_MAJOR))
 		current->maj_flt++;
 	else
 		current->min_flt++;
...
@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 		err = 0;
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 				__pgprot(flags)));
-		if (mem_init_done)
+		if (unlikely(mem_init_done))
 			flush_HPTE(0, va, pmd_val(*pd));
 		/* flush_HPTE(0, va, pg); */
 	}
...