Commit 652050ae authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] mark several functions __always_inline

Arjan van de Ven <arjan@infradead.org>

Mark a number of functions as 'must inline'.  The functions affected by this
patch need to be inlined because they rely on knowing that their arguments are
compile-time constants, so that most of the function body optimizes away.  At
this point this patch does not change behavior; it's for documentation only
(and for future patches in the inline series).
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9ab34fe7
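
Background on why plain `inline` is not enough here: gcc treats `inline` as a
hint and may still emit the function out of line (for example at -Os, or when
its size heuristics say no). Inside an out-of-line body, __builtin_constant_p()
on a parameter evaluates to 0, so the constant-folding dispatch these functions
depend on silently degrades to the generic path. A minimal, self-contained
sketch of the pattern, assuming gcc; all names below are illustrative, not
taken from the patch:

#include <string.h>

#ifndef __always_inline
/* in-kernel this comes from <linux/compiler-gcc.h> */
#define __always_inline inline __attribute__((always_inline))
#endif

/*
 * The switch below only folds away if this body is expanded at a call
 * site where n is a literal; __always_inline guarantees that expansion.
 */
static __always_inline void copy_example(void *to, const void *from, size_t n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {	/* resolved entirely at compile time */
		case 1:
			*(char *)to = *(const char *)from;
			return;
		case 4:
			memcpy(to, from, 4);	/* becomes one 32-bit move */
			return;
		}
	}
	memcpy(to, from, n);	/* generic fallback */
}

A call such as copy_example(dst, src, 4) then compiles down to a single move,
while copy_example(dst, src, len) takes the generic path.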
@@ -247,7 +247,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
 static int test_bit(int nr, const volatile void * addr);
 #endif
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
@@ -5,7 +5,7 @@
 
 struct task_struct;
 
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
 {
 	return current_thread_info()->task;
 }
@@ -201,7 +201,7 @@ __asm__ __volatile__(
 	return __res;
 }
 
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
 {
 	int d0, d1, d2;
 	__asm__ __volatile__(
@@ -223,7 +223,7 @@ return (to);
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
 {
 	long esi, edi;
 	if (!n) return to;
@@ -367,7 +367,7 @@ return s;
  * things 32 bits at a time even when we don't know the size of the
  * area at compile-time..
  */
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
 {
 	int d0, d1;
 	__asm__ __volatile__(
@@ -416,7 +416,7 @@ extern char *strstr(const char *cs, const char *ct);
  * This looks horribly ugly, but the compiler can optimize it totally,
  * as we by now know that both pattern and count is constant..
  */
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 {
 	switch (count) {
 		case 0:
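
The reason __constant_memcpy() and friends must never exist out of line is
that callers do not invoke them directly: in the i386 <asm/string.h> of this
era, memcpy() itself is a macro that dispatches on whether the length is a
compile-time constant, roughly as follows (quoted from memory, so treat as
approximate):

#define memcpy(t, f, n) \
	(__builtin_constant_p(n) ? \
	 __constant_memcpy((t), (f), (n)) : \
	 __memcpy((t), (f), (n)))

So __constant_memcpy() is only ever reached with a constant n. If gcc
nevertheless emitted it out of line, its large switch over n would be
materialized as real code instead of collapsing to a handful of moves.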
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
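
The four uaccess hunks above change two layers of the same scheme: the
_inatomic variants hold the __builtin_constant_p() fast path (constant sizes
short-circuit to single user-space accesses, everything else falls through to
the out-of-line __copy_to_user_ll()/__copy_from_user_ll()), while the sleeping
variants merely add the might_sleep() debug check on top. A hedged sketch of
that layering, not the kernel's exact body; the function name is illustrative
and the kernel environment (__user, __must_check, might_sleep()) is assumed:

/* sketch only: delegates to the real __copy_to_user_inatomic() */
static __always_inline unsigned long __must_check
__copy_to_user_sketch(void __user *to, const void *from, unsigned long n)
{
	might_sleep();		/* we may fault on user memory and sleep */
	return __copy_to_user_inatomic(to, from, n);
}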
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void);
  * directly without translation, we catch the bug with a NULL-deference
  * kernel oops. Illegal ranges of incoming indices are caught too.
  */
-static inline unsigned long fix_to_virt(const unsigned int idx)
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
 {
 	/*
 	 * this branch gets completely eliminated after inlining,
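
fix_to_virt() relies on a second trick that only stays sound under guaranteed
inlining: the bounds check calls __this_fixmap_does_not_exist(), which is
declared (see the hunk header above) but defined nowhere, so any reference
that survives compilation fails the final link. A self-contained sketch of the
idiom, with illustrative names not taken from the kernel:

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

/* deliberately never defined: a surviving call breaks the link */
extern void bad_table_index(void);

enum { TABLE_ENTRIES = 8 };
static unsigned long table[TABLE_ENTRIES];

static __always_inline unsigned long lookup(const unsigned int idx)
{
	if (idx >= TABLE_ENTRIES)
		bad_table_index();	/* folds away for valid constant idx */
	return table[idx];
}

With plain inline, one uninlined copy of lookup() would reference
bad_table_index() and break every build; with __always_inline and a constant
idx, the branch is provably dead and only out-of-range constants fail, at link
time instead of at run time.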
@@ -244,7 +244,7 @@ extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
 extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
 extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
 
-static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -273,7 +273,7 @@ static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
 	}
 }
 
-static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
+static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -305,7 +305,7 @@ static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
-static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -512,7 +512,7 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 extern struct page *mem_map;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
 }
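
For reference, __always_inline itself is a thin wrapper over a gcc attribute;
in the include/linux/compiler-gcc.h of this era it expands approximately to:

#define __always_inline	inline __attribute__((always_inline))

Unlike plain inline, the always_inline attribute makes gcc inline the function
regardless of its size heuristics and optimization level, and report an error
if inlining is impossible.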