Commit 793d74a8 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/vdso64: Switch from __get_datapage() to get_datapage inline macro

In the same way as already done on PPC32, drop the __get_datapage() function
and use the get_datapage inline macro instead.

See commit ec0895f0 ("powerpc/vdso32: inline __get_datapage()")
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/e13d95312e0b9792556b19b4bb8955cc1ff19fc7.1588079622.git.christophe.leroy@c-s.fr
parent 96032f98
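For context, get_datapage is the inline assembler macro the PPC32 VDSO already uses (see commit ec0895f0); it becomes visible to the 64-bit VDSO through the <asm/vdso_datapage.h> include added in each file below. A rough sketch of what the macro does, modelled on the vdso32 version (the exact upstream definition may differ slightly):

	/* sketch only: based on the PPC32 get_datapage added by commit ec0895f0 */
	.macro get_datapage ptr, tmp
	bcl	20, 31, .+4	/* bcl to the very next instruction: LR = address of the mflr below */
	mflr	\ptr		/* \ptr = runtime address of this code inside the VDSO */
	addi	\ptr, \ptr, (__kernel_datapage_offset - (.-4))@l	/* \ptr = &__kernel_datapage_offset */
	lwz	\tmp, 0(\ptr)	/* \tmp = stored offset from that label to the data page */
	add	\ptr, \tmp, \ptr	/* \ptr now points to the VDSO data page */
	.endm

Expanding this inline puts the data page address directly in whichever register the caller asks for (r10 or r3 below), so the register shuffling around the old bl V_LOCAL_FUNC(__get_datapage) call (mr r11,r3 / mr r10,r3) disappears along with the out-of-line helper.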
@@ -8,6 +8,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
#include <asm/asm-offsets.h>
.text
@@ -24,14 +25,12 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
.cfi_startproc
mflr r12
.cfi_register lr,r12
-mr r11,r3
-bl V_LOCAL_FUNC(__get_datapage)
+get_datapage r10, r0
mtlr r12
-mr r10,r3
lwz r7,CFG_DCACHE_BLOCKSZ(r10)
addi r5,r7,-1
-andc r6,r11,r5 /* round low to line bdy */
+andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10)
@@ -48,7 +47,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
lwz r7,CFG_ICACHE_BLOCKSZ(r10)
addi r5,r7,-1
-andc r6,r11,r5 /* round low to line bdy */
+andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5
lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10)
@@ -10,35 +10,13 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
.text
.global __kernel_datapage_offset;
__kernel_datapage_offset:
.long 0
-V_FUNCTION_BEGIN(__get_datapage)
-.cfi_startproc
-/* We don't want that exposed or overridable as we want other objects
- * to be able to bl directly to here
- */
-.protected __get_datapage
-.hidden __get_datapage
-mflr r0
-.cfi_register lr,r0
-bcl 20,31,data_page_branch
-data_page_branch:
-mflr r3
-mtlr r0
-addi r3, r3, __kernel_datapage_offset-data_page_branch
-lwz r0,0(r3)
-.cfi_restore lr
-add r3,r0,r3
-blr
-.cfi_endproc
-V_FUNCTION_END(__get_datapage)
/*
* void *__kernel_get_syscall_map(unsigned int *syscall_count) ;
*
@@ -53,7 +31,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
mflr r12
.cfi_register lr,r12
mr r4,r3
-bl V_LOCAL_FUNC(__get_datapage)
+get_datapage r3, r0
mtlr r12
addi r3,r3,CFG_SYSCALL_MAP64
cmpldi cr0,r4,0
@@ -75,7 +53,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
.cfi_startproc
mflr r12
.cfi_register lr,r12
-bl V_LOCAL_FUNC(__get_datapage)
+get_datapage r3, r0
ld r3,CFG_TB_TICKS_PER_SEC(r3)
mtlr r12
crclr cr0*4+so
@@ -9,6 +9,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
@@ -26,7 +27,7 @@ V_FUNCTION_BEGIN(__kernel_gettimeofday)
mr r11,r3 /* r11 holds tv */
mr r10,r4 /* r10 holds tz */
-bl V_LOCAL_FUNC(__get_datapage) /* get data page */
+get_datapage r3, r0
cmpldi r11,0 /* check if tv is NULL */
beq 2f
lis r7,1000000@ha /* load up USEC_PER_SEC */
@@ -71,7 +72,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
mflr r12 /* r12 saves lr */
.cfi_register lr,r12
mr r11,r4 /* r11 saves tp */
-bl V_LOCAL_FUNC(__get_datapage) /* get data page */
+get_datapage r3, r0
lis r7,NSEC_PER_SEC@h /* want nanoseconds */
ori r7,r7,NSEC_PER_SEC@l
beq cr5,70f
@@ -188,7 +189,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
mflr r12
.cfi_register lr,r12
-bl V_LOCAL_FUNC(__get_datapage)
+get_datapage r3, r0
lwz r5, CLOCK_HRTIMER_RES(r3)
mtlr r12
li r3,0
@@ -221,7 +222,7 @@ V_FUNCTION_BEGIN(__kernel_time)
.cfi_register lr,r12
mr r11,r3 /* r11 holds t */
-bl V_LOCAL_FUNC(__get_datapage)
+get_datapage r3, r0
ld r4,STAMP_XTIME_SEC(r3)