Commit 069b9186, authored by Nathan Lynch, committed by Catalin Marinas

arm64: vdso: fix coarse clock handling

When __kernel_clock_gettime is called with a CLOCK_MONOTONIC_COARSE or
CLOCK_REALTIME_COARSE clock id, it returns incorrectly to whatever the
caller has placed in x2 ("ret x2" to return from the fast path).  Fix
this by saving x30/LR to x2 only in code that will call
__do_get_tspec, restoring x30 afterward, and using a plain "ret" to
return from the routine.

Also: while the resulting tv_nsec value for CLOCK_REALTIME and
CLOCK_MONOTONIC must be computed using intermediate values that are
left-shifted by cs_shift (x12, set by __do_get_tspec), the results for
coarse clocks should be calculated using unshifted values
(xtime_coarse_nsec is in units of actual nanoseconds).  The current
code shifts intermediate values by x12 unconditionally, but x12 is
uninitialized when servicing a coarse clock.  Fix this by setting x12
to 0 once we know we are dealing with a coarse clock id.
Signed-off-by: Nathan Lynch <nathan_lynch@mentor.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 883d50a0
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
 	bl	__do_get_tspec
 	seqcnt_check w9, 1b

+	mov	x30, x2
+
 	cmp	w0, #CLOCK_MONOTONIC
 	b.ne	6f
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
 	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
 	b.ne	8f

+	/* xtime_coarse_nsec is already right-shifted */
+	mov	x12, #0
+
 	/* Get coarse timespec. */
 	adr	vdso_data, _vdso_data
 3:	seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
 	lsr	x11, x11, x12
 	stp	x10, x11, [x1, #TSPEC_TV_SEC]
 	mov	x0, xzr
-	ret	x2
+	ret
 7:
 	mov	x30, x2
 8:	/* Syscall fallback. */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.