Commit 16c186ac authored by David S. Miller

Merge davem@nuts.davemloft.net:/disk1/BK/sparc-2.6

into kernel.bkbits.net:/home/davem/sparc-2.6
parents 0f180260 60c527ad
@@ -12,6 +12,10 @@ config MMU
         bool
         default y
 
+config TIME_INTERPOLATION
+        bool
+        default y
+
 source "init/Kconfig"
 
 menu "General machine setup"
...
@@ -17,7 +17,8 @@
 #define TASK_REGOFF (THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ)
 
 #define ETRAP_PSTATE1 (PSTATE_RMO | PSTATE_PRIV)
-#define ETRAP_PSTATE2 (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
+#define ETRAP_PSTATE2 \
+        (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
 
 /*
  * On entry, %g7 is return address - 0x4.
@@ -27,91 +28,91 @@
         .text
         .align 64
         .globl etrap, etrap_irq, etraptl1
-etrap:  rdpr %pil, %g2 ! Single Group
+etrap:  rdpr %pil, %g2
 etrap_irq:
-        rdpr %tstate, %g1 ! Single Group
+        rdpr %tstate, %g1
-        sllx %g2, 20, %g3 ! IEU0 Group
+        sllx %g2, 20, %g3
-        andcc %g1, TSTATE_PRIV, %g0 ! IEU1
+        andcc %g1, TSTATE_PRIV, %g0
-        or %g1, %g3, %g1 ! IEU0 Group
+        or %g1, %g3, %g1
-        bne,pn %xcc, 1f ! CTI
+        bne,pn %xcc, 1f
-        sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2 ! IEU1
+        sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
-        wrpr %g0, 7, %cleanwin ! Single Group+4bubbles
+        wrpr %g0, 7, %cleanwin
-        sethi %hi(TASK_REGOFF), %g2 ! IEU0 Group
+        sethi %hi(TASK_REGOFF), %g2
-        sethi %hi(TSTATE_PEF), %g3 ! IEU1
+        sethi %hi(TSTATE_PEF), %g3
-        or %g2, %lo(TASK_REGOFF), %g2 ! IEU0 Group
+        or %g2, %lo(TASK_REGOFF), %g2
-        and %g1, %g3, %g3 ! IEU1
+        and %g1, %g3, %g3
-        brnz,pn %g3, 1f ! CTI+IEU1 Group
+        brnz,pn %g3, 1f
-        add %g6, %g2, %g2 ! IEU0
+        add %g6, %g2, %g2
-        wr %g0, 0, %fprs ! Single Group+4bubbles
+        wr %g0, 0, %fprs
-1:      rdpr %tpc, %g3 ! Single Group
+1:      rdpr %tpc, %g3
-        stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE] ! Store Group
+        stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
-        rdpr %tnpc, %g1 ! Single Group
+        rdpr %tnpc, %g1
-        stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC] ! Store Group
+        stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
-        rd %y, %g3 ! Single Group+4bubbles
+        rd %y, %g3
-        stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] ! Store Group
+        stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
-        st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y] ! Store Group
+        st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y]
-        save %g2, -STACK_BIAS, %sp ! Ordering here is critical ! Single Group
+        save %g2, -STACK_BIAS, %sp ! Ordering here is critical
-        mov %g6, %l6 ! IEU0 Group
+        mov %g6, %l6
-        bne,pn %xcc, 3f ! CTI
+        bne,pn %xcc, 3f
-        mov PRIMARY_CONTEXT, %l4 ! IEU1
+        mov PRIMARY_CONTEXT, %l4
-        rdpr %canrestore, %g3 ! Single Group+4bubbles
+        rdpr %canrestore, %g3
-        rdpr %wstate, %g2 ! Single Group+4bubbles
+        rdpr %wstate, %g2
-        wrpr %g0, 0, %canrestore ! Single Group+4bubbles
+        wrpr %g0, 0, %canrestore
-        sll %g2, 3, %g2 ! IEU0 Group
+        sll %g2, 3, %g2
-        mov 1, %l5 ! IEU1
+        mov 1, %l5
-        stb %l5, [%l6 + TI_FPDEPTH] ! Store
+        stb %l5, [%l6 + TI_FPDEPTH]
-        wrpr %g3, 0, %otherwin ! Single Group+4bubbles
+        wrpr %g3, 0, %otherwin
-        wrpr %g2, 0, %wstate ! Single Group+4bubbles
+        wrpr %g2, 0, %wstate
-        stxa %g0, [%l4] ASI_DMMU ! Store Group
+        stxa %g0, [%l4] ASI_DMMU
-        flush %l6 ! Single Group+9bubbles
+        flush %l6
-        wr %g0, ASI_AIUS, %asi ! Single Group+4bubbles
+        wr %g0, ASI_AIUS, %asi
-2:      wrpr %g0, 0x0, %tl ! Single Group+4bubbles
+2:      wrpr %g0, 0x0, %tl
-        mov %g4, %l4 ! IEU1
+        mov %g4, %l4
-        mov %g5, %l5 ! IEU0 Group
+        mov %g5, %l5
-        mov %g7, %l2 ! IEU1
+        mov %g7, %l2
-        wrpr %g0, ETRAP_PSTATE1, %pstate ! Single Group+4bubbles
+        wrpr %g0, ETRAP_PSTATE1, %pstate
-        stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] ! Store Group
+        stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
-        stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] ! Store Group
+        stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
-        stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] ! Store Group
+        stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
-        stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] ! Store Group
+        stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
-        stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] ! Store Group
+        stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
-        stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] ! Store Group
+        stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
-        stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] ! Store Group
+        stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
-        stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] ! Store Group
+        stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
-        stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] ! Store Group
+        stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
-        stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] ! Store Group
+        stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
-        stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] ! Store Group
+        stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
-        stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] ! Store Group
+        stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
-        stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] ! Store Group
+        stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
-        stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] ! Store Group
+        stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
-        stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] ! Store Group
+        stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
-        wrpr %g0, ETRAP_PSTATE2, %pstate ! Single Group+4bubbles
+        wrpr %g0, ETRAP_PSTATE2, %pstate
-        mov %l6, %g6 ! IEU0
+        mov %l6, %g6
-        jmpl %l2 + 0x4, %g0 ! CTI Group
+        jmpl %l2 + 0x4, %g0
-        ldx [%g6 + TI_TASK], %g4 ! Load
+        ldx [%g6 + TI_TASK], %g4
         nop
         nop
         nop
-3:      ldub [%l6 + TI_FPDEPTH], %l5 ! Load Group
+3:      ldub [%l6 + TI_FPDEPTH], %l5
-        add %l6, TI_FPSAVED + 1, %l4 ! IEU0
+        add %l6, TI_FPSAVED + 1, %l4
-        srl %l5, 1, %l3 ! IEU0 Group
+        srl %l5, 1, %l3
-        add %l5, 2, %l5 ! IEU1
+        add %l5, 2, %l5
-        stb %l5, [%l6 + TI_FPDEPTH] ! Store
+        stb %l5, [%l6 + TI_FPDEPTH]
-        ba,pt %xcc, 2b ! CTI
+        ba,pt %xcc, 2b
-        stb %g0, [%l4 + %l3] ! Store Group
+        stb %g0, [%l4 + %l3]
         nop
 etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
-         * We place this right after pt_regs on the trap stack. The layout
-         * is:
+         * We place this right after pt_regs on the trap stack.
+         * The layout is:
          * 0x00 TL1's TSTATE
          * 0x08 TL1's TPC
          * 0x10 TL1's TNPC
@@ -166,81 +167,81 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
         wrpr %g1, %tl
         stx %g1, [%g2 + STACK_BIAS + 0x80]
-        rdpr %tstate, %g1 ! Single Group+4bubbles
+        rdpr %tstate, %g1
-        sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2 ! IEU1
+        sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2
-        ba,pt %xcc, 1b ! CTI Group
+        ba,pt %xcc, 1b
-        andcc %g1, TSTATE_PRIV, %g0 ! IEU0
+        andcc %g1, TSTATE_PRIV, %g0
         .align 64
         .globl scetrap
-scetrap: rdpr %pil, %g2 ! Single Group
+scetrap: rdpr %pil, %g2
-        rdpr %tstate, %g1 ! Single Group
+        rdpr %tstate, %g1
-        sllx %g2, 20, %g3 ! IEU0 Group
+        sllx %g2, 20, %g3
-        andcc %g1, TSTATE_PRIV, %g0 ! IEU1
+        andcc %g1, TSTATE_PRIV, %g0
-        or %g1, %g3, %g1 ! IEU0 Group
+        or %g1, %g3, %g1
-        bne,pn %xcc, 1f ! CTI
+        bne,pn %xcc, 1f
-        sub %sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2 ! IEU1
+        sub %sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2
-        wrpr %g0, 7, %cleanwin ! Single Group+4bubbles
+        wrpr %g0, 7, %cleanwin
-        sllx %g1, 51, %g3 ! IEU0 Group
+        sllx %g1, 51, %g3
-        sethi %hi(TASK_REGOFF), %g2 ! IEU1
+        sethi %hi(TASK_REGOFF), %g2
-        or %g2, %lo(TASK_REGOFF), %g2 ! IEU0 Group
+        or %g2, %lo(TASK_REGOFF), %g2
-        brlz,pn %g3, 1f ! CTI+IEU1
+        brlz,pn %g3, 1f
-        add %g6, %g2, %g2 ! IEU0 Group
+        add %g6, %g2, %g2
-        wr %g0, 0, %fprs ! Single Group+4bubbles
+        wr %g0, 0, %fprs
-1:      rdpr %tpc, %g3 ! Single Group
+1:      rdpr %tpc, %g3
-        stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE] ! Store Group
+        stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
-        rdpr %tnpc, %g1 ! Single Group
+        rdpr %tnpc, %g1
-        stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC] ! Store Group
+        stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
-        stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] ! Store Group
+        stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
-        save %g2, -STACK_BIAS, %sp ! Ordering here is critical ! Single Group
+        save %g2, -STACK_BIAS, %sp ! Ordering here is critical
-        mov %g6, %l6 ! IEU0 Group
+        mov %g6, %l6
-        bne,pn %xcc, 2f ! CTI
+        bne,pn %xcc, 2f
-        mov ASI_P, %l7 ! IEU1
+        mov ASI_P, %l7
-        rdpr %canrestore, %g3 ! Single Group+4bubbles
+        rdpr %canrestore, %g3
-        rdpr %wstate, %g2 ! Single Group+4bubbles
+        rdpr %wstate, %g2
-        wrpr %g0, 0, %canrestore ! Single Group+4bubbles
+        wrpr %g0, 0, %canrestore
-        sll %g2, 3, %g2 ! IEU0 Group
+        sll %g2, 3, %g2
-        mov PRIMARY_CONTEXT, %l4 ! IEU1
+        mov PRIMARY_CONTEXT, %l4
-        wrpr %g3, 0, %otherwin ! Single Group+4bubbles
+        wrpr %g3, 0, %otherwin
-        wrpr %g2, 0, %wstate ! Single Group+4bubbles
+        wrpr %g2, 0, %wstate
-        stxa %g0, [%l4] ASI_DMMU ! Store
+        stxa %g0, [%l4] ASI_DMMU
-        flush %l6 ! Single Group+9bubbles
+        flush %l6
-        mov ASI_AIUS, %l7 ! IEU0 Group
+        mov ASI_AIUS, %l7
-2:      mov %g4, %l4 ! IEU1
+2:      mov %g4, %l4
-        mov %g5, %l5 ! IEU0 Group
+        mov %g5, %l5
-        add %g7, 0x4, %l2 ! IEU1
+        add %g7, 0x4, %l2
-        wrpr %g0, ETRAP_PSTATE1, %pstate ! Single Group+4bubbles
+        wrpr %g0, ETRAP_PSTATE1, %pstate
-        stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] ! Store Group
+        stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
-        stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] ! Store Group
+        stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
-        sllx %l7, 24, %l7 ! IEU0
+        sllx %l7, 24, %l7
-        stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] ! Store Group
+        stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
-        rdpr %cwp, %l0 ! Single Group
+        rdpr %cwp, %l0
-        stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] ! Store Group
+        stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
-        stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] ! Store Group
+        stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
-        stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] ! Store Group
+        stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
-        stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] ! Store Group
+        stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
-        or %l7, %l0, %l7 ! IEU0
+        or %l7, %l0, %l7
-        sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0 ! IEU1
+        sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0
-        or %l7, %l0, %l7 ! IEU0 Group
+        or %l7, %l0, %l7
-        wrpr %l2, %tnpc ! Single Group+4bubbles
+        wrpr %l2, %tnpc
-        wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate ! Single Group+4bubbles
+        wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
-        stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] ! Store Group
+        stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
-        stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] ! Store Group
+        stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
-        stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] ! Store Group
+        stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
-        stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] ! Store Group
+        stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
-        stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] ! Store Group
+        stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
-        stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] ! Store Group
+        stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
-        stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] ! Store Group
+        stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
-        mov %l6, %g6 ! IEU1
+        mov %l6, %g6
-        stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] ! Store Group
+        stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
-        ldx [%g6 + TI_TASK], %g4 ! Load Group
+        ldx [%g6 + TI_TASK], %g4
         done
         nop
         nop
...
@@ -431,7 +431,6 @@ static struct sparc64_tick_ops hbtick_operations = {
 unsigned long timer_tick_offset;
 unsigned long timer_tick_compare;
 
-static unsigned long timer_ticks_per_usec_quotient;
 static unsigned long timer_ticks_per_nsec_quotient;
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -1025,18 +1024,22 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
 static struct notifier_block sparc64_cpufreq_notifier_block = {
         .notifier_call = sparc64_cpufreq_notifier
 };
-#endif
+#endif /* CONFIG_CPU_FREQ */
+
+static struct time_interpolator sparc64_cpu_interpolator = {
+        .source = TIME_SOURCE_CPU,
+        .shift = 16,
+};
 
 /* The quotient formula is taken from the IA64 port. */
-#define SPARC64_USEC_PER_CYC_SHIFT 30UL
 #define SPARC64_NSEC_PER_CYC_SHIFT 30UL
 
 void __init time_init(void)
 {
         unsigned long clock = sparc64_init_timers(timer_interrupt);
 
-        timer_ticks_per_usec_quotient =
-                (((1000000UL << SPARC64_USEC_PER_CYC_SHIFT) +
-                  (clock / 2)) / clock);
+        sparc64_cpu_interpolator.frequency = clock;
+        register_time_interpolator(&sparc64_cpu_interpolator);
 
         timer_ticks_per_nsec_quotient =
                 (((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
@@ -1048,17 +1051,6 @@ void __init time_init(void)
 #endif
 }
 
-static __inline__ unsigned long do_gettimeoffset(void)
-{
-        unsigned long ticks = tick_ops->get_tick();
-
-        ticks += timer_tick_offset;
-        ticks -= timer_tick_compare;
-
-        return (ticks * timer_ticks_per_usec_quotient)
-                >> SPARC64_USEC_PER_CYC_SHIFT;
-}
-
 unsigned long long sched_clock(void)
 {
         unsigned long ticks = tick_ops->get_tick();
@@ -1067,100 +1059,6 @@ unsigned long long sched_clock(void)
                 >> SPARC64_NSEC_PER_CYC_SHIFT;
 }
 
-int do_settimeofday(struct timespec *tv)
-{
-        time_t wtm_sec, sec = tv->tv_sec;
-        long wtm_nsec, nsec = tv->tv_nsec;
-
-        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-                return -EINVAL;
-
-        if (this_is_starfire)
-                return 0;
-
-        write_seqlock_irq(&xtime_lock);
-        /*
-         * This is revolting. We need to set "xtime" correctly. However, the
-         * value in this location is the value at the most recent update of
-         * wall time. Discover what correction gettimeofday() would have
-         * made, and then undo it!
-         */
-        nsec -= do_gettimeoffset() * 1000;
-        nsec -= (jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ);
-
-        wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-        set_normalized_timespec(&xtime, sec, nsec);
-        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-        time_adjust = 0; /* stop active adjtime() */
-        time_status |= STA_UNSYNC;
-        time_maxerror = NTP_PHASE_LIMIT;
-        time_esterror = NTP_PHASE_LIMIT;
-        write_sequnlock_irq(&xtime_lock);
-        clock_was_set();
-        return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-/* Ok, my cute asm atomicity trick doesn't work anymore.
- * There are just too many variables that need to be protected
- * now (both members of xtime, wall_jiffies, et al.)
- */
-void do_gettimeofday(struct timeval *tv)
-{
-        unsigned long seq;
-        unsigned long usec, sec;
-        unsigned long max_ntp_tick = tick_usec - tickadj;
-
-        do {
-                unsigned long lost;
-
-                seq = read_seqbegin(&xtime_lock);
-                usec = do_gettimeoffset();
-                lost = jiffies - wall_jiffies;
-
-                /*
-                 * If time_adjust is negative then NTP is slowing the clock
-                 * so make sure not to go into next possible interval.
-                 * Better to lose some accuracy than have time go backwards..
-                 */
-                if (unlikely(time_adjust < 0)) {
-                        usec = min(usec, max_ntp_tick);
-
-                        if (lost)
-                                usec += lost * max_ntp_tick;
-                }
-                else if (unlikely(lost))
-                        usec += lost * tick_usec;
-
-                sec = xtime.tv_sec;
-
-                /* Believe it or not, this divide shows up on
-                 * kernel profiles. The problem is that it is
-                 * both 64-bit and signed. Happily, 32-bits
-                 * of precision is all we really need and in
-                 * doing so gcc ends up emitting a cheap multiply.
-                 *
-                 * XXX Why is tv_nsec 'long' and 'signed' in
-                 * XXX the first place, can it even be negative?
-                 */
-                usec += ((unsigned int) xtime.tv_nsec / 1000U);
-        } while (read_seqretry(&xtime_lock, seq));
-
-        while (usec >= 1000000) {
-                usec -= 1000000;
-                sec++;
-        }
-
-        tv->tv_sec = sec;
-        tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
 static int set_rtc_mmss(unsigned long nowtime)
 {
         int real_seconds, real_minutes, chip_minutes;
...
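
For reference, a minimal userspace sketch of the scaled multiply-and-shift that the retained timer_ticks_per_nsec_quotient still performs for sched_clock(); the removed timer_ticks_per_usec_quotient did the same thing in microseconds for the now-deleted do_gettimeoffset(). The 400 MHz clock value and the standalone main() wrapper are made up for illustration, and the arithmetic assumes a 64-bit unsigned long as on sparc64.

#include <stdio.h>

#define NSEC_PER_SEC                1000000000UL
#define SPARC64_NSEC_PER_CYC_SHIFT  30UL

int main(void)
{
        /* Hypothetical tick-source rate; in the kernel this value comes
         * from sparc64_init_timers() at boot.
         */
        unsigned long clock = 400000000UL;      /* assumed 400 MHz */

        /* Rounded quotient, same formula as kept in time_init() above. */
        unsigned long quot = ((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
                              (clock / 2)) / clock;

        unsigned long ticks = 1000000UL;        /* sample tick delta */

        /* Same multiply-and-shift that sched_clock() performs;
         * prints 2500000 ns for one million ticks at 400 MHz.
         */
        printf("%lu ticks -> %lu ns\n",
               ticks, (ticks * quot) >> SPARC64_NSEC_PER_CYC_SHIFT);
        return 0;
}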