From 0b66f15894802f646e20500562566e53e66cba68 Mon Sep 17 00:00:00 2001
From: mrg
Date: Fri, 12 Jan 2018 06:01:33 +0000
Subject: [PATCH] fix time goes backwards problems on sparc.

there are a few things here:

- there's a race between reading the limit register (which clears the
  interrupt and the limit bit) and increasing the latest offset.  this
  can happen easily if an interrupt comes between the read and the call
  to tickle_tc() that increases the offset (i observed this actually
  happening.)

- in early boot, sometimes the counter can cycle twice before the
  tickle happens.

to handle these issues, add two workarounds:

- if the limit bit isn't set, but the counter value is less than the
  previous value, and the offset hasn't changed, use the same fixup as
  if the limit bit was set.  this handles the first case above.

- add a hard workaround that never allows returning a smaller value
  (except during 32 bit overflow): if the result is less than the last
  result, add fixups until it isn't (or until it would overflow.)

the first workaround fixes general run-time issues, and the second
fixes issues only seen during boot.

also expand some comments in timer_sun4m.c and re-enable the sun4m
sub-microsecond tmr_ustolim4m() support (but it's always called with
at least 'tick' microseconds, so the end result is the same.)
---
 sys/arch/sparc/sparc/timer.c       | 102 ++++++++++++++++++++++-------
 sys/arch/sparc/sparc/timer_sun4m.c |  12 ++--
 sys/arch/sparc/sparc/timerreg.h    |   9 ++-
 3 files changed, 93 insertions(+), 30 deletions(-)

diff --git a/sys/arch/sparc/sparc/timer.c b/sys/arch/sparc/sparc/timer.c
index c2da47d940cf..84e3d5244b0f 100644
--- a/sys/arch/sparc/sparc/timer.c
+++ b/sys/arch/sparc/sparc/timer.c
@@ -1,4 +1,4 @@
-/* $NetBSD: timer.c,v 1.32 2014/01/19 00:22:33 mrg Exp $ */
+/* $NetBSD: timer.c,v 1.33 2018/01/12 06:01:33 mrg Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -60,7 +60,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.32 2014/01/19 00:22:33 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.33 2018/01/12 06:01:33 mrg Exp $");
 
 #include
 #include
@@ -85,55 +85,98 @@ void *sched_cookie;
  * timecounter local state
  */
 static struct counter {
-        volatile u_int *cntreg;  /* counter register */
+        __cpu_simple_lock_t lock; /* protects access to offset, reg, last* */
+        volatile u_int *cntreg;  /* counter register to read */
         u_int limit;             /* limit we count up to */
         u_int offset;            /* accumulated offset due to wraps */
         u_int shift;             /* scaling for valid bits */
         u_int mask;              /* valid bit mask */
-} cntr;
+        u_int lastcnt;           /* the last* values are used to notice */
+        u_int lastres;           /* and fix up cases where it would appear */
+        u_int lastoffset;        /* time went backwards. */
+} cntr __aligned(CACHE_LINE_SIZE);
 
 /*
  * define timecounter
  */
 
 static struct timecounter counter_timecounter = {
-        timer_get_timecount,    /* get_timecount */
-        0,                      /* no poll_pps */
-        ~0u,                    /* counter_mask */
-        0,                      /* frequency - set at initialisation */
-        "timer-counter",        /* name */
-        100,                    /* quality */
-        &cntr                   /* private reference */
+        .tc_get_timecount = timer_get_timecount,
+        .tc_poll_pps = NULL,
+        .tc_counter_mask = ~0u,
+        .tc_frequency = 0,
+        .tc_name = "timer-counter",
+        .tc_quality = 100,
+        .tc_priv = &cntr,
 };
 
 /*
  * timer_get_timecount provide current counter value
  */
+__attribute__((__optimize__("Os")))
 static u_int
 timer_get_timecount(struct timecounter *tc)
 {
-        struct counter *ctr = (struct counter *)tc->tc_priv;
-
-        u_int c, res, r;
+        u_int cnt, res, fixup, offset;
         int s;
 
+        /*
+         * We use splhigh/__cpu_simple_lock here as we don't want
+         * any mutex or lockdebug overhead.  The lock protects a
+         * bunch of the members of cntr that are written here to
+         * deal with the various minor races to be observed and
+         * worked around.
+         */
         s = splhigh();
-
-        res = c = *ctr->cntreg;
+        __cpu_simple_lock(&cntr.lock);
+        res = cnt = *cntr.cntreg;
 
         res &= ~TMR_LIMIT;
+        offset = cntr.offset;
 
-        if (c != res) {
-                r = ctr->limit;
+        /*
+         * There are 3 cases here:
+         * - limit reached, interrupt not yet processed.
+         * - count reset but offset the same, race between handling
+         *   the interrupt and tickle_tc() updating the offset.
+         * - normal case.
+         *
+         * For the first two cases, add the limit so that we avoid
+         * time going backwards.
+         */
+        if (cnt != res) {
+                fixup = cntr.limit;
+        } else if (res < cntr.lastcnt && offset == cntr.lastoffset) {
+                fixup = cntr.limit;
         } else {
-                r = 0;
+                fixup = 0;
         }
+
+        cntr.lastcnt = res;
+        cntr.lastoffset = offset;
 
-        res >>= ctr->shift;
-        res &= ctr->mask;
+        res >>= cntr.shift;
+        res &= cntr.mask;
 
-        res += r + ctr->offset;
+        res += fixup + offset;
 
+        /*
+         * This handles early-boot cases where the counter resets twice
+         * before the offset is updated.
+         */
+        if (res < cntr.lastres) {
+                if (fixup == 0)
+                        fixup = cntr.limit;
+                while (res < cntr.lastres) {
+                        if (res > UINT_MAX - fixup)
+                                break;
+                        res += fixup;
+                }
+        }
+
+        cntr.lastres = res;
+
+        __cpu_simple_unlock(&cntr.lock);
         splx(s);
 
         return res;
@@ -142,7 +185,15 @@ timer_get_timecount(struct timecounter *tc)
 void
 tickle_tc(void)
 {
+
         if (timecounter->tc_get_timecount == timer_get_timecount) {
+                /*
+                 * This could be protected by cntr.lock/splhigh but the update
+                 * happens at IPL10 already and as a 32 bit value it should
+                 * never be seen as a partial update, so skip it here.  This
+                 * also probably slows down the actual offset update, making
+                 * one of the cases above more likely to need the workaround.
+                 */
                 cntr.offset += cntr.limit;
         }
 }
@@ -190,13 +241,14 @@ timerattach(volatile int *cntreg, volatile int *limreg)
                 if ((1 << t0) & prec)
                         break;
 
+        __cpu_simple_lock_init(&cntr.lock);
+
         cntr.shift = t0;
         cntr.mask = (1 << (31-t0))-1;
         counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1);
 
         printf(": delay constant %d, frequency = %" PRIu64 " Hz\n",
                timerblurb, counter_timecounter.tc_frequency);
-printf("timer: limit %u shift %u mask %x\n", cntr.limit, cntr.shift, cntr.mask);
 
 #if defined(SUN4) || defined(SUN4C)
         if (CPU_ISSUN4 || CPU_ISSUN4C) {
@@ -214,6 +266,7 @@ printf("timer: limit %u shift %u mask %x\n", cntr.limit, cntr.shift, cntr.mask);
                 cntr.limit = tmr_ustolim4m(tick);
         }
 #endif
+        /* link interrupt handlers */
         intr_establish(10, 0, &level10, NULL, true);
         intr_establish(14, 0, &level14, NULL, true);
 
@@ -226,6 +279,9 @@ printf("timer: limit %u shift %u mask %x\n", cntr.limit, cntr.shift, cntr.mask);
         cntr.cntreg = cntreg;
         cntr.limit >>= cntr.shift;
 
+        /* start at non-zero, so that cntr.lastoffset is less */
+        cntr.offset = cntr.limit;
+
         tc_init(&counter_timecounter);
 }
 
diff --git a/sys/arch/sparc/sparc/timer_sun4m.c b/sys/arch/sparc/sparc/timer_sun4m.c
index 568bb8615abc..1144db2dedcb 100644
--- a/sys/arch/sparc/sparc/timer_sun4m.c
+++ b/sys/arch/sparc/sparc/timer_sun4m.c
@@ -1,4 +1,4 @@
-/* $NetBSD: timer_sun4m.c,v 1.30 2014/01/19 00:22:33 mrg Exp $ */
+/* $NetBSD: timer_sun4m.c,v 1.31 2018/01/12 06:01:33 mrg Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -58,7 +58,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.30 2014/01/19 00:22:33 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.31 2018/01/12 06:01:33 mrg Exp $");
 
 #include
 #include
@@ -74,7 +74,7 @@ __KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.30 2014/01/19 00:22:33 mrg Exp $")
 #include
 #include
 
-struct timer_4m *timerreg4m;
+static struct timer_4m *timerreg4m;
 #define counterreg4m    cpuinfo.counterreg_4m
 
 /*
@@ -160,9 +160,13 @@ clockintr_4m(void *cap)
          */
         if (cold)
                 return 0;
+
         kpreempt_disable();
-        /* read the limit register to clear the interrupt */
+
+        /* Read the limit register to clear the interrupt. */
         *((volatile int *)&timerreg4m->t_limit);
+
+        /* Update the timecounter offset. */
         tickle_tc();
 
         /*
diff --git a/sys/arch/sparc/sparc/timerreg.h b/sys/arch/sparc/sparc/timerreg.h
index d161a9e1b328..94fedc52c2b3 100644
--- a/sys/arch/sparc/sparc/timerreg.h
+++ b/sys/arch/sparc/sparc/timerreg.h
@@ -1,4 +1,4 @@
-/* $NetBSD: timerreg.h,v 1.9 2005/11/16 03:00:23 uwe Exp $ */
+/* $NetBSD: timerreg.h,v 1.10 2018/01/12 06:01:33 mrg Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -120,9 +120,12 @@ struct counter_4m {     /* counter that interrupts at ipl 14 */
  */
 #define tmr_ustolim(n)  (((n) + 1) << TMR_SHIFT)
 
-/*efine TMR_SHIFT4M     9               -* shift to obtain microseconds */
-/*efine tmr_ustolim4m(n)        (((2*(n)) + 1) << TMR_SHIFT4M)*/
+#define TMR_SHIFT4M     9               /* shift to obtain microseconds */
+#if 1
+#define tmr_ustolim4m(n)        (((2*(n)) + 1) << TMR_SHIFT4M)
+#else
 #define tmr_ustolim4m(n)        ((n) << TMR_SHIFT)
+#endif
 
 /* The number of microseconds represented by a counter register value */
 #define tmr_cnttous(c)  ((((c) >> TMR_SHIFT) & TMR_MASK) - 1)
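
Note: the combined effect of the two workarounds in timer_get_timecount() can be
illustrated outside the kernel.  The following is only a minimal user-space
sketch, not the kernel code: FAKE_LIMIT and FAKE_LIMIT_BIT are invented
stand-ins for cntr.limit and TMR_LIMIT, the raw counter value is passed in as a
parameter instead of being read from hardware, and the shift/mask scaling is
left out.  Only the fixup arithmetic follows the patch.

/*
 * Standalone sketch of the "never go backwards" fixup logic.
 * FAKE_LIMIT/FAKE_LIMIT_BIT and the raw parameter are made-up stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_LIMIT_BIT  0x80000000u     /* stand-in for TMR_LIMIT */
#define FAKE_LIMIT      1000u           /* stand-in for cntr.limit */

static struct {
        uint32_t offset;        /* accumulated wrap offset (the "tickle") */
        uint32_t lastcnt;       /* last raw count seen */
        uint32_t lastres;       /* last value returned */
        uint32_t lastoffset;    /* offset seen on the last read */
} st;

static uint32_t
get_timecount(uint32_t raw)     /* "raw" stands in for *cntr.cntreg */
{
        uint32_t cnt, res, fixup, offset;

        res = cnt = raw;
        res &= ~FAKE_LIMIT_BIT;
        offset = st.offset;

        /* limit bit set, or counter wrapped but offset not yet bumped */
        if (cnt != res)
                fixup = FAKE_LIMIT;
        else if (res < st.lastcnt && offset == st.lastoffset)
                fixup = FAKE_LIMIT;
        else
                fixup = 0;

        st.lastcnt = res;
        st.lastoffset = offset;

        res += fixup + offset;

        /* hard workaround: never return less than last time (except wrap) */
        if (res < st.lastres) {
                if (fixup == 0)
                        fixup = FAKE_LIMIT;
                while (res < st.lastres) {
                        if (res > UINT32_MAX - fixup)
                                break;
                        res += fixup;
                }
        }
        st.lastres = res;

        return res;
}

int
main(void)
{
        printf("%u\n", get_timecount(900));     /* normal read */
        printf("%u\n", get_timecount(10));      /* wrap, tickle not yet run */
        st.offset += FAKE_LIMIT;                /* the late tickle */
        printf("%u\n", get_timecount(20));
        return 0;
}

Compiled on its own, the three calls print 900, 1010 and 1020: the read taken
after the wrap but before the offset update is pushed forward by the limit
instead of appearing to go backwards.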