Commit

searKing committed Oct 31, 2019
1 parent 5e96aba commit 944febe
Showing 1 changed file with 288 additions and 0 deletions.
288 changes: 288 additions & 0 deletions go/time/sleep_test.go
@@ -2,15 +2,65 @@ package time_test

import (
"errors"
"fmt"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"

time_ "github.com/searKing/golang/go/time"
)

// Go runtime uses different Windows timers for time.Now and sleeping.
// These can tick at different frequencies and can arrive out of sync.
// The effect can be seen, for example, as time.Sleep(100ms) actually being
// shorter than 100ms when measured as the difference between time.Now before
// and after the time.Sleep call. This was observed on Windows XP SP3 (windows/386).
// windowsInaccuracy is used to ignore such errors.
const windowsInaccuracy = 17 * time.Millisecond
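
// An illustrative sketch (the helper name adjustedMinDelay is not from this
// file): the constant above is normally applied by shrinking the expected
// minimum delay on Windows before comparing it against a measured elapsed
// time, which is the same adjustment TestAfter performs inline below.
func adjustedMinDelay(d time.Duration) time.Duration {
    if runtime.GOOS == "windows" {
        return d - windowsInaccuracy
    }
    return d
}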

// Test the basic function calling behavior. Correct queueing
// behavior is tested elsewhere, since After and AfterFunc share
// the same code.
func TestAfterFunc(t *testing.T) {
i := 10
c := make(chan bool)
var f func()
f = func() {
i--
if i >= 0 {
time_.AfterFunc(0, f)
time.Sleep(1 * time.Second)
} else {
c <- true
}
}

time_.AfterFunc(0, f)
<-c
}
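
// A minimal usage sketch of the two entry points exercised by the tests in
// this file, assuming time_.After and time_.AfterFunc keep the standard
// library signatures (as the calls above rely on): After returns a channel
// that receives the fire time once the duration elapses, while AfterFunc runs
// a callback in its own goroutine. The name sketchAfterUsage is illustrative.
func sketchAfterUsage() {
    done := make(chan struct{})
    time_.AfterFunc(10*time.Millisecond, func() { close(done) })
    <-done // the callback has run

    fired := <-time_.After(10 * time.Millisecond)
    _ = fired // the time at which the timer fired
}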

func TestAfterStress(t *testing.T) {
stop := uint32(0)
go func() {
for atomic.LoadUint32(&stop) == 0 {
runtime.GC()
// Yield so that the OS can wake up the timer thread,
// so that it can generate channel sends for the main goroutine,
// which will eventually set stop = 1 for us.
time.Sleep(time.Nanosecond)
}
}()
ticker := time.NewTicker(1)
for i := 0; i < 100; i++ {
<-ticker.C
}
ticker.Stop()
atomic.StoreUint32(&stop, 1)
}

func benchmark(b *testing.B, bench func(n int)) {
// Create an equal number of garbage timers on each P before starting
// the benchmark.
@@ -43,6 +93,65 @@ func benchmark(b *testing.B, bench func(n int)) {
}
}
}

func BenchmarkAfterFunc(b *testing.B) {
benchmark(b, func(n int) {
c := make(chan bool)
var f func()
f = func() {
n--
if n >= 0 {
time_.AfterFunc(0, f)
} else {
c <- true
}
}

time_.AfterFunc(0, f)
<-c
})
}

func BenchmarkAfter(b *testing.B) {
benchmark(b, func(n int) {
for i := 0; i < n; i++ {
<-time_.After(1)
}
})
}

func BenchmarkStop(b *testing.B) {
benchmark(b, func(n int) {
for i := 0; i < n; i++ {
time_.NewTimer(1 * time.Second).Stop()
}
})
}

func BenchmarkSimultaneousAfterFunc(b *testing.B) {
benchmark(b, func(n int) {
var wg sync.WaitGroup
wg.Add(n)
for i := 0; i < n; i++ {
time_.AfterFunc(0, wg.Done)
}
wg.Wait()
})
}

func BenchmarkStartStop(b *testing.B) {
benchmark(b, func(n int) {
timers := make([]*time_.Timer, n)
for i := 0; i < n; i++ {
timers[i] = time_.AfterFunc(time.Hour, nil)
}

for i := 0; i < n; i++ {
timers[i].Stop()
}
})
}

func BenchmarkReset(b *testing.B) {
benchmark(b, func(n int) {
t := time_.NewTimer(time.Hour)
@@ -53,6 +162,185 @@ func BenchmarkReset(b *testing.B) {
})
}

func TestAfter(t *testing.T) {
const delay = 100 * time.Millisecond
start := time.Now()
end := <-time_.After(delay)
delayadj := delay
if runtime.GOOS == "windows" {
delayadj -= windowsInaccuracy
}
if duration := time.Now().Sub(start); duration < delayadj {
t.Fatalf("After(%s) slept for only %d ns", delay, duration)
}
if min := start.Add(delayadj); end.Before(min) {
t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
}
}

func TestAfterTick(t *testing.T) {
const Count = 10
Delta := 100 * time.Millisecond
if testing.Short() {
Delta = 10 * time.Millisecond
}
t0 := time.Now()
for i := 0; i < Count; i++ {
<-time_.After(Delta)
}
t1 := time.Now()
d := t1.Sub(t0)
target := Delta * Count
if d < target*9/10 {
t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
}
if !testing.Short() && d > target*30/10 {
t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
}
}
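
// Worked bounds for the checks above: with the default Delta of 100ms and
// Count of 10 the target is 1s, so the loop must take at least 900ms and,
// outside -short mode, at most 3s; with -short (Delta = 10ms, target = 100ms)
// only the 90ms lower bound is enforced.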

func TestAfterStop(t *testing.T) {
time_.AfterFunc(100*time.Millisecond, func() {})
t0 := time_.NewTimer(50 * time.Millisecond)
c1 := make(chan bool, 1)
t1 := time_.AfterFunc(150*time.Millisecond, func() { c1 <- true })
c2 := time_.After(200 * time.Millisecond)
if !t0.Stop() {
t.Fatalf("failed to stop event 0")
}
if !t1.Stop() {
t.Fatalf("failed to stop event 1")
}
<-c2
select {
case <-t0.C:
t.Fatalf("event 0 was not stopped")
case <-c1:
t.Fatalf("event 1 was not stopped")
default:
}
if t1.Stop() {
t.Fatalf("Stop returned true twice")
}
}
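
// A sketch of the stop-and-drain idiom that the Stop semantics tested above
// support, assuming time_.Timer mirrors time.Timer's Stop and C as the uses
// in this file suggest; the name stopAndDrain is illustrative only.
func stopAndDrain(t *time_.Timer) {
    if !t.Stop() {
        // The timer already fired or was already stopped; drain the channel
        // without blocking so a later Reset cannot observe a stale value.
        select {
        case <-t.C:
        default:
        }
    }
}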

func TestAfterQueuing(t *testing.T) {
// This test flakes out on some systems,
// so we'll try it a few times before declaring it a failure.
const attempts = 5
err := errors.New("!=nil")
for i := 0; i < attempts && err != nil; i++ {
delta := time.Duration(20+i*50) * time.Millisecond
if err = testAfterQueuing(delta); err != nil {
t.Logf("attempt %v failed: %v", i, err)
}
}
if err != nil {
t.Fatal(err)
}
}

var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}

type afterResult struct {
slot int
t time.Time
}

func await(slot int, result chan<- afterResult, ac <-chan time.Time) {
result <- afterResult{slot, <-ac}
}

func testAfterQueuing(delta time.Duration) error {
// make the result channel buffered because we don't want
// to depend on channel queueing semantics that might
// possibly change in the future.
result := make(chan afterResult, len(slots))

t0 := time.Now()
for _, slot := range slots {
go await(slot, result, time_.After(time.Duration(slot)*delta))
}
var order []int
var times []time.Time
for range slots {
r := <-result
order = append(order, r.slot)
times = append(times, r.t)
}
for i := range order {
if i > 0 && order[i] < order[i-1] {
return fmt.Errorf("After calls returned out of order: %v", order)
}
}
for i, t := range times {
dt := t.Sub(t0)
target := time.Duration(order[i]) * delta
if dt < target-delta/2 || dt > target+delta*10 {
return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
}
}
return nil
}

func TestTimerStopStress(t *testing.T) {
if testing.Short() {
return
}
for i := 0; i < 100; i++ {
go func(i int) {
timer := time_.AfterFunc(2*time.Second, func() {
t.Fatalf("timer %d was not stopped", i)
})
time.Sleep(1 * time.Second)
timer.Stop()
}(i)
}
time.Sleep(3 * time.Second)
}

// Test that a panic while deleting a timer does not leave
// the timers mutex held, deadlocking a ticker.Stop in a defer.
func TestIssue5745(t *testing.T) {
ticker := time.NewTicker(time.Hour)
defer func() {
// This would deadlock here before the fix, because the
// lock was taken before the segfault.
ticker.Stop()

if r := recover(); r == nil {
t.Error("Expected panic, but none happened.")
}
}()

// cause a panic due to a segfault
var timer *time_.Timer
timer.Stop()
t.Error("Should be unreachable.")
}

func TestSleepZeroDeadlock(t *testing.T) {
// Sleep(0) used to hang; the sequence of events was as follows.
// Sleep(0) sets G's status to Gwaiting, but then immediately returns, leaving the status set.
// Then the goroutine calls, e.g., new and falls down into the scheduler due to a pending GC.
// After the GC, nobody wakes the goroutine up from the Gwaiting status.
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
c := make(chan bool)
go func() {
for i := 0; i < 100; i++ {
runtime.GC()
}
c <- true
}()
for i := 0; i < 100; i++ {
time.Sleep(0)
tmp := make(chan bool, 1)
tmp <- true
<-tmp
}
<-c
}

func testReset(d time.Duration) error {
t0 := time.NewTimer(2 * d)
time.Sleep(d)