Move tests into a separate package
Moving the test code out of the workerpool package allows tests to be
written from the perspective of a real user of the package: the tests
can no longer fiddle with internals, which keeps them focused on the
exposed API.

Currently, there's still one case where we want to check internal state:
the (*WorkerPool).tasks capacity check. For that, export a
(*WorkerPool).TasksCap method for testing only.

Signed-off-by: Tobias Klauser <[email protected]>
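The change relies on a standard Go convention: a file whose name ends in _test.go but which declares the real package name is compiled only while testing that package, so it can re-export internals to the external _test package without adding anything to the importable API. Below is a minimal sketch of the pattern, using a hypothetical ring package (the Ring type, its unexported buf field, and the example.com/ring import path are illustrative assumptions, not part of this repository):

// ring/export_test.go — lives in the real package, built only for tests.
package ring

// BufCap exposes the unexported buffer's capacity to tests.
func (r *Ring) BufCap() int {
	return cap(r.buf)
}

// ring/ring_test.go — black-box tests in the external _test package.
package ring_test

import (
	"testing"

	"example.com/ring" // hypothetical import path
)

func TestBufferIsUnbuffered(t *testing.T) {
	r := ring.New()
	if got := r.BufCap(); got != 0 {
		t.Errorf("buffer capacity = %d, want 0", got)
	}
}

Because export_test.go itself carries the _test.go suffix, regular importers of the package never see the extra accessor; it exists only for the duration of go test.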
tklauser committed Feb 26, 2021
1 parent f3a0dbc commit bc60e57
Showing 2 changed files with 51 additions and 27 deletions.
22 changes: 22 additions & 0 deletions export_test.go
@@ -0,0 +1,22 @@
// Copyright 2021 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package workerpool

// export for testing only

// TasksCap returns the tasks channel capacity.
func (wp *WorkerPool) TasksCap() int {
return cap(wp.tasks)
}
56 changes: 29 additions & 27 deletions workerpool_test.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-package workerpool
+package workerpool_test

import (
"context"
@@ -21,6 +21,8 @@ import (
"sync"
"testing"
"time"

"github.com/cilium/workerpool"
)

func TestWorkerPoolNewPanics(t *testing.T) {
@@ -31,45 +33,45 @@ func TestWorkerPoolNewPanics(t *testing.T) {
t.Errorf("New(%d) should panic()", n)
}
}()
-_ = New(n)
+_ = workerpool.New(n)
}

testWorkerPoolNewPanics(0)
testWorkerPoolNewPanics(-1)
}

func TestWorkerPoolTasksCapacity(t *testing.T) {
-wp := New(runtime.NumCPU())
+wp := workerpool.New(runtime.NumCPU())
defer wp.Close()

-if c := cap(wp.tasks); c != 0 {
+if c := wp.TasksCap(); c != 0 {
t.Errorf("tasks channel capacity is %d; want 0 (an unbuffered channel)", c)
}
}

func TestWorkerPoolCap(t *testing.T) {
-one := New(1)
+one := workerpool.New(1)
defer one.Close()
if c := one.Cap(); c != 1 {
t.Errorf("got %d; want %d", c, 1)
}

n := runtime.NumCPU()
-ncpu := New(n)
+ncpu := workerpool.New(n)
defer ncpu.Close()
if c := ncpu.Cap(); c != n {
t.Errorf("got %d; want %d", c, n)
}

-fortyTwo := New(42)
+fortyTwo := workerpool.New(42)
defer fortyTwo.Close()
if c := fortyTwo.Cap(); c != 42 {
t.Errorf("got %d; want %d", c, 42)
}
}

func TestWorkerPoolLen(t *testing.T) {
-wp := New(1)
+wp := workerpool.New(1)
defer wp.Close()
if l := wp.Len(); l != 0 {
t.Errorf("got %d; want %d", l, 0)
@@ -93,7 +95,7 @@ func TestWorkerPoolLen(t *testing.T) {
// submitted.
func TestWorkerPoolConcurrentTasksCount(t *testing.T) {
n := runtime.NumCPU()
-wp := New(n)
+wp := workerpool.New(n)
defer func() {
if err := wp.Close(); err != nil {
t.Fatalf("unexpected error %v", err)
@@ -139,7 +141,7 @@ func TestWorkerPoolConcurrentTasksCount(t *testing.T) {

func TestWorkerPool(t *testing.T) {
n := runtime.NumCPU()
-wp := New(n)
+wp := workerpool.New(n)

numTasks := n + 2
done := make(chan struct{})
@@ -228,7 +230,7 @@ func TestWorkerPool(t *testing.T) {

func TestConcurrentDrain(t *testing.T) {
n := runtime.NumCPU()
-wp := New(n)
+wp := workerpool.New(n)

numTasks := n + 1
done := make(chan struct{})
@@ -273,13 +275,13 @@ func TestConcurrentDrain(t *testing.T) {
<-ready
time.Sleep(10 * time.Millisecond)

if err := wp.Submit("", nil); err != ErrDraining {
t.Errorf("submit: got '%v', want '%v'", err, ErrDraining)
if err := wp.Submit("", nil); err != workerpool.ErrDraining {
t.Errorf("submit: got '%v', want '%v'", err, workerpool.ErrDraining)
}

results, err := wp.Drain()
-if err != ErrDraining {
-t.Errorf("drain: got '%v', want '%v'", err, ErrDraining)
+if err != workerpool.ErrDraining {
+t.Errorf("drain: got '%v', want '%v'", err, workerpool.ErrDraining)
}
if results != nil {
t.Errorf("drain: got '%v', want '%v'", results, nil)
@@ -306,45 +308,45 @@ func TestConcurrentDrain(t *testing.T) {
}

func TestWorkerPoolDrainAfterClose(t *testing.T) {
-wp := New(runtime.NumCPU())
+wp := workerpool.New(runtime.NumCPU())
wp.Close()
tasks, err := wp.Drain()
-if err != ErrClosed {
-t.Errorf("got %v; want %v", err, ErrClosed)
+if err != workerpool.ErrClosed {
+t.Errorf("got %v; want %v", err, workerpool.ErrClosed)
}
if tasks != nil {
t.Errorf("got %v as tasks; want %v", tasks, nil)
}
}

func TestWorkerPoolSubmitAfterClose(t *testing.T) {
-wp := New(runtime.NumCPU())
+wp := workerpool.New(runtime.NumCPU())
wp.Close()
if err := wp.Submit("dummy", nil); err != ErrClosed {
t.Fatalf("got %v; want %v", err, ErrClosed)
if err := wp.Submit("dummy", nil); err != workerpool.ErrClosed {
t.Fatalf("got %v; want %v", err, workerpool.ErrClosed)
}
}

func TestWorkerPoolManyClose(t *testing.T) {
-wp := New(runtime.NumCPU())
+wp := workerpool.New(runtime.NumCPU())

// first call to Close() should not return an error.
if err := wp.Close(); err != nil {
t.Fatalf("unexpected error on Close(): %s", err)
}

// calling Close() more than once should always return an error.
-if err := wp.Close(); err != ErrClosed {
-t.Fatalf("got %v; want %v", err, ErrClosed)
+if err := wp.Close(); err != workerpool.ErrClosed {
+t.Fatalf("got %v; want %v", err, workerpool.ErrClosed)
}
-if err := wp.Close(); err != ErrClosed {
-t.Fatalf("got %v; want %v", err, ErrClosed)
+if err := wp.Close(); err != workerpool.ErrClosed {
+t.Fatalf("got %v; want %v", err, workerpool.ErrClosed)
}
}

func TestWorkerPoolClose(t *testing.T) {
n := runtime.NumCPU()
-wp := New(n)
+wp := workerpool.New(n)

// working is written to by each task as soon as possible.
working := make(chan struct{})
