Merge pull request #5349 from hashicorp/port-tests-20190221
Port some 0.8.7 alloc runner tests
notnoop authored Feb 22, 2019
2 parents 74c270d + e1e5053 commit eecae58
Showing 3 changed files with 470 additions and 1 deletion.
152 changes: 152 additions & 0 deletions client/allocrunner/alloc_runner_test.go
@@ -2,12 +2,15 @@ package allocrunner

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/nomad/client/allochealth"
"github.com/hashicorp/nomad/client/allocwatcher"
cconsul "github.com/hashicorp/nomad/client/consul"
"github.com/hashicorp/nomad/client/state"
"github.com/hashicorp/nomad/command/agent/consul"
@@ -625,3 +628,152 @@ func TestAllocRunner_Destroy(t *testing.T) {
require.Failf(t, "expected NotExist error", "found %v", err)
}
}

func TestAllocRunner_SimpleRun(t *testing.T) {
t.Parallel()

alloc := mock.BatchAlloc()

conf, cleanup := testAllocRunnerConfig(t, alloc)
defer cleanup()
ar, err := NewAllocRunner(conf)
require.NoError(t, err)
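// Start the runner in the background; the WaitForResult loop below polls until the batch alloc completes.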
go ar.Run()
defer destroy(ar)

// Wait for the alloc to complete
testutil.WaitForResult(func() (bool, error) {
state := ar.AllocState()

if state.ClientStatus != structs.AllocClientStatusComplete {
return false, fmt.Errorf("got status %v; want %v", state.ClientStatus, structs.AllocClientStatusComplete)
}

for taskName, s := range state.TaskStates {
if s.FinishedAt.IsZero() {
return false, fmt.Errorf("task %q has zero FinishedAt value", taskName)
}
}

return true, nil
}, func(err error) {
require.NoError(t, err)
})

}

// TestAllocRunner_MoveAllocDir asserts that a rescheduled
// allocation copies ephemeral disk content from the previous alloc run.
func TestAllocRunner_MoveAllocDir(t *testing.T) {
t.Parallel()

// Step 1: start and run a task
alloc := mock.BatchAlloc()
conf, cleanup := testAllocRunnerConfig(t, alloc)
defer cleanup()
ar, err := NewAllocRunner(conf)
require.NoError(t, err)
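// Run the first alloc to completion; the status assertion below relies on Run returning only after the batch tasks have finished.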
ar.Run()
defer destroy(ar)

require.Equal(t, structs.AllocClientStatusComplete, ar.AllocState().ClientStatus)

// Step 2. Modify its directory
task := alloc.Job.TaskGroups[0].Tasks[0]
dataFile := filepath.Join(ar.allocDir.SharedDir, "data", "data_file")
require.NoError(t, ioutil.WriteFile(dataFile, []byte("hello world"), os.ModePerm))
taskDir := ar.allocDir.TaskDirs[task.Name]
taskLocalFile := filepath.Join(taskDir.LocalDir, "local_file")
require.NoError(t, ioutil.WriteFile(taskLocalFile, []byte("good bye world"), os.ModePerm))

// Step 3. Start a new alloc
alloc2 := mock.BatchAlloc()
alloc2.PreviousAllocation = alloc.ID
alloc2.Job.TaskGroups[0].EphemeralDisk.Sticky = true

conf2, cleanup2 := testAllocRunnerConfig(t, alloc2)
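// Wire the new runner to the previous one so it can wait for it to terminate and migrate its ephemeral disk data.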
conf2.PrevAllocWatcher, conf2.PrevAllocMigrator = allocwatcher.NewAllocWatcher(allocwatcher.Config{
Alloc: alloc2,
PreviousRunner: ar,
Logger: conf2.Logger,
})
defer cleanup2()
ar2, err := NewAllocRunner(conf2)
require.NoError(t, err)

ar2.Run()
defer destroy(ar2)

require.Equal(t, structs.AllocClientStatusComplete, ar2.AllocState().ClientStatus)

// Ensure that data from ar was moved to ar2
dataFile = filepath.Join(ar2.allocDir.SharedDir, "data", "data_file")
fileInfo, _ := os.Stat(dataFile)
require.NotNilf(t, fileInfo, "file %q not found", dataFile)

taskDir = ar2.allocDir.TaskDirs[task.Name]
taskLocalFile = filepath.Join(taskDir.LocalDir, "local_file")
fileInfo, _ = os.Stat(taskLocalFile)
require.NotNilf(t, fileInfo, "file %q not found", taskLocalFile)

}

// TestAllocRunner_HandlesArtifactFailure ensures that when one task in a task
// group fails to fetch an artifact, the other tasks in the group are still
// able to run to completion.
func TestAllocRunner_HandlesArtifactFailure(t *testing.T) {
t.Parallel()

alloc := mock.BatchAlloc()
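// Use a fail-mode restart policy with a single attempt so the task with the bad artifact goes terminal quickly.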
alloc.Job.TaskGroups[0].RestartPolicy = &structs.RestartPolicy{
Mode: structs.RestartPolicyModeFail,
Attempts: 1,
Delay: time.Nanosecond,
Interval: time.Hour,
}

// Create a new task with a bad artifact
badtask := alloc.Job.TaskGroups[0].Tasks[0].Copy()
badtask.Name = "bad"
badtask.Artifacts = []*structs.TaskArtifact{
{GetterSource: "http://127.0.0.1:0/foo/bar/baz"},
}

alloc.Job.TaskGroups[0].Tasks = append(alloc.Job.TaskGroups[0].Tasks, badtask)
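// Give the new task its own resource entry; allocated resources are looked up by task name.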
alloc.AllocatedResources.Tasks["bad"] = &structs.AllocatedTaskResources{
Cpu: structs.AllocatedCpuResources{
CpuShares: 500,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 256,
},
}

conf, cleanup := testAllocRunnerConfig(t, alloc)
defer cleanup()
ar, err := NewAllocRunner(conf)
require.NoError(t, err)
go ar.Run()
defer destroy(ar)

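// Wait for the alloc to reach a terminal client status.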
testutil.WaitForResult(func() (bool, error) {
state := ar.AllocState()

switch state.ClientStatus {
case structs.AllocClientStatusComplete, structs.AllocClientStatusFailed:
return true, nil
default:
return false, fmt.Errorf("got status %v but want terminal", state.ClientStatus)
}

}, func(err error) {
require.NoError(t, err)
})

state := ar.AllocState()
require.Equal(t, structs.AllocClientStatusFailed, state.ClientStatus)
require.Equal(t, structs.TaskStateDead, state.TaskStates["web"].State)
require.True(t, state.TaskStates["web"].Successful())
require.Equal(t, structs.TaskStateDead, state.TaskStates["bad"].State)
require.True(t, state.TaskStates["bad"].Failed)
}