Add Driver.Prestart method #2054

Merged · 10 commits · Dec 21, 2016
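For context, this PR splits driver startup into a Prestart phase followed by Start. The driver.Driver interface itself is not shown in this excerpt; the sketch below is an illustration of the interface change implied by the DockerDriver implementation further down, with only the Prestart and Start signatures taken from the diff and the rest assumed.

// Sketch only: interface shape implied by the DockerDriver changes below.
type Driver interface {
	// Prestart runs before Start; it can pull or load images, validate
	// the task configuration, and stash any state Start will need.
	Prestart(ctx *ExecContext, task *structs.Task) error

	// Start launches the task and returns a handle for managing it.
	Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error)

	// remaining methods elided
}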
44 changes: 23 additions & 21 deletions api/tasks.go
@@ -252,30 +252,32 @@ const (
TaskSiblingFailed = "Sibling task failed"
TaskSignaling = "Signaling"
TaskRestartSignal = "Restart Signaled"
TaskInitializing = "Initializing"
)

// TaskEvent is an event that affects the state of a task and contains meta-data
// appropriate to the event's type.
type TaskEvent struct {
Type string
Time int64
FailsTask bool
RestartReason string
SetupError string
DriverError string
ExitCode int
Signal int
Message string
KillReason string
KillTimeout time.Duration
KillError string
StartDelay int64
DownloadError string
ValidationError string
DiskLimit int64
DiskSize int64
FailedSibling string
VaultError string
TaskSignalReason string
TaskSignal string
Type string
Time int64
FailsTask bool
RestartReason string
SetupError string
DriverError string
ExitCode int
Signal int
Message string
KillReason string
KillTimeout time.Duration
KillError string
StartDelay int64
DownloadError string
ValidationError string
DiskLimit int64
DiskSize int64
FailedSibling string
VaultError string
TaskSignalReason string
TaskSignal string
InitializationMessage string
Contributor: Change to DriverMsg as the emitEvent function can be used generically by the driver?

Member Author: Any thoughts on what I should change the TaskInitializing const to? All of the other consts are verbs, but I don't think it'd be confusing to just have a "Driver" message type.

Contributor: "Driver" is fine with me.

}
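Following the exchange above, a minimal sketch of what the agreed rename might look like; the exact identifiers (TaskDriverMessage, DriverMessage) are assumptions and are not taken from this diff.

// Sketch only: rename the const to a generic "Driver" event type and give
// TaskEvent a generic driver message field. Names are assumptions.
const TaskDriverMessage = "Driver"

type TaskEvent struct {
	Type string
	Time int64
	// DriverMessage is a generic message emitted by a driver, e.g.
	// `Downloading image "busybox:1.24"`.
	DriverMessage string
	// remaining fields unchanged from the struct above
}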
2 changes: 1 addition & 1 deletion client/client.go
@@ -792,7 +792,7 @@ func (c *Client) setupDrivers() error {

var avail []string
var skipped []string
driverCtx := driver.NewDriverContext("", c.config, c.config.Node, c.logger, nil)
driverCtx := driver.NewDriverContext("", c.config, c.config.Node, c.logger, nil, nil)
for name := range driver.BuiltinDrivers {
// Skip fingerprinting drivers that are not in the whitelist if it is
// enabled.
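The extra nil at the end of the NewDriverContext call is the new event-emitter argument introduced by this PR; fingerprinting does not need task events, so client.go passes nil. A caller that does want events would pass a closure like the one added to the docker_test.go helper later in this diff. The sketch below assumes the emitter has the shape func(message string, args ...interface{}), mirroring that test helper.

// Sketch only: a non-nil emitter passed to NewDriverContext; the emitter
// signature is assumed to match the test helper below.
emitter := func(m string, args ...interface{}) {
	c.logger.Printf("[EVENT] driver: "+m, args...)
}
driverCtx := driver.NewDriverContext("", c.config, c.config.Node, c.logger, nil, emitter)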
54 changes: 35 additions & 19 deletions client/driver/docker.go
@@ -91,6 +91,10 @@ const (

type DockerDriver struct {
DriverContext

imageID string
waitClient *docker.Client
Contributor: You don't have to pass this as it is a shared variable that is initialized once per client.

Member Author (@schmichael, Dec 20, 2016): Tangent: Hm, should I be using the waitClient for image downloading?

driverConfig *DockerDriverConfig
}

type DockerDriverAuth struct {
@@ -339,46 +343,57 @@ func (d *DockerDriver) Abilities() DriverAbilities {
}
}

func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
func (d *DockerDriver) Prestart(ctx *ExecContext, task *structs.Task) error {
Contributor: Can you emit a message for when it downloads an image?

Member Author: Can't believe I missed that in this PR! Added (inside the createImage method so it's not emitted if the image already exists).

// Set environment variables.
d.taskEnv.SetAllocDir(allocdir.SharedAllocContainerPath).
SetTaskLocalDir(allocdir.TaskLocalContainerPath).SetSecretsDir(allocdir.TaskSecretsContainerPath).Build()

driverConfig, err := NewDockerDriverConfig(task, d.taskEnv)
if err != nil {
return nil, err
return err
}

cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)

taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
return fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
Contributor: taskDir is a variable in the task_runner now. Pass it through the context?

Member Author: That's in the next PR.

}

// Initialize docker API clients
client, waitClient, err := d.dockerClients()
if err != nil {
return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
return fmt.Errorf("Failed to connect to docker daemon: %s", err)
}

if err := d.createImage(driverConfig, client, taskDir); err != nil {
return nil, err
return err
}

image := driverConfig.ImageName
// Now that we have the image we can get the image id
dockerImage, err := client.InspectImage(image)
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err)
return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
return fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
}
d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID)

// Set state needed by Start()
d.imageID = dockerImage.ID
d.waitClient = waitClient
d.driverConfig = driverConfig
return nil
}
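Because Prestart now stashes imageID, waitClient, and driverConfig on the driver, callers must invoke Prestart before Start. A minimal usage sketch of that ordering, matching the pattern the updated tests below follow; runTask is a hypothetical helper, not part of this diff.

// Sketch only: the Prestart-then-Start calling convention.
func runTask(d Driver, ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	// Prestart pulls/loads the image and stashes the state Start consumes.
	if err := d.Prestart(ctx, task); err != nil {
		return nil, fmt.Errorf("prestart failed: %v", err)
	}
	return d.Start(ctx, task)
}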

func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}

taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
@@ -404,9 +419,9 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle

// Only launch syslog server if we're going to use it!
syslogAddr := ""
if runtime.GOOS == "darwin" && len(driverConfig.Logging) == 0 {
if runtime.GOOS == "darwin" && len(d.driverConfig.Logging) == 0 {
d.logger.Printf("[DEBUG] driver.docker: disabling syslog driver as Docker for Mac workaround")
} else if len(driverConfig.Logging) == 0 || driverConfig.Logging[0].Type == "syslog" {
} else if len(d.driverConfig.Logging) == 0 || d.driverConfig.Logging[0].Type == "syslog" {
ss, err := exec.LaunchSyslogServer()
if err != nil {
pluginClient.Kill()
Expand All @@ -415,11 +430,11 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
syslogAddr = ss.Addr
}

config, err := d.createContainerConfig(ctx, task, driverConfig, syslogAddr)
config, err := d.createContainerConfig(ctx, task, d.driverConfig, syslogAddr)
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", image, err)
d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", d.imageID, err)
pluginClient.Kill()
return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", d.imageID, err)
}

container, rerr := d.createContainer(config)
@@ -432,17 +447,17 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle

d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)

cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)

// We don't need to start the container if the container is already running
// since we don't create containers which are already present on the host
// and are running
if !container.State.Running {
// Start the container
err := d.startContainer(container)
if err != nil {
if err := client.StartContainer(container.ID, container.HostConfig); err != nil {
Contributor: Why did you change from d.startContainer?

Member Author: Yikes. No clue. Thanks for catching this. Fixed.
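The fix reinstating d.startContainer is not shown in this excerpt; a rough sketch of the reverted block, assuming the helper keeps whatever recoverable-error handling the removed lines implied — its exact return type is an assumption.

// Sketch only: call the driver's startContainer helper instead of the raw
// docker client so its error handling is preserved.
err := d.startContainer(container)
if err != nil {
	d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err)
	pluginClient.Kill()
	return nil, fmt.Errorf("Failed to start container %s: %s", container.ID, err)
}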

d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err)
pluginClient.Kill()
err.Err = fmt.Sprintf("Failed to start container %s: %s", container.ID, err)
return nil, err
return nil, fmt.Errorf("Failed to start container %s: %s", container.ID, err)
}
d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)
} else {
Expand All @@ -454,12 +469,12 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
maxKill := d.DriverContext.config.MaxKillTimeout
h := &DockerHandle{
client: client,
waitClient: waitClient,
waitClient: d.waitClient,
executor: exec,
pluginClient: pluginClient,
cleanupImage: cleanupImage,
logger: d.logger,
imageID: dockerImage.ID,
imageID: d.imageID,
containerID: container.ID,
version: d.config.Version,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
@@ -935,6 +950,7 @@ func (d *DockerDriver) pullImage(driverConfig *DockerDriverConfig, client *docke
}
}

d.emitEvent("Downloading image")
Contributor (@dadgar, Dec 20, 2016): d.emitEvent(fmt.Sprintf(`Downloading image "%s:%s"`, repo, tag))

Member Author: Good call; fixed.

err := client.PullImage(pullOptions, authOptions)
if err != nil {
d.logger.Printf("[ERR] driver.docker: failed pulling container %s:%s: %s", repo, tag, err)
45 changes: 42 additions & 3 deletions client/driver/docker_test.go
@@ -101,6 +101,10 @@ func dockerSetup(t *testing.T, task *structs.Task) (*docker.Client, DriverHandle
driver := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)

if err := driver.Prestart(execCtx, task); err != nil {
execCtx.AllocDir.Destroy()
t.Fatalf("error in prestart: %v", err)
}
handle, err := driver.Start(execCtx, task)
if err != nil {
execCtx.AllocDir.Destroy()
@@ -167,6 +171,9 @@ func TestDockerDriver_StartOpen_Wait(t *testing.T) {
d := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)

if err := d.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
@@ -256,6 +263,9 @@ func TestDockerDriver_Start_LoadImage(t *testing.T) {
// Copy the image into the task's directory
copyImage(execCtx, task, "busybox.tar", t)

if err := d.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
@@ -316,9 +326,9 @@ func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
defer execCtx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)

_, err := d.Start(execCtx, task)
err := d.Prestart(execCtx, task)
if err == nil {
t.Fatalf("want err: %v", err)
t.Fatalf("want error in prestart: %v", err)
}

if rerr, ok := err.(*structs.RecoverableError); !ok {
@@ -366,6 +376,9 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
d := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)

if err := d.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
@@ -456,6 +469,9 @@ func TestDockerDriver_StartN(t *testing.T) {
d := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)

if err := d.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart #%d: %v", idx+1, err)
}
handles[idx], err = d.Start(execCtx, task)
if err != nil {
t.Errorf("Failed starting task #%d: %s", idx+1, err)
@@ -513,6 +529,9 @@ func TestDockerDriver_StartNVersions(t *testing.T) {
copyImage(execCtx, task, "busybox_musl.tar", t)
copyImage(execCtx, task, "busybox_glibc.tar", t)

if err := d.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart #%d: %v", idx+1, err)
}
handles[idx], err = d.Start(execCtx, task)
if err != nil {
t.Errorf("Failed starting task #%d: %s", idx+1, err)
@@ -804,6 +823,10 @@ func TestDockerDriver_User(t *testing.T) {
defer execCtx.AllocDir.Destroy()
copyImage(execCtx, task, "busybox.tar", t)

if err := driver.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}

// It should fail because the user "alice" does not exist on the given
// image.
handle, err := driver.Start(execCtx, task)
@@ -953,6 +976,9 @@ done
fmt.Errorf("Failed to write data")
}

if err := d.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
@@ -1035,7 +1061,11 @@ func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*str
t.Fatalf("Failed to get task env: %v", err)
}

driverCtx := NewDriverContext(task.Name, cfg, cfg.Node, testLogger(), taskEnv)
logger := testLogger()
emitter := func(m string, args ...interface{}) {
logger.Printf("[EVENT] "+m, args...)
}
driverCtx := NewDriverContext(task.Name, cfg, cfg.Node, testLogger(), taskEnv, emitter)
driver := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)

@@ -1058,6 +1088,9 @@ func TestDockerDriver_VolumesDisabled(t *testing.T) {
task, driver, execCtx, _, cleanup := setupDockerVolumes(t, cfg, tmpvol)
defer cleanup()

if err := driver.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
if _, err := driver.Start(execCtx, task); err == nil {
t.Fatalf("Started driver successfully when volumes should have been disabled.")
}
@@ -1068,6 +1101,9 @@ func TestDockerDriver_VolumesDisabled(t *testing.T) {
task, driver, execCtx, fn, cleanup := setupDockerVolumes(t, cfg, ".")
defer cleanup()

if err := driver.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := driver.Start(execCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
@@ -1106,6 +1142,9 @@ func TestDockerDriver_VolumesEnabled(t *testing.T) {
task, driver, execCtx, hostpath, cleanup := setupDockerVolumes(t, cfg, tmpvol)
defer cleanup()

if err := driver.Prestart(execCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := driver.Start(execCtx, task)
if err != nil {
t.Fatalf("Failed to start docker driver: %v", err)