Commit 4a1d99c (1 parent: 39a9bd2)

client: Add volume_hook for mounting volumes

Showing 3 changed files with 130 additions and 0 deletions.
@@ -0,0 +1,123 @@
package taskrunner

import (
    "context"
    "fmt"

    log "github.com/hashicorp/go-hclog"
    multierror "github.com/hashicorp/go-multierror"
    "github.com/hashicorp/nomad/client/allocrunner/interfaces"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/plugins/drivers"
)

type volumeHook struct {
    alloc  *structs.Allocation
    runner *TaskRunner
    logger log.Logger
}

func newVolumeHook(runner *TaskRunner, logger log.Logger) *volumeHook {
    h := &volumeHook{
        alloc:  runner.Alloc(),
        runner: runner,
    }
    h.logger = logger.Named(h.Name())
    return h
}

func (*volumeHook) Name() string {
    return "volumes"
}

// validateHostVolumes ensures every requested host volume is present in the
// client's host volume configuration.
func validateHostVolumes(requested map[string]*structs.VolumeRequest, client map[string]*structs.ClientHostVolumeConfig) error {
    var result error

    for n, req := range requested {
        if req.Volume.Type != "host" {
            continue
        }

        cfg, err := structs.ParseHostVolumeConfig(req.Config)
        if err != nil {
            result = multierror.Append(result, fmt.Errorf("failed to parse config for %s: %v", n, err))
            continue
        }

        if _, ok := client[cfg.Source]; !ok {
            result = multierror.Append(result, fmt.Errorf("missing host volume %s", cfg.Source))
        }
    }

    return result
}

// hostVolumeMountConfigurations maps the task's volume mounts onto the
// client's host volumes and builds the corresponding driver mount configs.
func (h *volumeHook) hostVolumeMountConfigurations(vmounts []*structs.VolumeMount, volumes map[string]*structs.VolumeRequest, client map[string]*structs.ClientHostVolumeConfig) ([]*drivers.MountConfig, error) {
    var mounts []*drivers.MountConfig
    for _, m := range vmounts {
        req, ok := volumes[m.Volume]
        if !ok {
            // Should never happen unless we mis-validated on job submission.
            return nil, fmt.Errorf("no group volume declaration found named: %s", m.Volume)
        }

        cfg, err := structs.ParseHostVolumeConfig(req.Config)
        if err != nil {
            return nil, fmt.Errorf("failed to parse config for %s: %v", m.Volume, err)
        }

        hostVolume, ok := client[cfg.Source]
        if !ok {
            // Should never happen unless the client's host volumes were mutated
            // during the execution of this hook.
            return nil, fmt.Errorf("no host volume named: %s", cfg.Source)
        }

        // A mount is read-only if the client host volume, the group volume
        // request, or the task's mount declaration marks it read-only.
        mcfg := &drivers.MountConfig{
            HostPath: hostVolume.Source,
            TaskPath: m.Destination,
            Readonly: hostVolume.ReadOnly || req.Volume.ReadOnly || m.ReadOnly,
        }
        mounts = append(mounts, mcfg)
    }

    return mounts, nil
}

func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
    volumes := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes
    mounts := h.runner.hookResources.getMounts()

    hostVolumes := h.runner.clientConfig.Node.HostVolumes

    // Always validate volumes so that we do not allow a volume to be used if
    // a host is restarted and loses its host volume configuration.
    if err := validateHostVolumes(volumes, hostVolumes); err != nil {
        h.logger.Error("requested host volume does not exist", "existing", hostVolumes, "requested", volumes)
        return fmt.Errorf("host volume validation error: %v", err)
    }

    requestedMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes)
    if err != nil {
        h.logger.Error("failed to generate volume mounts", "error", err)
        return err
    }

    // Because this hook is also run on restores, only add mounts that do not
    // already exist. Although this loop is quadratic, most tasks declare only
    // a small number of mounts. We may want to revisit this with a `hookdata`
    // param so that mounts are only added once.
REQUESTED:
    for _, m := range requestedMounts {
        for _, em := range mounts {
            if em.IsEqual(m) {
                continue REQUESTED
            }
        }

        mounts = append(mounts, m)
    }

    h.runner.hookResources.setMounts(mounts)
    return nil
}
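For illustration only, and not part of the commit above: a minimal, self-contained Go sketch of the two behaviours the hook relies on, namely the read-only precedence applied when a drivers.MountConfig is built, and the labeled-continue deduplication Prestart performs when the hook runs again after a restore. The mount type, isEqual, readonly, and the /srv/... paths are hypothetical stand-ins, not Nomad APIs.

package main

import "fmt"

// mount is a hypothetical stand-in for drivers.MountConfig, carrying only the
// fields the hook above cares about.
type mount struct {
    HostPath, TaskPath string
    Readonly           bool
}

// isEqual mirrors the role of MountConfig.IsEqual: two mounts are considered
// the same if all of their fields match.
func (m mount) isEqual(o mount) bool { return m == o }

// readonly applies the same precedence as the hook: a mount is read-only if
// the host volume, the group volume request, or the task mount says so.
func readonly(hostRO, requestRO, mountRO bool) bool {
    return hostRO || requestRO || mountRO
}

func main() {
    existing := []mount{{HostPath: "/srv/data", TaskPath: "/data", Readonly: true}}
    requested := []mount{
        {HostPath: "/srv/data", TaskPath: "/data", Readonly: true},                        // duplicate, skipped
        {HostPath: "/srv/logs", TaskPath: "/logs", Readonly: readonly(false, true, false)}, // new, added read-only
    }

    // Deduplicate with a labeled continue, as Prestart does on restore.
REQUESTED:
    for _, m := range requested {
        for _, em := range existing {
            if em.isEqual(m) {
                continue REQUESTED
            }
        }
        existing = append(existing, m)
    }

    fmt.Println(existing) // [{/srv/data /data true} {/srv/logs /logs true}]
}

Note that the real hook compares *drivers.MountConfig values with IsEqual; the sketch uses plain struct equality only to keep the control flow visible.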