From c97711ef34837d49848622d2fb554eaddfdcde2f Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 19 Oct 2016 13:06:28 -0700 Subject: [PATCH 1/4] Add set contains --- jobspec/parse.go | 8 +++++ jobspec/parse_test.go | 19 +++++++++++ .../test-fixtures/set-contains-constraint.hcl | 6 ++++ nomad/structs/structs.go | 1 + scheduler/feasible.go | 34 +++++++++++++++++++ scheduler/feasible_test.go | 10 ++++++ website/source/docs/jobspec/index.html.md | 6 ++++ website/source/docs/jobspec/json.html.md | 3 ++ 8 files changed, 87 insertions(+) create mode 100644 jobspec/test-fixtures/set-contains-constraint.hcl diff --git a/jobspec/parse.go b/jobspec/parse.go index 7d01f09e6e9..f9910e4b53f 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -407,6 +407,7 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error "version", "regexp", "distinct_hosts", + "set_contains", } if err := checkHCLKeys(o.Val, valid); err != nil { return err @@ -435,6 +436,13 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error m["RTarget"] = constraint } + // If "set_contains" is provided, set the operand + // to "set_contains" and the value to the "RTarget" + if constraint, ok := m[structs.ConstraintSetContains]; ok { + m["Operand"] = structs.ConstraintSetContains + m["RTarget"] = constraint + } + if value, ok := m[structs.ConstraintDistinctHosts]; ok { enabled, err := parseBool(value) if err != nil { diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index 66996adc7b1..1e2a282a0f9 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -282,6 +282,25 @@ func TestParse(t *testing.T) { false, }, + { + "set-contains-constraint.hcl", + &structs.Job{ + ID: "foo", + Name: "foo", + Priority: 50, + Region: "global", + Type: "service", + Constraints: []*structs.Constraint{ + &structs.Constraint{ + LTarget: "$meta.data", + RTarget: "foo,bar,baz", + Operand: structs.ConstraintSetContains, + }, + }, + }, + false, + }, + { "distinctHosts-constraint.hcl", &structs.Job{ diff --git a/jobspec/test-fixtures/set-contains-constraint.hcl b/jobspec/test-fixtures/set-contains-constraint.hcl new file mode 100644 index 00000000000..170f72118c0 --- /dev/null +++ b/jobspec/test-fixtures/set-contains-constraint.hcl @@ -0,0 +1,6 @@ +job "foo" { + constraint { + attribute = "$meta.data" + set_contains = "foo,bar,baz" + } +} diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index b6d3e258182..7459b88fd8d 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -2729,6 +2729,7 @@ const ( ConstraintDistinctHosts = "distinct_hosts" ConstraintRegex = "regexp" ConstraintVersion = "version" + ConstraintSetContains = "set_contains" ) // Constraints are used to restrict placement options. 
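Before the scheduler change that follows, it may help to see the intended semantics of the new operand in isolation: the left-hand value (the node attribute) is treated as a comma-separated set, and the constraint passes only when every comma-separated element of `RTarget` is present in it, with surrounding whitespace ignored. A minimal standalone sketch of those semantics (the function name is illustrative, not part of the patch; the real check lives in `scheduler/feasible.go` below):

```go
package main

import (
	"fmt"
	"strings"
)

// setContains reports whether every comma-separated element of rVal is present
// in the comma-separated set lVal. Whitespace around elements is ignored,
// mirroring the TrimSpace handling in the scheduler change below.
func setContains(lVal, rVal string) bool {
	lookup := make(map[string]struct{})
	for _, in := range strings.Split(lVal, ",") {
		lookup[strings.TrimSpace(in)] = struct{}{}
	}
	for _, r := range strings.Split(rVal, ",") {
		if _, ok := lookup[strings.TrimSpace(r)]; !ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(setContains("foo,bar,baz", "foo, bar ")) // true
	fmt.Println(setContains("foo,bar,baz", "foo,bam"))   // false
}
```

The two calls in `main` correspond to the test cases added to `scheduler/feasible_test.go` in this commit.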
diff --git a/scheduler/feasible.go b/scheduler/feasible.go index 9ff2d2b36c2..2ca75ee17d4 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -344,6 +344,8 @@ func checkConstraint(ctx Context, operand string, lVal, rVal interface{}) bool { return checkVersionConstraint(ctx, lVal, rVal) case structs.ConstraintRegex: return checkRegexpConstraint(ctx, lVal, rVal) + case structs.ConstraintSetContains: + return checkSetContainsConstraint(ctx, lVal, rVal) default: return false } @@ -451,6 +453,38 @@ func checkRegexpConstraint(ctx Context, lVal, rVal interface{}) bool { return re.MatchString(lStr) } +// checkSetContainsConstraint is used to see if the left hand side contains the +// string on the right hand side +func checkSetContainsConstraint(ctx Context, lVal, rVal interface{}) bool { + // Ensure left-hand is string + lStr, ok := lVal.(string) + if !ok { + return false + } + + // Regexp must be a string + rStr, ok := rVal.(string) + if !ok { + return false + } + + input := strings.Split(lStr, ",") + lookup := make(map[string]struct{}, len(input)) + for _, in := range input { + cleaned := strings.TrimSpace(in) + lookup[cleaned] = struct{}{} + } + + for _, r := range strings.Split(rStr, ",") { + cleaned := strings.TrimSpace(r) + if _, ok := lookup[cleaned]; !ok { + return false + } + } + + return true +} + // FeasibilityWrapper is a FeasibleIterator which wraps both job and task group // FeasibilityCheckers in which feasibility checking can be skipped if the // computed node class has previously been marked as eligible or ineligible. diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index a6e4126c41f..e6fe430e47d 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -304,6 +304,16 @@ func TestCheckConstraint(t *testing.T) { lVal: "foo", rVal: "bar", result: false, }, + { + op: structs.ConstraintSetContains, + lVal: "foo,bar,baz", rVal: "foo, bar ", + result: true, + }, + { + op: structs.ConstraintSetContains, + lVal: "foo,bar,baz", rVal: "foo,bam", + result: false, + }, } for _, tc := range cases { diff --git a/website/source/docs/jobspec/index.html.md b/website/source/docs/jobspec/index.html.md index 3c0dc33ac5b..20b9c5b4fd4 100644 --- a/website/source/docs/jobspec/index.html.md +++ b/website/source/docs/jobspec/index.html.md @@ -419,6 +419,12 @@ The `constraint` object supports the following keys: the attribute. This sets the operator to "regexp" and the `value` to the regular expression. +* `set_contains` - Specifies a set contains constraint against + the attribute. This sets the operator to "set_contains" and the `value` + to the what is specified. This will check that the given attribute contains + each of the specified elements. The attribute and the list being checked are + split using commas. + * `distinct_hosts` - `distinct_hosts` accepts a boolean value and defaults to `false`. If set, the scheduler will not co-locate any task groups on the same machine. This can be specified as a job constraint which applies the diff --git a/website/source/docs/jobspec/json.html.md b/website/source/docs/jobspec/json.html.md index 458e436633c..d15770cb774 100644 --- a/website/source/docs/jobspec/json.html.md +++ b/website/source/docs/jobspec/json.html.md @@ -443,6 +443,9 @@ The `Constraint` object supports the following keys: * `regexp` - Allows the `RTarget` to be a regular expression to be matched. + * `set_contains` - Allows the `RTarget` to be a comma separated list of values + that should be contained in the LTarget's value. 
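As an aside on the JSON docs above: the `set_contains` constraint described there maps onto the same `structs.Constraint` that the HCL parser builds. A hedged Go illustration, with field values borrowed from the parse test fixture earlier in this patch and the JSON key names assumed to follow the `LTarget`/`RTarget`/`Operand` convention referenced on that page:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	// Roughly what a JSON constraint block of the form
	//   {"LTarget": "$meta.data", "Operand": "set_contains", "RTarget": "foo,bar,baz"}
	// decodes to -- the same structure the HCL parser produces in the test above.
	c := &structs.Constraint{
		LTarget: "$meta.data",
		RTarget: "foo,bar,baz",
		Operand: structs.ConstraintSetContains,
	}
	fmt.Println(c.Operand, c.LTarget, c.RTarget)
}
```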
+ * `distinct_host` - If set, the scheduler will not co-locate any task groups on the same machine. This can be specified as a job constraint which applies the constraint to all task groups in the job, or as a task group constraint which From 008d91aa565537cc2a452939225709468d8dd191 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 19 Oct 2016 15:06:23 -0700 Subject: [PATCH 2/4] Advertise signalling abilities --- client/driver/docker.go | 6 ++++++ client/driver/driver.go | 9 +++++++++ client/driver/exec.go | 6 ++++++ client/driver/java.go | 6 ++++++ client/driver/mock_driver.go | 6 ++++++ client/driver/qemu.go | 6 ++++++ client/driver/raw_exec.go | 6 ++++++ client/driver/rkt.go | 6 ++++++ client/fingerprint/fingerprint.go | 1 + client/fingerprint/signal.go | 33 +++++++++++++++++++++++++++++++ client/fingerprint/signal_test.go | 17 ++++++++++++++++ 11 files changed, 102 insertions(+) create mode 100644 client/fingerprint/signal.go create mode 100644 client/fingerprint/signal_test.go diff --git a/client/driver/docker.go b/client/driver/docker.go index ffc05fe7e6b..b4cc01d92b3 100644 --- a/client/driver/docker.go +++ b/client/driver/docker.go @@ -266,6 +266,12 @@ func (d *DockerDriver) Validate(config map[string]interface{}) error { return nil } +func (d *DockerDriver) Abilities() DriverAbilities { + return DriverAbilities{ + SendSignals: true, + } +} + // dockerClients creates two *docker.Client, one for long running operations and // the other for shorter operations. In test / dev mode we can use ENV vars to // connect to the docker daemon. In production mode we will read docker.endpoint diff --git a/client/driver/driver.go b/client/driver/driver.go index 8774f208ec1..b0f2eb6960a 100644 --- a/client/driver/driver.go +++ b/client/driver/driver.go @@ -59,6 +59,15 @@ type Driver interface { // Drivers must validate their configuration Validate(map[string]interface{}) error + + // Abilities returns the abilities of the driver + Abilities() DriverAbilities +} + +// DriverAbilities marks the abilities the driver has. 
+type DriverAbilities struct { + // SendSignals marks the driver as being able to send signals + SendSignals bool } // DriverContext is a means to inject dependencies such as loggers, configs, and diff --git a/client/driver/exec.go b/client/driver/exec.go index ddc8325cdac..b5c1f6b4b7c 100644 --- a/client/driver/exec.go +++ b/client/driver/exec.go @@ -82,6 +82,12 @@ func (d *ExecDriver) Validate(config map[string]interface{}) error { return nil } +func (d *ExecDriver) Abilities() DriverAbilities { + return DriverAbilities{ + SendSignals: true, + } +} + func (d *ExecDriver) Periodic() (bool, time.Duration) { return true, 15 * time.Second } diff --git a/client/driver/java.go b/client/driver/java.go index ba0fd239ac5..6d457c628fc 100644 --- a/client/driver/java.go +++ b/client/driver/java.go @@ -94,6 +94,12 @@ func (d *JavaDriver) Validate(config map[string]interface{}) error { return nil } +func (d *JavaDriver) Abilities() DriverAbilities { + return DriverAbilities{ + SendSignals: true, + } +} + func (d *JavaDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { // Get the current status so that we can log any debug messages only if the // state changes diff --git a/client/driver/mock_driver.go b/client/driver/mock_driver.go index e1ef419983d..ad6e2486e7c 100644 --- a/client/driver/mock_driver.go +++ b/client/driver/mock_driver.go @@ -62,6 +62,12 @@ func NewMockDriver(ctx *DriverContext) Driver { return &MockDriver{DriverContext: *ctx} } +func (d *MockDriver) Abilities() DriverAbilities { + return DriverAbilities{ + SendSignals: false, + } +} + // Start starts the mock driver func (m *MockDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) { var driverConfig MockDriverConfig diff --git a/client/driver/qemu.go b/client/driver/qemu.go index 9455e3ac20f..e37f405f8e1 100644 --- a/client/driver/qemu.go +++ b/client/driver/qemu.go @@ -97,6 +97,12 @@ func (d *QemuDriver) Validate(config map[string]interface{}) error { return nil } +func (d *QemuDriver) Abilities() DriverAbilities { + return DriverAbilities{ + SendSignals: false, + } +} + func (d *QemuDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { // Get the current status so that we can log any debug messages only if the // state changes diff --git a/client/driver/raw_exec.go b/client/driver/raw_exec.go index b13208e90a8..2834d8e35df 100644 --- a/client/driver/raw_exec.go +++ b/client/driver/raw_exec.go @@ -81,6 +81,12 @@ func (d *RawExecDriver) Validate(config map[string]interface{}) error { return nil } +func (d *RawExecDriver) Abilities() DriverAbilities { + return DriverAbilities{ + SendSignals: true, + } +} + func (d *RawExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { // Get the current status so that we can log any debug messages only if the // state changes diff --git a/client/driver/rkt.go b/client/driver/rkt.go index ac422ec6e36..c4823e8b5b8 100644 --- a/client/driver/rkt.go +++ b/client/driver/rkt.go @@ -128,6 +128,12 @@ func (d *RktDriver) Validate(config map[string]interface{}) error { return nil } +func (d *RktDriver) Abilities() DriverAbilities { + return DriverAbilities{ + SendSignals: false, + } +} + func (d *RktDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { // Get the current status so that we can log any debug messages only if the // state changes diff --git a/client/fingerprint/fingerprint.go b/client/fingerprint/fingerprint.go index 98520b306b3..51ed50145a9 100644 --- 
a/client/fingerprint/fingerprint.go +++ b/client/fingerprint/fingerprint.go @@ -25,6 +25,7 @@ func init() { builtinFingerprintMap["memory"] = NewMemoryFingerprint builtinFingerprintMap["network"] = NewNetworkFingerprint builtinFingerprintMap["nomad"] = NewNomadFingerprint + builtinFingerprintMap["signal"] = NewSignalFingerprint builtinFingerprintMap["storage"] = NewStorageFingerprint builtinFingerprintMap["vault"] = NewVaultFingerprint diff --git a/client/fingerprint/signal.go b/client/fingerprint/signal.go new file mode 100644 index 00000000000..d20cec07b02 --- /dev/null +++ b/client/fingerprint/signal.go @@ -0,0 +1,33 @@ +package fingerprint + +import ( + "log" + "strings" + + "github.com/hashicorp/consul-template/signals" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +// SignalFingerprint is used to fingerprint the available signals +type SignalFingerprint struct { + StaticFingerprinter + logger *log.Logger +} + +// NewSignalFingerprint is used to create a Signal fingerprint +func NewSignalFingerprint(logger *log.Logger) Fingerprint { + f := &SignalFingerprint{logger: logger} + return f +} + +func (f *SignalFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + // Build the list of available signals + sigs := make([]string, 0, len(signals.SignalLookup)) + for signal := range signals.SignalLookup { + sigs = append(sigs, signal) + } + + node.Attributes["os.signals"] = strings.Join(sigs, ",") + return true, nil +} diff --git a/client/fingerprint/signal_test.go b/client/fingerprint/signal_test.go new file mode 100644 index 00000000000..2157cf0c50e --- /dev/null +++ b/client/fingerprint/signal_test.go @@ -0,0 +1,17 @@ +package fingerprint + +import ( + "testing" + + "github.com/hashicorp/nomad/nomad/structs" +) + +func TestSignalFingerprint(t *testing.T) { + fp := NewSignalFingerprint(testLogger()) + node := &structs.Node{ + Attributes: make(map[string]string), + } + + assertFingerprintOK(t, fp, node) + assertNodeAttributeContains(t, node, "os.signals") +} From 1570b3b5fa04e1ba8c4073a76312440e40d263d2 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 20 Oct 2016 13:55:35 -0700 Subject: [PATCH 3/4] Add implicit signal constraint and validate that a driver can handle the signal. Also fixes a bug with plan and implicit constraints by adding them to the job being planned --- nomad/job_endpoint.go | 120 ++++++++++++++++++++++----- nomad/job_endpoint_test.go | 148 ++++++++++++++++++++++++++++++++++ nomad/structs/funcs.go | 16 ++++ nomad/structs/funcs_test.go | 15 ++++ nomad/structs/structs.go | 50 ++++++++++++ nomad/structs/structs_test.go | 87 ++++++++++++++++++++ 6 files changed, 414 insertions(+), 22 deletions(-) diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index fd27eb606ca..ff70661bd2a 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -52,6 +52,9 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis // Initialize the job fields (sets defaults and any necessary init work). args.Job.Canonicalize() + // Add implicit constraints + setImplicitConstraints(args.Job) + // Validate the job. 
if err := validateJob(args.Job); err != nil { return err @@ -115,28 +118,6 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis } } } - - // Add implicit constraints that the task groups are run on a Node with - // Vault - for _, tg := range args.Job.TaskGroups { - _, ok := policies[tg.Name] - if !ok { - // Not requesting Vault - continue - } - - found := false - for _, c := range tg.Constraints { - if c.Equal(vaultConstraint) { - found = true - break - } - } - - if !found { - tg.Constraints = append(tg.Constraints, vaultConstraint) - } - } } // Clear the Vault token @@ -188,6 +169,77 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis return nil } +// setImplicitConstraints adds implicit constraints to the job based on the +// features it is requesting. +func setImplicitConstraints(j *structs.Job) { + // Get the required Vault Policies + policies := j.VaultPolicies() + + // Get the required signals + signals := j.RequiredSignals() + + // Hot path + if len(signals) == 0 && len(policies) == 0 { + return + } + + // Add Vault constraints + for _, tg := range j.TaskGroups { + _, ok := policies[tg.Name] + if !ok { + // Not requesting Vault + continue + } + + found := false + for _, c := range tg.Constraints { + if c.Equal(vaultConstraint) { + found = true + break + } + } + + if !found { + tg.Constraints = append(tg.Constraints, vaultConstraint) + } + } + + // Add signal constraints + for _, tg := range j.TaskGroups { + tgSignals, ok := signals[tg.Name] + if !ok { + // Not requesting Vault + continue + } + + // Flatten the signals + required := structs.MapStringStringSliceValueSet(tgSignals) + sigConstraint := getSignalConstraint(required) + + found := false + for _, c := range tg.Constraints { + if c.Equal(sigConstraint) { + found = true + break + } + } + + if !found { + tg.Constraints = append(tg.Constraints, sigConstraint) + } + } +} + +// getSignalConstraint builds a suitable constraint based on the required +// signals +func getSignalConstraint(signals []string) *structs.Constraint { + return &structs.Constraint{ + Operand: structs.ConstraintSetContains, + LTarget: "${attr.os.signals}", + RTarget: strings.Join(signals, ","), + } +} + // Summary retreives the summary of a job func (j *Job) Summary(args *structs.JobSummaryRequest, reply *structs.JobSummaryResponse) error { @@ -556,6 +608,9 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse) // Initialize the job fields (sets defaults and any necessary init work). args.Job.Canonicalize() + // Add implicit constraints + setImplicitConstraints(args.Job) + // Validate the job. if err := validateJob(args.Job); err != nil { return err @@ -656,8 +711,14 @@ func validateJob(job *structs.Job) error { multierror.Append(validationErrors, err) } + // Get the signals required + signals := job.RequiredSignals() + // Validate the driver configurations. for _, tg := range job.TaskGroups { + // Get the signals for the task group + tgSignals, tgOk := signals[tg.Name] + for _, task := range tg.Tasks { d, err := driver.NewDriver( task.Driver, @@ -673,6 +734,21 @@ func validateJob(job *structs.Job) error { formatted := fmt.Errorf("group %q -> task %q -> config: %v", tg.Name, task.Name, err) multierror.Append(validationErrors, formatted) } + + // The task group didn't have any task that required signals + if !tgOk { + continue + } + + // This task requires signals. 
Ensure the driver is capable + if required, ok := tgSignals[task.Name]; ok { + abilities := d.Abilities() + if !abilities.SendSignals { + formatted := fmt.Errorf("group %q -> task %q: driver %q doesn't support sending signals. Requested signals are %v", + tg.Name, task.Name, task.Driver, strings.Join(required, ", ")) + multierror.Append(validationErrors, formatted) + } + } } } diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 856283a7cb9..ad6799fd6b7 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -1524,3 +1524,151 @@ func TestJobEndpoint_Plan_NoDiff(t *testing.T) { t.Fatalf("no failed task group alloc metrics") } } + +func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) { + s1 := testServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer s1.Shutdown() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // Enable vault + tr, f := true, false + s1.config.VaultConfig.Enabled = &tr + s1.config.VaultConfig.AllowUnauthenticated = &f + + // Replace the Vault Client on the server + tvc := &TestVaultClient{} + s1.vault = tvc + + policy := "foo" + goodToken := structs.GenerateUUID() + goodPolicies := []string{"foo", "bar", "baz"} + tvc.SetLookupTokenAllowedPolicies(goodToken, goodPolicies) + + // Create the register request with a job asking for a vault policy + job := mock.Job() + job.VaultToken = goodToken + job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{ + Policies: []string{policy}, + ChangeMode: structs.VaultChangeModeRestart, + } + req := &structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + + // Fetch the response + var resp structs.JobRegisterResponse + if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil { + t.Fatalf("bad: %v", err) + } + + // Check for the job in the FSM + state := s1.fsm.State() + out, err := state.JobByID(job.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("expected job") + } + if out.CreateIndex != resp.JobModifyIndex { + t.Fatalf("index mis-match") + } + + // Check that there is an implicit vault constraint + constraints := out.TaskGroups[0].Constraints + if len(constraints) != 1 { + t.Fatalf("Expected an implicit constraint") + } + + if !constraints[0].Equal(vaultConstraint) { + t.Fatalf("Expected implicit vault constraint") + } +} + +func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) { + s1 := testServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer s1.Shutdown() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // Create the register request with a job asking for a template that sends a + // signal + job := mock.Job() + signal := "SIGUSR1" + job.TaskGroups[0].Tasks[0].Templates = []*structs.Template{ + &structs.Template{ + SourcePath: "foo", + DestPath: "bar", + ChangeMode: structs.TemplateChangeModeSignal, + ChangeSignal: signal, + }, + } + req := &structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + + // Fetch the response + var resp structs.JobRegisterResponse + if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil { + t.Fatalf("bad: %v", err) + } + + // Check for the job in the FSM + state := s1.fsm.State() + out, err := state.JobByID(job.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("expected job") + } + if out.CreateIndex != resp.JobModifyIndex { + 
t.Fatalf("index mis-match") + } + + // Check that there is an implicit signal constraint + constraints := out.TaskGroups[0].Constraints + if len(constraints) != 1 { + t.Fatalf("Expected an implicit constraint") + } + + sigConstraint := getSignalConstraint([]string{signal}) + + if !constraints[0].Equal(sigConstraint) { + t.Fatalf("Expected implicit vault constraint") + } +} + +func TestJobEndpoint_ValidateJob_InvalidDriverConf(t *testing.T) { + // Create a mock job with an invalid config + job := mock.Job() + job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{ + "foo": "bar", + } + + if err := validateJob(job); err == nil || !strings.Contains(err.Error(), "-> config") { + t.Fatalf("Expected config error; got %v", err) + } +} + +func TestJobEndpoint_ValidateJob_InvalidSignals(t *testing.T) { + // Create a mock job that wants to send a signal to a driver that can't + job := mock.Job() + job.TaskGroups[0].Tasks[0].Driver = "qemu" + job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{ + Policies: []string{"foo"}, + ChangeMode: structs.VaultChangeModeSignal, + ChangeSignal: "SIGUSR1", + } + + if err := validateJob(job); err == nil || !strings.Contains(err.Error(), "support sending signals") { + t.Fatalf("Expected signal feasibility error; got %v", err) + } +} diff --git a/nomad/structs/funcs.go b/nomad/structs/funcs.go index c4aaef7f512..104bb58b477 100644 --- a/nomad/structs/funcs.go +++ b/nomad/structs/funcs.go @@ -288,3 +288,19 @@ func VaultPoliciesSet(policies map[string]map[string]*Vault) []string { } return flattened } + +// MapStringStringSliceValueSet returns the set of values in a map[string][]string +func MapStringStringSliceValueSet(m map[string][]string) []string { + set := make(map[string]struct{}) + for _, slice := range m { + for _, v := range slice { + set[v] = struct{}{} + } + } + + flat := make([]string, 0, len(set)) + for k := range set { + flat = append(flat, k) + } + return flat +} diff --git a/nomad/structs/funcs_test.go b/nomad/structs/funcs_test.go index a7f54937d49..059314a72b7 100644 --- a/nomad/structs/funcs_test.go +++ b/nomad/structs/funcs_test.go @@ -2,6 +2,7 @@ package structs import ( "fmt" + "reflect" "regexp" "testing" ) @@ -284,3 +285,17 @@ func TestSliceStringIsSubset(t *testing.T) { t.Fatalf("bad %v %v", sub, offending) } } + +func TestMapStringStringSliceValueSet(t *testing.T) { + m := map[string][]string{ + "foo": []string{"1", "2"}, + "bar": []string{"3"}, + "baz": nil, + } + + act := MapStringStringSliceValueSet(m) + exp := []string{"1", "2", "3"} + if !reflect.DeepEqual(act, exp) { + t.Fatalf("Bad") + } +} diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 7459b88fd8d..a034c60570b 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -14,6 +14,7 @@ import ( "path/filepath" "reflect" "regexp" + "sort" "strconv" "strings" "time" @@ -1300,6 +1301,55 @@ func (j *Job) VaultPolicies() map[string]map[string]*Vault { return policies } +// RequiredSignals returns a mapping of task groups to tasks to their required +// set of signals +func (j *Job) RequiredSignals() map[string]map[string][]string { + signals := make(map[string]map[string][]string) + + for _, tg := range j.TaskGroups { + for _, task := range tg.Tasks { + // Use this local one as a set + taskSignals := make(map[string]struct{}) + + // Check if the Vault change mode uses signals + if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal { + taskSignals[task.Vault.ChangeSignal] = struct{}{} + } + + // Check if any template change mode uses 
signals + for _, t := range task.Templates { + if t.ChangeMode != TemplateChangeModeSignal { + continue + } + + taskSignals[t.ChangeSignal] = struct{}{} + } + + // Flatten and sort the signals + l := len(taskSignals) + if l == 0 { + continue + } + + flat := make([]string, 0, l) + for sig := range taskSignals { + flat = append(flat, sig) + } + + sort.Strings(flat) + tgSignals, ok := signals[tg.Name] + if !ok { + tgSignals = make(map[string][]string) + signals[tg.Name] = tgSignals + } + tgSignals[task.Name] = flat + } + + } + + return signals +} + // JobListStub is used to return a subset of job information // for the job list type JobListStub struct { diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index 7ce4cfc86c3..58519969e84 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -305,6 +305,93 @@ func TestJob_VaultPolicies(t *testing.T) { } } +func TestJob_RequiredSignals(t *testing.T) { + j0 := &Job{} + e0 := make(map[string]map[string][]string, 0) + + vj1 := &Vault{ + Policies: []string{"p1"}, + ChangeMode: VaultChangeModeNoop, + } + vj2 := &Vault{ + Policies: []string{"p1"}, + ChangeMode: VaultChangeModeSignal, + ChangeSignal: "SIGUSR1", + } + tj1 := &Template{ + SourcePath: "foo", + DestPath: "bar", + ChangeMode: TemplateChangeModeNoop, + } + tj2 := &Template{ + SourcePath: "foo", + DestPath: "bar", + ChangeMode: TemplateChangeModeSignal, + ChangeSignal: "SIGUSR2", + } + j1 := &Job{ + TaskGroups: []*TaskGroup{ + &TaskGroup{ + Name: "foo", + Tasks: []*Task{ + &Task{ + Name: "t1", + }, + &Task{ + Name: "t2", + Vault: vj2, + Templates: []*Template{tj2}, + }, + }, + }, + &TaskGroup{ + Name: "bar", + Tasks: []*Task{ + &Task{ + Name: "t3", + Vault: vj1, + Templates: []*Template{tj1}, + }, + &Task{ + Name: "t4", + Vault: vj2, + }, + }, + }, + }, + } + + e1 := map[string]map[string][]string{ + "foo": map[string][]string{ + "t2": []string{"SIGUSR1", "SIGUSR2"}, + }, + "bar": map[string][]string{ + "t4": []string{"SIGUSR1"}, + }, + } + + cases := []struct { + Job *Job + Expected map[string]map[string][]string + }{ + { + Job: j0, + Expected: e0, + }, + { + Job: j1, + Expected: e1, + }, + } + + for i, c := range cases { + got := c.Job.RequiredSignals() + if !reflect.DeepEqual(got, c.Expected) { + t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) + } + } +} + func TestTaskGroup_Validate(t *testing.T) { tg := &TaskGroup{ Count: -1, From b580a6a72830076709e6ab4b90a544ba9832b609 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Tue, 25 Oct 2016 11:09:22 -0700 Subject: [PATCH 4/4] Upper case signals --- nomad/structs/structs.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index a034c60570b..37855765148 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -2091,6 +2091,14 @@ func (t *Task) Canonicalize(job *Job, tg *TaskGroup) { if t.KillTimeout == 0 { t.KillTimeout = DefaultKillTimeout } + + if t.Vault != nil { + t.Vault.Canonicalize() + } + + for _, template := range t.Templates { + template.Canonicalize() + } } func (t *Task) GoString() string { @@ -2315,6 +2323,12 @@ func (t *Template) Copy() *Template { return copy } +func (t *Template) Canonicalize() { + if t.ChangeSignal != "" { + t.ChangeSignal = strings.ToUpper(t.ChangeSignal) + } +} + func (t *Template) Validate() error { var mErr multierror.Error @@ -2916,6 +2930,12 @@ func (v *Vault) Copy() *Vault { return nv } +func (v *Vault) Canonicalize() { + if v.ChangeSignal != "" { + 
v.ChangeSignal = strings.ToUpper(v.ChangeSignal) + } +} + // Validate returns if the Vault block is valid. func (v *Vault) Validate() error { if v == nil {
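Taken together, the four patches form one pipeline: the client fingerprints its supported signals into the `os.signals` node attribute, `Canonicalize` upper-cases any `ChangeSignal`, `RequiredSignals` collects the signals per task group, `getSignalConstraint` turns them into a `set_contains` constraint on `${attr.os.signals}`, and the scheduler's `checkSetContainsConstraint` enforces it at placement time. A hedged end-to-end sketch of that flow, using simplified stand-in helpers rather than the actual Nomad APIs:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// requiredSignals mimics Job.RequiredSignals for a single task: change signals
// are collected as a set, upper-cased the way Canonicalize does, and sorted.
func requiredSignals(changeSignals []string) []string {
	set := make(map[string]struct{})
	for _, s := range changeSignals {
		set[strings.ToUpper(s)] = struct{}{}
	}
	flat := make([]string, 0, len(set))
	for s := range set {
		flat = append(flat, s)
	}
	sort.Strings(flat)
	return flat
}

// signalConstraint mirrors getSignalConstraint: the required signals become the
// RTarget of a set_contains constraint on the node's os.signals attribute.
func signalConstraint(signals []string) (lTarget, operand, rTarget string) {
	return "${attr.os.signals}", "set_contains", strings.Join(signals, ",")
}

// nodeSatisfies mirrors checkSetContainsConstraint: every required signal must
// appear in the node's comma-separated os.signals attribute.
func nodeSatisfies(nodeSignals, rTarget string) bool {
	have := make(map[string]struct{})
	for _, s := range strings.Split(nodeSignals, ",") {
		have[strings.TrimSpace(s)] = struct{}{}
	}
	for _, s := range strings.Split(rTarget, ",") {
		if _, ok := have[strings.TrimSpace(s)]; !ok {
			return false
		}
	}
	return true
}

func main() {
	required := requiredSignals([]string{"sigusr1", "SIGUSR2"})
	lt, op, rt := signalConstraint(required)
	fmt.Println(lt, op, rt) // ${attr.os.signals} set_contains SIGUSR1,SIGUSR2

	// A node whose signal fingerprint includes both signals is feasible.
	fmt.Println(nodeSatisfies("SIGHUP,SIGINT,SIGUSR1,SIGUSR2", rt)) // true
	fmt.Println(nodeSatisfies("SIGHUP,SIGINT", rt))                 // false
}
```

In the real code path, `Job.Register` and `Job.Plan` call `Canonicalize` before `setImplicitConstraints`, so the upper-cased signals are what end up in the implicit constraint, and `validateJob` additionally rejects jobs whose drivers report `SendSignals: false` in their `DriverAbilities`.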