diff --git a/client/consul_template.go b/client/consul_template.go new file mode 100644 index 00000000000..0103a5055ae --- /dev/null +++ b/client/consul_template.go @@ -0,0 +1,446 @@ +package client + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + ctconf "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/consul-template/manager" + "github.com/hashicorp/consul-template/signals" + "github.com/hashicorp/consul-template/watch" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/env" + "github.com/hashicorp/nomad/nomad/structs" +) + +var ( + // testRetryRate is used to speed up tests by setting consul-template's retry + // rate to something low + testRetryRate time.Duration = 0 +) + +// TaskHooks is an interface which provides hooks into the task's life-cycle +type TaskHooks interface { + // Restart is used to restart the task + Restart() + + // Signal is used to signal the task + Signal(os.Signal) + + // UnblockStart is used to unblock the starting of the task. This should be + // called after prestart work is completed + UnblockStart() + + // Kill is used to kill the task because of the passed error. + Kill(error) +} + +// TaskTemplateManager is used to run a set of templates for a given task +type TaskTemplateManager struct { + // templates is the set of templates we are managing + templates []*structs.Template + + // lookup allows looking up the set of Nomad templates by their consul-template ID + lookup map[string][]*structs.Template + + // allRendered marks whether all the templates have been rendered + allRendered bool + + // hook is used to signal/restart the task as templates are rendered + hook TaskHooks + + // runner is the consul-template runner + runner *manager.Runner + + // signals is a lookup map from the string representation of a signal to its + // actual signal + signals map[string]os.Signal + + // shutdownCh is used to signal any started goroutine to shut down + shutdownCh chan struct{} + + // shutdown marks whether the manager has been shut down + shutdown bool + shutdownLock sync.Mutex +} + +func NewTaskTemplateManager(hook TaskHooks, tmpls []*structs.Template, + allRendered bool, config *config.Config, vaultToken, taskDir string, + taskEnv *env.TaskEnvironment) (*TaskTemplateManager, error) { + + // Check pre-conditions + if hook == nil { + return nil, fmt.Errorf("Invalid task hook given") + } else if config == nil { + return nil, fmt.Errorf("Invalid config given") + } else if taskDir == "" { + return nil, fmt.Errorf("Invalid task directory given") + } else if taskEnv == nil { + return nil, fmt.Errorf("Invalid task environment given") + } + + tm := &TaskTemplateManager{ + templates: tmpls, + allRendered: allRendered, + hook: hook, + shutdownCh: make(chan struct{}), + } + + // Parse the signals that we need + for _, tmpl := range tmpls { + if tmpl.ChangeSignal == "" { + continue + } + + sig, err := signals.Parse(tmpl.ChangeSignal) + if err != nil { + return nil, fmt.Errorf("Failed to parse signal %q", tmpl.ChangeSignal) + } + + if tm.signals == nil { + tm.signals = make(map[string]os.Signal) + } + + tm.signals[tmpl.ChangeSignal] = sig + } + + // Build the consul-template runner + runner, lookup, err := templateRunner(tmpls, config, vaultToken, taskDir, taskEnv) + if err != nil { + return nil, err + } + tm.runner = runner + tm.lookup = lookup + + go tm.run() + return tm, nil +} + +// Stop is used to stop the consul-template runner +func
(tm *TaskTemplateManager) Stop() { + tm.shutdownLock.Lock() + defer tm.shutdownLock.Unlock() + + if tm.shutdown { + return + } + + close(tm.shutdownCh) + tm.shutdown = true + + // Stop the consul-template runner + if tm.runner != nil { + tm.runner.Stop() + } +} + +// run is the long lived loop that handles errors and templates being rendered +func (tm *TaskTemplateManager) run() { + if tm.runner == nil { + return + } + + // Start the runner + go tm.runner.Start() + + // Track when they have all been rendered so we don't signal the task for + // any render event before hand + var allRenderedTime time.Time + + // Handle the first rendering + if !tm.allRendered { + // Wait till all the templates have been rendered + WAIT: + for { + select { + case <-tm.shutdownCh: + return + case err, ok := <-tm.runner.ErrCh: + if !ok { + continue + } + + tm.hook.Kill(err) + case <-tm.runner.TemplateRenderedCh(): + // A template has been rendered, figure out what to do + events := tm.runner.RenderEvents() + + // Not all templates have been rendered yet + if len(events) < len(tm.lookup) { + continue + } + + for _, event := range events { + // This template hasn't been rendered + if event.LastDidRender.IsZero() { + continue WAIT + } + } + + break WAIT + } + } + + allRenderedTime = time.Now() + tm.hook.UnblockStart() + } + + // If all our templates are change mode no-op, then we can exit here + if tm.allTemplatesNoop() { + return + } + + // A lookup for the last time the template was handled + numTemplates := len(tm.templates) + handledRenders := make(map[string]time.Time, numTemplates) + + for { + select { + case <-tm.shutdownCh: + return + case err, ok := <-tm.runner.ErrCh: + if !ok { + continue + } + + tm.hook.Kill(err) + case <-tm.runner.TemplateRenderedCh(): + // A template has been rendered, figure out what to do + var handling []string + signals := make(map[string]struct{}) + restart := false + var splay time.Duration + + now := time.Now() + for id, event := range tm.runner.RenderEvents() { + + // First time through + if allRenderedTime.After(event.LastDidRender) { + handledRenders[id] = now + continue + } + + // We have already handled this one + if htime := handledRenders[id]; htime.After(event.LastDidRender) { + continue + } + + // Lookup the template and determine what to do + tmpls, ok := tm.lookup[id] + if !ok { + tm.hook.Kill(fmt.Errorf("consul-template runner returned unknown template id %q", id)) + return + } + + for _, tmpl := range tmpls { + switch tmpl.ChangeMode { + case structs.TemplateChangeModeSignal: + signals[tmpl.ChangeSignal] = struct{}{} + case structs.TemplateChangeModeRestart: + restart = true + case structs.TemplateChangeModeNoop: + continue + } + + if tmpl.Splay > splay { + splay = tmpl.Splay + } + } + + handling = append(handling, id) + } + + if restart || len(signals) != 0 { + if splay != 0 { + select { + case <-time.After(time.Duration(splay)): + case <-tm.shutdownCh: + return + } + } + + // Update handle time + now = time.Now() + for _, id := range handling { + handledRenders[id] = now + } + + if restart { + tm.hook.Restart() + } else if len(signals) != 0 { + for signal := range signals { + tm.hook.Signal(tm.signals[signal]) + } + } + } + } + } +} + +// allTemplatesNoop returns whether all the managed templates have change mode noop. 
+func (tm *TaskTemplateManager) allTemplatesNoop() bool { + for _, tmpl := range tm.templates { + if tmpl.ChangeMode != structs.TemplateChangeModeNoop { + return false + } + } + + return true +} + +// templateRunner returns a consul-template runner for the given templates and a +// lookup from the consul-template IDs to the Nomad templates. If no templates are given, a nil +// template runner and lookup is returned. +func templateRunner(tmpls []*structs.Template, config *config.Config, + vaultToken, taskDir string, taskEnv *env.TaskEnvironment) ( + *manager.Runner, map[string][]*structs.Template, error) { + + if len(tmpls) == 0 { + return nil, nil, nil + } + + runnerConfig, err := runnerConfig(config, vaultToken) + if err != nil { + return nil, nil, err + } + + // Parse the templates + ctmplMapping := parseTemplateConfigs(tmpls, taskDir, taskEnv) + + // Set the config + flat := make([]*ctconf.ConfigTemplate, 0, len(ctmplMapping)) + for ctmpl := range ctmplMapping { + local := ctmpl + flat = append(flat, &local) + } + runnerConfig.ConfigTemplates = flat + + runner, err := manager.NewRunner(runnerConfig, false, false) + if err != nil { + return nil, nil, err + } + + // Build the lookup + idMap := runner.ConfigTemplateMapping() + lookup := make(map[string][]*structs.Template, len(idMap)) + for id, ctmpls := range idMap { + for _, ctmpl := range ctmpls { + templates := lookup[id] + templates = append(templates, ctmplMapping[ctmpl]) + lookup[id] = templates + } + } + + return runner, lookup, nil +} + +// parseTemplateConfigs converts the task's templates into consul-templates +func parseTemplateConfigs(tmpls []*structs.Template, taskDir string, taskEnv *env.TaskEnvironment) map[ctconf.ConfigTemplate]*structs.Template { + // Build the task environment + // TODO Should be able to inject the Nomad env vars into Consul-template for + // rendering + taskEnv.Build() + + ctmpls := make(map[ctconf.ConfigTemplate]*structs.Template, len(tmpls)) + for _, tmpl := range tmpls { + var src, dest string + if tmpl.SourcePath != "" { + src = filepath.Join(taskDir, taskEnv.ReplaceEnv(tmpl.SourcePath)) + } + if tmpl.DestPath != "" { + dest = filepath.Join(taskDir, taskEnv.ReplaceEnv(tmpl.DestPath)) + } + + ct := ctconf.ConfigTemplate{ + Source: src, + Destination: dest, + EmbeddedTemplate: tmpl.EmbeddedTmpl, + Perms: ctconf.DefaultFilePerms, + Wait: &watch.Wait{}, + } + + ctmpls[ct] = tmpl + } + + return ctmpls +} + +// runnerConfig returns a consul-template runner configuration, setting the +// Vault and Consul configurations based on the client's configs.
+func runnerConfig(config *config.Config, vaultToken string) (*ctconf.Config, error) { + conf := &ctconf.Config{} + + set := func(keys []string) { + for _, k := range keys { + conf.Set(k) + } + } + + if testRetryRate != 0 { + conf.Retry = testRetryRate + conf.Set("retry") + } + + // Setup the Consul config + if config.ConsulConfig != nil { + conf.Consul = config.ConsulConfig.Addr + conf.Token = config.ConsulConfig.Token + set([]string{"consul", "token"}) + + if config.ConsulConfig.EnableSSL { + conf.SSL = &ctconf.SSLConfig{ + Enabled: true, + Verify: config.ConsulConfig.VerifySSL, + Cert: config.ConsulConfig.CertFile, + Key: config.ConsulConfig.KeyFile, + CaCert: config.ConsulConfig.CAFile, + } + set([]string{"ssl", "ssl.enabled", "ssl.verify", "ssl.cert", "ssl.key", "ssl.ca_cert"}) + } + + if config.ConsulConfig.Auth != "" { + parts := strings.SplitN(config.ConsulConfig.Auth, ":", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("Failed to parse Consul Auth config") + } + + conf.Auth = &ctconf.AuthConfig{ + Enabled: true, + Username: parts[0], + Password: parts[1], + } + + set([]string{"auth", "auth.username", "auth.password", "auth.enabled"}) + } + } + + // Setup the Vault config + if config.VaultConfig != nil && config.VaultConfig.Enabled { + conf.Vault = &ctconf.VaultConfig{ + Address: config.VaultConfig.Addr, + Token: vaultToken, + RenewToken: false, + } + set([]string{"vault", "vault.address", "vault.token", "vault.renew_token"}) + + if strings.HasPrefix(config.VaultConfig.Addr, "https") || config.VaultConfig.TLSCertFile != "" { + conf.Vault.SSL = &ctconf.SSLConfig{ + Enabled: true, + Verify: !config.VaultConfig.TLSSkipVerify, + Cert: config.VaultConfig.TLSCertFile, + Key: config.VaultConfig.TLSKeyFile, + CaCert: config.VaultConfig.TLSCaFile, + // TODO need to add this to consul-template: CaPath: config.VaultConfig.TLSCaPath, + } + + set([]string{"vault.ssl", "vault.ssl.enabled", "vault.ssl.verify", + "vault.ssl.cert", "vault.ssl.key", "vault.ssl.ca_cert"}) + } + } + + return conf, nil +} diff --git a/client/consul_template_test.go b/client/consul_template_test.go new file mode 100644 index 00000000000..08f73c1e161 --- /dev/null +++ b/client/consul_template_test.go @@ -0,0 +1,699 @@ +package client + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + ctestutil "github.com/hashicorp/consul/testutil" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/env" + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + sconfig "github.com/hashicorp/nomad/nomad/structs/config" + "github.com/hashicorp/nomad/testutil" +) + +// MockTaskHooks is a mock of the TaskHooks interface useful for testing +type MockTaskHooks struct { + Restarts int + RestartCh chan struct{} + + Signals []os.Signal + SignalCh chan struct{} + + UnblockCh chan struct{} + Unblocked bool + + KillError error +} + +func NewMockTaskHooks() *MockTaskHooks { + return &MockTaskHooks{ + UnblockCh: make(chan struct{}, 1), + RestartCh: make(chan struct{}, 1), + SignalCh: make(chan struct{}, 1), + } +} +func (m *MockTaskHooks) Restart() { + m.Restarts++ + select { + case m.RestartCh <- struct{}{}: + default: + } +} + +func (m *MockTaskHooks) Signal(s os.Signal) { + m.Signals = append(m.Signals, s) + select { + case m.SignalCh <- struct{}{}: + default: + } +} + +func (m *MockTaskHooks) Kill(e error) { m.KillError = e } +func (m *MockTaskHooks) UnblockStart() { + if 
!m.Unblocked { + close(m.UnblockCh) + } + + m.Unblocked = true +} + +// testHarness is used to test the TaskTemplateManager by spinning up +// Consul/Vault as needed +type testHarness struct { + manager *TaskTemplateManager + mockHooks *MockTaskHooks + templates []*structs.Template + taskEnv *env.TaskEnvironment + node *structs.Node + config *config.Config + vaultToken string + taskDir string + vault *testutil.TestVault + consul *ctestutil.TestServer +} + +// newTestHarness returns a harness starting a dev consul and vault server, +// building the appropriate config and creating a TaskTemplateManager +func newTestHarness(t *testing.T, templates []*structs.Template, allRendered, consul, vault bool) *testHarness { + harness := &testHarness{ + mockHooks: NewMockTaskHooks(), + templates: templates, + node: mock.Node(), + config: &config.Config{}, + } + + // Build the task environment + harness.taskEnv = env.NewTaskEnvironment(harness.node) + + // Make a tempdir + d, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Failed to make tmpdir: %v", err) + } + harness.taskDir = d + + if consul { + harness.consul = ctestutil.NewTestServer(t) + harness.config.ConsulConfig = &sconfig.ConsulConfig{ + Addr: harness.consul.HTTPAddr, + } + } + + if vault { + harness.vault = testutil.NewTestVault(t).Start() + harness.config.VaultConfig = harness.vault.Config + harness.vaultToken = harness.vault.RootToken + } + + manager, err := NewTaskTemplateManager(harness.mockHooks, templates, allRendered, + harness.config, harness.vaultToken, harness.taskDir, harness.taskEnv) + if err != nil { + t.Fatalf("failed to build task template manager: %v", err) + } + + harness.manager = manager + return harness +} + +// stop is used to stop any running Vault or Consul server plus the task manager +func (h *testHarness) stop() { + if h.vault != nil { + h.vault.Stop() + } + if h.consul != nil { + h.consul.Stop() + } + if h.manager != nil { + h.manager.Stop() + } + if h.taskDir != "" { + os.RemoveAll(h.taskDir) + } +} + +func TestTaskTemplateManager_Invalid(t *testing.T) { + hooks := NewMockTaskHooks() + var tmpls []*structs.Template + config := &config.Config{} + taskDir := "foo" + vaultToken := "" + taskEnv := env.NewTaskEnvironment(mock.Node()) + + _, err := NewTaskTemplateManager(nil, nil, false, nil, "", "", nil) + if err == nil { + t.Fatalf("Expected error") + } + + _, err = NewTaskTemplateManager(nil, tmpls, false, config, vaultToken, taskDir, taskEnv) + if err == nil || !strings.Contains(err.Error(), "task hook") { + t.Fatalf("Expected invalid task hook error: %v", err) + } + + _, err = NewTaskTemplateManager(hooks, tmpls, false, nil, vaultToken, taskDir, taskEnv) + if err == nil || !strings.Contains(err.Error(), "config") { + t.Fatalf("Expected invalid config error: %v", err) + } + + _, err = NewTaskTemplateManager(hooks, tmpls, false, config, vaultToken, "", taskEnv) + if err == nil || !strings.Contains(err.Error(), "task directory") { + t.Fatalf("Expected invalid task dir error: %v", err) + } + + _, err = NewTaskTemplateManager(hooks, tmpls, false, config, vaultToken, taskDir, nil) + if err == nil || !strings.Contains(err.Error(), "task environment") { + t.Fatalf("Expected invalid task environment error: %v", err) + } + + tm, err := NewTaskTemplateManager(hooks, tmpls, false, config, vaultToken, taskDir, taskEnv) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } else if tm == nil { + t.Fatalf("Bad %v", tm) + } + + // Build a template with a bad signal + tmpl := &structs.Template{ + DestPath: "foo", 
+ EmbeddedTmpl: "hello, world", + ChangeMode: structs.TemplateChangeModeSignal, + ChangeSignal: "foobarbaz", + } + + tmpls = append(tmpls, tmpl) + tm, err = NewTaskTemplateManager(hooks, tmpls, false, config, vaultToken, taskDir, taskEnv) + if err == nil || !strings.Contains(err.Error(), "Failed to parse signal") { + t.Fatalf("Expected signal parsing error: %v", err) + } +} + +func TestTaskTemplateManager_Unblock_Static(t *testing.T) { + // Make a template that will render immediately + content := "hello, world!" + file := "my.tmpl" + template := &structs.Template{ + EmbeddedTmpl: content, + DestPath: file, + ChangeMode: structs.TemplateChangeModeNoop, + } + + harness := newTestHarness(t, []*structs.Template{template}, false, false, false) + defer harness.stop() + + // Wait for the unblock + select { + case <-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + t.Fatalf("Task unblock should have been called") + } + + // Check the file is there + path := filepath.Join(harness.taskDir, file) + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content { + t.Fatalf("Unexpected template data; got %q, want %q", s, content) + } +} + +func TestTaskTemplateManager_Unblock_Consul(t *testing.T) { + // Make a template that will render based on a key in Consul + key := "foo" + content := "barbaz" + embedded := fmt.Sprintf(`{{key "%s"}}`, key) + file := "my.tmpl" + template := &structs.Template{ + EmbeddedTmpl: embedded, + DestPath: file, + ChangeMode: structs.TemplateChangeModeNoop, + } + + // Drop the retry rate + testRetryRate = 10 * time.Millisecond + + harness := newTestHarness(t, []*structs.Template{template}, false, true, false) + defer harness.stop() + + // Ensure no unblock + select { + case <-harness.mockHooks.UnblockCh: + t.Fatalf("Task unblock should have not have been called") + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + } + + // Write the key to Consul + harness.consul.SetKV(key, []byte(content)) + + // Wait for the unblock + select { + case <-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + t.Fatalf("Task unblock should have been called") + } + + // Check the file is there + path := filepath.Join(harness.taskDir, file) + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content { + t.Fatalf("Unexpected template data; got %q, want %q", s, content) + } +} + +func TestTaskTemplateManager_Unblock_Vault(t *testing.T) { + // Make a template that will render based on a key in Vault + vaultPath := "secret/password" + key := "password" + content := "barbaz" + embedded := fmt.Sprintf(`{{with secret "%s"}}{{.Data.%s}}{{end}}`, vaultPath, key) + file := "my.tmpl" + template := &structs.Template{ + EmbeddedTmpl: embedded, + DestPath: file, + ChangeMode: structs.TemplateChangeModeNoop, + } + + // Drop the retry rate + testRetryRate = 10 * time.Millisecond + + harness := newTestHarness(t, []*structs.Template{template}, false, false, true) + defer harness.stop() + + // Ensure no unblock + select { + case <-harness.mockHooks.UnblockCh: + t.Fatalf("Task unblock should not have been called") + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + } + + // Write the secret to Vault + logical := harness.vault.Client.Logical() 
+ logical.Write(vaultPath, map[string]interface{}{key: content}) + + // Wait for the unblock + select { + case <-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + t.Fatalf("Task unblock should have been called") + } + + // Check the file is there + path := filepath.Join(harness.taskDir, file) + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content { + t.Fatalf("Unexpected template data; got %q, want %q", s, content) + } +} + +func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) { + // Make a template that will render immediately + staticContent := "hello, world!" + staticFile := "my.tmpl" + template := &structs.Template{ + EmbeddedTmpl: staticContent, + DestPath: staticFile, + ChangeMode: structs.TemplateChangeModeNoop, + } + + // Make a template that will render based on a key in Consul + consulKey := "foo" + consulContent := "barbaz" + consulEmbedded := fmt.Sprintf(`{{key "%s"}}`, consulKey) + consulFile := "consul.tmpl" + template2 := &structs.Template{ + EmbeddedTmpl: consulEmbedded, + DestPath: consulFile, + ChangeMode: structs.TemplateChangeModeNoop, + } + + // Drop the retry rate + testRetryRate = 10 * time.Millisecond + + harness := newTestHarness(t, []*structs.Template{template, template2}, false, true, false) + defer harness.stop() + + // Ensure no unblock + select { + case <-harness.mockHooks.UnblockCh: + t.Fatalf("Task unblock should have not have been called") + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + } + + // Check that the static file has been rendered + path := filepath.Join(harness.taskDir, staticFile) + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != staticContent { + t.Fatalf("Unexpected template data; got %q, want %q", s, staticContent) + } + + // Write the key to Consul + harness.consul.SetKV(consulKey, []byte(consulContent)) + + // Wait for the unblock + select { + case <-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + t.Fatalf("Task unblock should have been called") + } + + // Check the consul file is there + path = filepath.Join(harness.taskDir, consulFile) + raw, err = ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != consulContent { + t.Fatalf("Unexpected template data; got %q, want %q", s, consulContent) + } +} + +func TestTaskTemplateManager_Rerender_Noop(t *testing.T) { + // Make a template that will render based on a key in Consul + key := "foo" + content1 := "bar" + content2 := "baz" + embedded := fmt.Sprintf(`{{key "%s"}}`, key) + file := "my.tmpl" + template := &structs.Template{ + EmbeddedTmpl: embedded, + DestPath: file, + ChangeMode: structs.TemplateChangeModeNoop, + } + + // Drop the retry rate + testRetryRate = 10 * time.Millisecond + + harness := newTestHarness(t, []*structs.Template{template}, false, true, false) + defer harness.stop() + + // Ensure no unblock + select { + case <-harness.mockHooks.UnblockCh: + t.Fatalf("Task unblock should have not have been called") + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + } + + // Write the key to Consul + harness.consul.SetKV(key, []byte(content1)) + + // Wait for the unblock + select { + case 
<-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + t.Fatalf("Task unblock should have been called") + } + + // Check the file is there + path := filepath.Join(harness.taskDir, file) + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content1 { + t.Fatalf("Unexpected template data; got %q, want %q", s, content1) + } + + // Update the key in Consul + harness.consul.SetKV(key, []byte(content2)) + + select { + case <-harness.mockHooks.RestartCh: + t.Fatalf("Noop ignored: %+v", harness.mockHooks) + case <-harness.mockHooks.SignalCh: + t.Fatalf("Noop ignored: %+v", harness.mockHooks) + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + } + + // Check the file has been updated + path = filepath.Join(harness.taskDir, file) + raw, err = ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content2 { + t.Fatalf("Unexpected template data; got %q, want %q", s, content2) + } +} + +func TestTaskTemplateManager_Rerender_Signal(t *testing.T) { + // Make a template that renders based on a key in Consul and sends SIGALRM + key1 := "foo" + content1_1 := "bar" + content1_2 := "baz" + embedded1 := fmt.Sprintf(`{{key "%s"}}`, key1) + file1 := "my.tmpl" + template := &structs.Template{ + EmbeddedTmpl: embedded1, + DestPath: file1, + ChangeMode: structs.TemplateChangeModeSignal, + ChangeSignal: "SIGALRM", + } + + // Make a template that renders based on a key in Consul and sends SIGBUS + key2 := "bam" + content2_1 := "cat" + content2_2 := "dog" + embedded2 := fmt.Sprintf(`{{key "%s"}}`, key2) + file2 := "my-second.tmpl" + template2 := &structs.Template{ + EmbeddedTmpl: embedded2, + DestPath: file2, + ChangeMode: structs.TemplateChangeModeSignal, + ChangeSignal: "SIGBUS", + } + + // Drop the retry rate + testRetryRate = 10 * time.Millisecond + + harness := newTestHarness(t, []*structs.Template{template, template2}, false, true, false) + defer harness.stop() + + // Ensure no unblock + select { + case <-harness.mockHooks.UnblockCh: + t.Fatalf("Task unblock should have not have been called") + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + } + + // Write the key to Consul + harness.consul.SetKV(key1, []byte(content1_1)) + harness.consul.SetKV(key2, []byte(content2_1)) + + // Wait for the unblock + select { + case <-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + t.Fatalf("Task unblock should have been called") + } + + // Update the keys in Consul + harness.consul.SetKV(key1, []byte(content1_2)) + harness.consul.SetKV(key2, []byte(content2_2)) + + // Wait for signals + timeout := time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second) +OUTER: + for { + select { + case <-harness.mockHooks.RestartCh: + t.Fatalf("Restart with signal policy: %+v", harness.mockHooks) + case <-harness.mockHooks.SignalCh: + if len(harness.mockHooks.Signals) != 2 { + continue + } + break OUTER + case <-timeout: + t.Fatalf("Should have received two signals: %+v", harness.mockHooks) + } + } + + // Check the files have been updated + path := filepath.Join(harness.taskDir, file1) + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content1_2 { + 
t.Fatalf("Unexpected template data; got %q, want %q", s, content1_2) + } + + path = filepath.Join(harness.taskDir, file2) + raw, err = ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content2_2 { + t.Fatalf("Unexpected template data; got %q, want %q", s, content2_2) + } +} + +func TestTaskTemplateManager_Rerender_Restart(t *testing.T) { + // Make a template that renders based on a key in Consul and sends restart + key1 := "bam" + content1_1 := "cat" + content1_2 := "dog" + embedded1 := fmt.Sprintf(`{{key "%s"}}`, key1) + file1 := "my.tmpl" + template := &structs.Template{ + EmbeddedTmpl: embedded1, + DestPath: file1, + ChangeMode: structs.TemplateChangeModeRestart, + } + + // Drop the retry rate + testRetryRate = 10 * time.Millisecond + + harness := newTestHarness(t, []*structs.Template{template}, false, true, false) + defer harness.stop() + + // Ensure no unblock + select { + case <-harness.mockHooks.UnblockCh: + t.Fatalf("Task unblock should have not have been called") + case <-time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second): + } + + // Write the key to Consul + harness.consul.SetKV(key1, []byte(content1_1)) + + // Wait for the unblock + select { + case <-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + t.Fatalf("Task unblock should have been called") + } + + // Update the keys in Consul + harness.consul.SetKV(key1, []byte(content1_2)) + + // Wait for restart + timeout := time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second) +OUTER: + for { + select { + case <-harness.mockHooks.RestartCh: + break OUTER + case <-harness.mockHooks.SignalCh: + t.Fatalf("Signal with restart policy: %+v", harness.mockHooks) + case <-timeout: + t.Fatalf("Should have received a restart: %+v", harness.mockHooks) + } + } + + // Check the files have been updated + path := filepath.Join(harness.taskDir, file1) + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content1_2 { + t.Fatalf("Unexpected template data; got %q, want %q", s, content1_2) + } +} + +func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) { + // Make a template that will have its destination interpolated + content := "hello, world!" 
+ file := "${node.unique.id}.tmpl" + template := &structs.Template{ + EmbeddedTmpl: content, + DestPath: file, + ChangeMode: structs.TemplateChangeModeNoop, + } + + harness := newTestHarness(t, []*structs.Template{template}, false, false, false) + defer harness.stop() + + // Ensure unblock + select { + case <-harness.mockHooks.UnblockCh: + case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second): + t.Fatalf("Task unblock should have been called") + } + + // Check the file is there + actual := fmt.Sprintf("%s.tmpl", harness.node.ID) + path := filepath.Join(harness.taskDir, actual) + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content { + t.Fatalf("Unexpected template data; got %q, want %q", s, content) + } +} + +func TestTaskTemplateManager_AllRendered_Signal(t *testing.T) { + // Make a template that renders based on a key in Consul and sends SIGALRM + key1 := "foo" + content1_1 := "bar" + embedded1 := fmt.Sprintf(`{{key "%s"}}`, key1) + file1 := "my.tmpl" + template := &structs.Template{ + EmbeddedTmpl: embedded1, + DestPath: file1, + ChangeMode: structs.TemplateChangeModeSignal, + ChangeSignal: "SIGALRM", + } + + // Drop the retry rate + testRetryRate = 10 * time.Millisecond + + harness := newTestHarness(t, []*structs.Template{template}, true, true, false) + defer harness.stop() + + // Write the key to Consul + harness.consul.SetKV(key1, []byte(content1_1)) + + // Wait for restart + timeout := time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second) +OUTER: + for { + select { + case <-harness.mockHooks.RestartCh: + t.Fatalf("Restart with signal policy: %+v", harness.mockHooks) + case <-harness.mockHooks.SignalCh: + break OUTER + case <-timeout: + t.Fatalf("Should have received a signals: %+v", harness.mockHooks) + } + } + + // Check the files have been updated + path := filepath.Join(harness.taskDir, file1) + raw, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read rendered template from %q: %v", path, err) + } + + if s := string(raw); s != content1_1 { + t.Fatalf("Unexpected template data; got %q, want %q", s, content1_1) + } +} diff --git a/jobspec/parse.go b/jobspec/parse.go index fd5a192643e..9648706031f 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -810,7 +810,7 @@ func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error { "destination", "data", "change_mode", - "restart_signal", + "change_signal", "splay", "once", } diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index 22ed87667d5..b2ade94f333 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -164,20 +164,18 @@ func TestParse(t *testing.T) { }, Templates: []*structs.Template{ { - SourcePath: "foo", - DestPath: "foo", - ChangeMode: "foo", - RestartSignal: "foo", - Splay: 10 * time.Second, - Once: true, + SourcePath: "foo", + DestPath: "foo", + ChangeMode: "foo", + ChangeSignal: "foo", + Splay: 10 * time.Second, }, { - SourcePath: "bar", - DestPath: "bar", - ChangeMode: structs.TemplateChangeModeRestart, - RestartSignal: "", - Splay: 5 * time.Second, - Once: false, + SourcePath: "bar", + DestPath: "bar", + ChangeMode: structs.TemplateChangeModeRestart, + ChangeSignal: "", + Splay: 5 * time.Second, }, }, }, diff --git a/jobspec/test-fixtures/basic.hcl b/jobspec/test-fixtures/basic.hcl index 8591b8ba099..1250f9a4569 100644 --- a/jobspec/test-fixtures/basic.hcl +++ b/jobspec/test-fixtures/basic.hcl @@ -138,9 +138,8 @@ 
job "binstore-storagelocker" { source = "foo" destination = "foo" change_mode = "foo" - restart_signal = "foo" + change_signal = "foo" splay = "10s" - once = true } template { diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index ce83f70941c..57ee3ab1ce9 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -3208,44 +3208,40 @@ func TestTaskDiff(t *testing.T) { Old: &Task{ Templates: []*Template{ { - SourcePath: "foo", - DestPath: "bar", - EmbededTmpl: "baz", - ChangeMode: "bam", - RestartSignal: "SIGHUP", - Splay: 1, - Once: true, + SourcePath: "foo", + DestPath: "bar", + EmbeddedTmpl: "baz", + ChangeMode: "bam", + ChangeSignal: "SIGHUP", + Splay: 1, }, { - SourcePath: "foo2", - DestPath: "bar2", - EmbededTmpl: "baz2", - ChangeMode: "bam2", - RestartSignal: "SIGHUP2", - Splay: 2, - Once: false, + SourcePath: "foo2", + DestPath: "bar2", + EmbeddedTmpl: "baz2", + ChangeMode: "bam2", + ChangeSignal: "SIGHUP2", + Splay: 2, }, }, }, New: &Task{ Templates: []*Template{ { - SourcePath: "foo", - DestPath: "bar", - EmbededTmpl: "baz", - ChangeMode: "bam", - RestartSignal: "SIGHUP", - Splay: 1, - Once: true, - }, - { - SourcePath: "foo3", - DestPath: "bar3", - EmbededTmpl: "baz3", - ChangeMode: "bam3", - RestartSignal: "SIGHUP3", - Splay: 3, - Once: true, + SourcePath: "foo", + DestPath: "bar", + EmbeddedTmpl: "baz", + ChangeMode: "bam", + ChangeSignal: "SIGHUP", + Splay: 1, + }, + { + SourcePath: "foo3", + DestPath: "bar3", + EmbeddedTmpl: "baz3", + ChangeMode: "bam3", + ChangeSignal: "SIGHUP3", + Splay: 3, }, }, }, diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index fa40f84a94e..2eb487ac5bc 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -1753,12 +1753,12 @@ func (sc *ServiceCheck) validate() error { switch sc.InitialStatus { case "": - case api.HealthUnknown: + // case api.HealthUnknown: TODO: Add when Consul releases 0.7.1 case api.HealthPassing: case api.HealthWarning: case api.HealthCritical: default: - return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q, %q or empty`, sc.InitialStatus, api.HealthUnknown, api.HealthPassing, api.HealthWarning, api.HealthCritical) + return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical) } @@ -2222,24 +2222,21 @@ type Template struct { // DestPath is the path to where the template should be rendered DestPath string `mapstructure:"destination"` - // EmbededTmpl store the raw template. This is useful for smaller templates + // EmbeddedTmpl store the raw template. This is useful for smaller templates // where they are embeded in the job file rather than sent as an artificat - EmbededTmpl string `mapstructure:"data"` + EmbeddedTmpl string `mapstructure:"data"` // ChangeMode indicates what should be done if the template is re-rendered ChangeMode string `mapstructure:"change_mode"` - // RestartSignal is the signal that should be sent if the change mode + // ChangeSignal is the signal that should be sent if the change mode // requires it. 
- RestartSignal string `mapstructure:"restart_signal"` + ChangeSignal string `mapstructure:"change_signal"` // Splay is used to avoid coordinated restarts of processes by applying a // random wait between 0 and the given splay value before signalling the // application of a change Splay time.Duration `mapstructure:"splay"` - - // Once mode is used to indicate that template should be rendered only once - Once bool `mapstructure:"once"` } // DefaultTemplate returns a default template. @@ -2247,7 +2244,6 @@ func DefaultTemplate() *Template { return &Template{ ChangeMode: TemplateChangeModeRestart, Splay: 5 * time.Second, - Once: false, } } @@ -2264,7 +2260,7 @@ func (t *Template) Validate() error { var mErr multierror.Error // Verify we have something to render - if t.SourcePath == "" && t.EmbededTmpl == "" { + if t.SourcePath == "" && t.EmbeddedTmpl == "" { multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embeded template")) } @@ -2285,7 +2281,7 @@ func (t *Template) Validate() error { switch t.ChangeMode { case TemplateChangeModeNoop, TemplateChangeModeRestart: case TemplateChangeModeSignal: - if t.RestartSignal == "" { + if t.ChangeSignal == "" { multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal")) } default: diff --git a/scripts/install_consul.sh b/scripts/install_consul.sh index e1c93a64519..9623e6288a1 100755 --- a/scripts/install_consul.sh +++ b/scripts/install_consul.sh @@ -2,7 +2,7 @@ set -ex -CONSUL_VERSION="0.6.4" +CONSUL_VERSION="0.7.0" CURDIR=`pwd` echo Fetching Consul... diff --git a/scripts/install_vault.sh b/scripts/install_vault.sh index e84296fc4f5..f439f938972 100755 --- a/scripts/install_vault.sh +++ b/scripts/install_vault.sh @@ -2,7 +2,7 @@ set -ex -VAULT_VERSION="0.6.1" +VAULT_VERSION="0.6.2" CURDIR=`pwd` echo Fetching Vault ${VAULT_VERSION}... diff --git a/vendor/github.com/burntsushi/toml/COMPATIBLE b/vendor/github.com/burntsushi/toml/COMPATIBLE new file mode 100644 index 00000000000..21e0938caef --- /dev/null +++ b/vendor/github.com/burntsushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) + diff --git a/vendor/github.com/burntsushi/toml/COPYING b/vendor/github.com/burntsushi/toml/COPYING new file mode 100644 index 00000000000..5a8e332545f --- /dev/null +++ b/vendor/github.com/burntsushi/toml/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/vendor/github.com/burntsushi/toml/Makefile b/vendor/github.com/burntsushi/toml/Makefile new file mode 100644 index 00000000000..3600848d331 --- /dev/null +++ b/vendor/github.com/burntsushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... 
+ +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/vendor/github.com/burntsushi/toml/README.md b/vendor/github.com/burntsushi/toml/README.md new file mode 100644 index 00000000000..5a5df637094 --- /dev/null +++ b/vendor/github.com/burntsushi/toml/README.md @@ -0,0 +1,220 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Spec: https://github.com/mojombo/toml + +Compatible with TOML version +[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md) + +Documentation: http://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml) + + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. 
+ +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. 
+ diff --git a/vendor/github.com/burntsushi/toml/decode.go b/vendor/github.com/burntsushi/toml/decode.go new file mode 100644 index 00000000000..b0fd51d5b6e --- /dev/null +++ b/vendor/github.com/burntsushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. 
The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. +func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. 
But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/burntsushi/toml/decode_meta.go b/vendor/github.com/burntsushi/toml/decode_meta.go new file mode 100644 index 00000000000..b9914a6798c --- /dev/null +++ b/vendor/github.com/burntsushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchially. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. 
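For context, a minimal sketch of how the MetaData accessors documented here are typically used, assuming the canonical import path github.com/BurntSushi/toml (the struct, keys, and values below are hypothetical):

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

func main() {
    var conf struct {
        Name string `toml:"name"`
        Port int    `toml:"port"`
    }
    blob := "name = \"web\"\nport = 8080\n"
    md, err := toml.Decode(blob, &conf)
    if err != nil {
        panic(err)
    }
    fmt.Println(md.IsDefined("port"))      // true
    fmt.Println(md.IsDefined("tls", "ca")) // false: never set in the document
    fmt.Println(md.Type("port"))           // "Integer"
    fmt.Println(md.Undecoded())            // empty: every key mapped to a field
}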
+func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/burntsushi/toml/doc.go b/vendor/github.com/burntsushi/toml/doc.go new file mode 100644 index 00000000000..fe26800041b --- /dev/null +++ b/vendor/github.com/burntsushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/mojombo/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. 
The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. +*/ +package toml diff --git a/vendor/github.com/burntsushi/toml/encode.go b/vendor/github.com/burntsushi/toml/encode.go new file mode 100644 index 00000000000..0f2558b2eaf --- /dev/null +++ b/vendor/github.com/burntsushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. 
+// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) +func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. + if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side. 
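As a usage illustration for the Encoder described above, assuming the canonical import path github.com/BurntSushi/toml (the Config struct and values are hypothetical); the omitempty behavior relies on the tag options parsed by getOptions further below:

package main

import (
    "os"

    "github.com/BurntSushi/toml"
)

type Config struct {
    Name  string            `toml:"name"`
    Ports []int             `toml:"ports"`
    Notes string            `toml:"notes,omitempty"` // dropped when empty
    Tags  map[string]string `toml:"tags"`            // written as a [tags] table
}

func main() {
    cfg := Config{
        Name:  "web",
        Ports: []int{8080, 8081},
        Tags:  map[string]string{"env": "prod"},
    }
    enc := toml.NewEncoder(os.Stdout)
    enc.Indent = "    " // override the default two-space indent
    if err := enc.Encode(cfg); err != nil {
        panic(err)
    }
}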
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra new line between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). + rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. 
+ } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. + // This checks arbitrarily nested arrays. 
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/burntsushi/toml/encoding_types.go b/vendor/github.com/burntsushi/toml/encoding_types.go new file mode 100644 index 00000000000..d36e1dd6002 --- /dev/null +++ b/vendor/github.com/burntsushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
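A small sketch of a type that satisfies these text interfaces, so the decoder's unifyText path and the encoder's TextMarshaler special case apply to it. The Level type, field names, and values are hypothetical; the import path is assumed to be github.com/BurntSushi/toml:

package main

import (
    "fmt"
    "os"

    "github.com/BurntSushi/toml"
)

// Level round-trips through TOML as a quoted string via the text interfaces.
type Level int

func (l Level) MarshalText() ([]byte, error) {
    return []byte(fmt.Sprintf("level-%d", int(l))), nil
}

func (l *Level) UnmarshalText(text []byte) error {
    _, err := fmt.Sscanf(string(text), "level-%d", (*int)(l))
    return err
}

func main() {
    var conf struct {
        Verbosity Level `toml:"verbosity"`
    }
    if _, err := toml.Decode(`verbosity = "level-3"`, &conf); err != nil {
        panic(err)
    }
    fmt.Println(int(conf.Verbosity)) // 3

    // Encoding goes back through MarshalText, producing: verbosity = "level-3"
    if err := toml.NewEncoder(os.Stdout).Encode(conf); err != nil {
        panic(err)
    }
}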
+type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/burntsushi/toml/encoding_types_1.1.go b/vendor/github.com/burntsushi/toml/encoding_types_1.1.go new file mode 100644 index 00000000000..e8d503d0469 --- /dev/null +++ b/vendor/github.com/burntsushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/burntsushi/toml/lex.go b/vendor/github.com/burntsushi/toml/lex.go new file mode 100644 index 00000000000..104ebda2127 --- /dev/null +++ b/vendor/github.com/burntsushi/toml/lex.go @@ -0,0 +1,858 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart +) + +const ( + eof = 0 + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + arrayValTerm = ',' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + width int + line int + state stateFn + items chan item + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input + "\n", + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop.") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.pos >= len(lx.input) { + lx.width = 0 + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.pos += lx.width + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called only once per call of next. +func (lx *lexer) backup() { + lx.pos -= lx.width + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (new lines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("Unexpected EOF.") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a new line. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a new line for us. 
+ lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.ignore() + return lexTop + } + return lx.errorf("Expected a top-level item to end with a new line, "+ + "comment or EOF, but got %q instead.", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("Expected end of table array name delimiter %q, "+ + "but got %q instead.", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("Unexpected end of table name. (Table names cannot " + + "be empty.)") + case r == tableSep: + return lx.errorf("Unexpected table separator. (Table names cannot " + + "be empty.)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. +func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ + "instead.", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("Unexpected key separator %q.", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. 
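To illustrate the two key forms handled by lexKeyStart and lexBareKey, both a bare key and a quoted key end up as ordinary map entries when decoding; the input, key names, and import path (github.com/BurntSushi/toml) are assumptions:

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

func main() {
    var m map[string]interface{}
    input := "bare_key-1 = 1\n\"key with spaces\" = 2\n"
    if _, err := toml.Decode(input, &m); err != nil {
        panic(err)
    }
    fmt.Println(m["bare_key-1"], m["key with spaces"]) // 1 2
}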
+func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): + lx.backup() + lx.emit(itemText) + return lexKeyEnd + case r == keySep: + lx.backup() + lx.emit(itemText) + return lexKeyEnd + default: + return lx.errorf("Bare keys cannot contain %q.", r) + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case r == keySep: + return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("Expected key separator %q, but got %q instead.", + keySep, r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT new lines. + // In array syntax, the array states are responsible for ignoring new + // lines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '+', '-': + return lexNumberStart + case '.': // special error case, be kind to users + return lx.errorf("Floats must start with a digit, not '.'.") + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + return lx.errorf("Expected value but found %q instead.", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and new lines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == arrayValTerm: + return lx.errorf("Unexpected array value terminator %q.", + arrayValTerm) + case r == arrayEnd: + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes the cruft between values of an array. Namely, +// it ignores whitespace and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == arrayValTerm: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf("Expected an array value terminator %q or an array "+ + "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) +} + +// lexArrayEnd finishes the lexing of an array. 
It assumes that a ']' has +// just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case isNL(r): + return lx.errorf("Strings cannot contain new lines.") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '\\': + return lexMultilineStringEscape + case r == stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case isNL(r): + return lx.errorf("Strings cannot contain new lines.") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("Invalid escape character %q. 
Only the following "+ + "escape characters are allowed: "+ + "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ + "\\uXXXX and \\UXXXXXXXX.", r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf("Expected four hexadecimal digits after '\\u', "+ + "but got '%s' instead.", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ + "but got '%s' instead.", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("Floats must start with a digit, not '.'.") + } + return lx.errorf("Expected a digit but got %q.", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. It assumes that a sign +// has already been read, but that *no* digits have been consumed. +// lexNumberStart will move to the appropriate integer or float states. +func lexNumberStart(lx *lexer) stateFn { + // We MUST see a digit. Even floats have to start with a digit. + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("Floats must start with a digit, not '.'.") + } + return lx.errorf("Expected a digit but got %q.", r) + } + return lexNumber +} + +// lexNumber consumes an integer or a float after seeing the first digit. +func lexNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumber + } + switch r { + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if r == eof || isWhitespace(r) || isNL(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("Expected value but found %q instead.", s) +} + +// lexCommentStart begins the lexing of a comment. 
It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first new line character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + r := lx.peek() + if isNL(r) || r == eof { + lx.emit(itemText) + return lx.pop() + } + lx.next() + return lexComment +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + return func(lx *lexer) stateFn { + lx.ignore() + return nextState + } +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec. +func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/burntsushi/toml/parse.go b/vendor/github.com/burntsushi/toml/parse.go new file mode 100644 index 00000000000..a5625555c5e --- /dev/null +++ b/vendor/github.com/burntsushi/toml/parse.go @@ -0,0 +1,557 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
+ implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
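For reference, a rough sketch of the Go types these translated values surface as when decoding into an untyped map (the document contents are hypothetical; import path assumed to be github.com/BurntSushi/toml):

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

func main() {
    var m map[string]interface{}
    input := `
title   = "example"
count   = 42
ratio   = 0.5
enabled = true
when    = 2016-01-02T15:04:05Z
tags    = ["a", "b"]
`
    if _, err := toml.Decode(input, &m); err != nil {
        panic(err)
    }
    for _, k := range []string{"title", "count", "ratio", "enabled", "when", "tags"} {
        fmt.Printf("%-8s %T\n", k, m[k])
    }
    // Prints string, int64, float64, bool, time.Time and []interface {}.
}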
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. 
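A quick sketch of the underscore rule implemented just below: underscores must sit between digits, in integers and floats alike (the inputs are hypothetical; import path assumed to be github.com/BurntSushi/toml):

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

func main() {
    var v struct {
        N int64   `toml:"n"`
        F float64 `toml:"f"`
    }
    if _, err := toml.Decode("n = 1_000_000\nf = 6_626.07\n", &v); err != nil {
        panic(err)
    }
    fmt.Println(v.N, v.F) // 1000000 6626.07

    _, err := toml.Decode("n = 1__000\n", &v)
    fmt.Println(err != nil) // true: consecutive underscores are rejected
}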
+func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 5) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as "+ + "an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var tmpHash interface{} + var ok bool + + hash := p.mapping + keyContext := make(Key, 0) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.bug("Expected hash to have type 'map[string]interface{}', but "+ + "it has '%T' instead.", tmpHash) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Typically, if the given key has already been set, then we have + // to raise an error since duplicate keys are disallowed. 
However, + // it's possible that a key was previously defined implicitly. In this + // case, it is allowed to be redefined concretely. (See the + // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. +func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) 
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/burntsushi/toml/session.vim b/vendor/github.com/burntsushi/toml/session.vim new file mode 100644 index 00000000000..562164be060 --- /dev/null +++ b/vendor/github.com/burntsushi/toml/session.vim @@ -0,0 +1 @@ +au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/burntsushi/toml/type_check.go b/vendor/github.com/burntsushi/toml/type_check.go new file mode 100644 index 00000000000..c73f8afc1a6 --- /dev/null +++ b/vendor/github.com/burntsushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. 
+func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. + if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/burntsushi/toml/type_fields.go b/vendor/github.com/burntsushi/toml/type_fields.go new file mode 100644 index 00000000000..608997c22f6 --- /dev/null +++ b/vendor/github.com/burntsushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. 
+ visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. 
If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/hashicorp/consul-template/LICENSE b/vendor/github.com/hashicorp/consul-template/LICENSE new file mode 100644 index 00000000000..82b4de97c7e --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
“Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. 
You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. 
Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/consul-template/child/child.go b/vendor/github.com/hashicorp/consul-template/child/child.go new file mode 100644 index 00000000000..7577bdd1d26 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/child/child.go @@ -0,0 +1,344 @@ +package child + +import ( + "errors" + "io" + "log" + "math/rand" + "os" + "os/exec" + "sync" + "syscall" + "time" +) + +var ( + // ErrMissingCommand is the error returned when no command is specified + // to run. + ErrMissingCommand error = errors.New("missing command") + + // ExitCodeOK is the default OK exit code. + ExitCodeOK int = 0 + + // ExitCodeError is the default error code returned when the child exits with + // an error without a more specific code. + ExitCodeError int = 127 +) + +// Child is a wrapper around a child process which can be used to send signals +// and manage the processes' lifecycle. 
+type Child struct { + sync.RWMutex + + stdin io.Reader + stdout, stderr io.Writer + command string + args []string + + reloadSignal os.Signal + + killSignal os.Signal + killTimeout time.Duration + + splay time.Duration + + // cmd is the actual child process under management. + cmd *exec.Cmd + + // exitCh is the channel where the processes exit will be returned. + exitCh chan int + + // stopLock is the mutex to lock when stopping. stopCh is the circuit breaker + // to force-terminate any waiting splays to kill the process now. stopped is + // a boolean that tells us if we have previously been stopped. + stopLock sync.RWMutex + stopCh chan struct{} + stopped bool +} + +// NewInput is input to the NewChild function. +type NewInput struct { + // Stdin is the io.Reader where input will come from. This is sent directly to + // the child process. Stdout and Stderr represent the io.Writer objects where + // the child process will send output and errorput. + Stdin io.Reader + Stdout, Stderr io.Writer + + // Command is the name of the command to execute. Args are the list of + // arguments to pass when starting the command. + Command string + Args []string + + // ReloadSignal is the signal to send to reload this process. This value may + // be nil. + ReloadSignal os.Signal + + // KillSignal is the signal to send to gracefully kill this process. This + // value may be nil. + KillSignal os.Signal + + // KillTimeout is the amount of time to wait for the process to gracefully + // terminate before force-killing. + KillTimeout time.Duration + + // Splay is the maximum random amount of time to wait before sending signals. + // This option helps reduce the thundering herd problem by effectively + // sleeping for a random amount of time before sending the signal. This + // prevents multiple processes from all signaling at the same time. This value + // may be zero (which disables the splay entirely). + Splay time.Duration +} + +// New creates a new child process for management with high-level APIs for +// sending signals to the child process, restarting the child process, and +// gracefully terminating the child process. +func New(i *NewInput) (*Child, error) { + if i == nil { + i = new(NewInput) + } + + if len(i.Command) == 0 { + return nil, ErrMissingCommand + } + + child := &Child{ + stdin: i.Stdin, + stdout: i.Stdout, + stderr: i.Stderr, + command: i.Command, + args: i.Args, + reloadSignal: i.ReloadSignal, + killSignal: i.KillSignal, + killTimeout: i.KillTimeout, + splay: i.Splay, + stopCh: make(chan struct{}, 1), + } + + return child, nil +} + +// ExitCh returns the current exit channel for this child process. This channel +// may change if the process is restarted, so implementers must not cache this +// value. +func (c *Child) ExitCh() <-chan int { + c.RLock() + defer c.RUnlock() + return c.exitCh +} + +// Pid returns the pid of the child process. If no child process exists, 0 is +// returned. +func (c *Child) Pid() int { + c.RLock() + defer c.RUnlock() + return c.pid() +} + +// Start starts and begins execution of the child process. A buffered channel +// is returned which is where the command's exit code will be returned upon +// exit. Any errors that occur prior to starting the command will be returned +// as the second error argument, but any errors returned by the command after +// execution will be returned as a non-zero value over the exit code channel. 
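For orientation, the lifecycle formed by New, Start, ExitCh, and Stop is easiest to see end to end. A minimal sketch of a hypothetical caller follows (the package main wrapper, the sleep command, and the timeout values are illustrative; only APIs defined in this file are used, and error handling is abbreviated):

package main

import (
	"log"
	"os"
	"syscall"
	"time"

	"github.com/hashicorp/consul-template/child"
)

func main() {
	c, err := child.New(&child.NewInput{
		Stdout:      os.Stdout,
		Stderr:      os.Stderr,
		Command:     "sleep",
		Args:        []string{"5"},
		KillSignal:  syscall.SIGTERM,
		KillTimeout: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := c.Start(); err != nil {
		log.Fatal(err)
	}
	code := <-c.ExitCh() // blocks until the child exits or is killed
	log.Printf("[INFO] child exited with code %d", code)
	c.Stop()
}

The Start implementation and the remaining lifecycle methods follow.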
+func (c *Child) Start() error { + log.Printf("[INFO] (child) spawning %q %q", c.command, c.args) + c.Lock() + defer c.Unlock() + return c.start() +} + +// Signal sends the signal to the child process, returning any errors that +// occur. +func (c *Child) Signal(s os.Signal) error { + log.Printf("[INFO] (child) receiving signal %q", s.String()) + c.RLock() + defer c.RUnlock() + return c.signal(s) +} + +// Reload sends the reload signal to the child process and does not wait for a +// response. If no reload signal was provided, the process is restarted and +// replaces the process attached to this Child. +func (c *Child) Reload() error { + if c.reloadSignal == nil { + log.Printf("[INFO] (child) restarting process") + + // Take a full lock because start is going to replace the process. We also + // want to make sure that no other routines attempt to send reload signals + // during this transition. + c.Lock() + defer c.Unlock() + + c.kill() + return c.start() + } else { + log.Printf("[INFO] (child) reloading process") + + // We only need a read lock here because neither the process nor the exit + // channel are changing. + c.RLock() + defer c.RUnlock() + + return c.reload() + } +} + +// Kill sends the kill signal to the child process and waits for successful +// termination. If no kill signal is defined, the process is killed with the +// most aggressive kill signal. If the process does not gracefully stop within +// the provided KillTimeout, the process is force-killed. If a splay was +// provided, this function will sleep for a random period of time between 0 and +// the provided splay value to reduce the thundering herd problem. This function +// does not return any errors because it guarantees the process will be dead by +// the return of the function call. +func (c *Child) Kill() { + log.Printf("[INFO] (child) killing process") + c.Lock() + defer c.Unlock() + c.kill() +} + +// Stop behaves almost identical to Kill except it supresses future processes +// from being started by this child and it prevents the killing of the child +// process from sending its value back up the exit channel. This is useful +// when doing a graceful shutdown of an application. +func (c *Child) Stop() { + log.Printf("[INFO] (child) stopping process") + + c.Lock() + defer c.Unlock() + + c.stopLock.Lock() + defer c.stopLock.Unlock() + if c.stopped { + log.Printf("[WARN] (child) already stopped") + return + } + c.kill() + close(c.stopCh) + c.stopped = true +} + +func (c *Child) start() error { + cmd := exec.Command(c.command, c.args...) + cmd.Stdin = c.stdin + cmd.Stdout = c.stdout + cmd.Stderr = c.stderr + if err := cmd.Start(); err != nil { + return err + } + c.cmd = cmd + + // Create a new exitCh so that previously invoked commands (if any) don't + // cause us to exit, and start a goroutine to wait for that process to end. + exitCh := make(chan int, 1) + go func() { + var code int + err := cmd.Wait() + if err == nil { + code = ExitCodeOK + } else { + code = ExitCodeError + if exiterr, ok := err.(*exec.ExitError); ok { + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + code = status.ExitStatus() + } + } + } + + // If the child is in the process of killing, do not send a response back + // down the exit channel. 
+ c.stopLock.RLock() + defer c.stopLock.RUnlock() + if c.stopped { + return + } + + select { + case <-c.stopCh: + case exitCh <- code: + } + + }() + + c.exitCh = exitCh + return nil +} + +func (c *Child) pid() int { + if !c.running() { + return 0 + } + return c.cmd.Process.Pid +} + +func (c *Child) signal(s os.Signal) error { + if !c.running() { + return nil + } + return c.cmd.Process.Signal(s) +} + +func (c *Child) reload() error { + select { + case <-c.stopCh: + case <-c.randomSplay(): + } + + return c.signal(c.reloadSignal) +} + +func (c *Child) kill() { + if !c.running() { + return + } + + exited := false + process := c.cmd.Process + + select { + case <-c.stopCh: + case <-c.randomSplay(): + } + + if c.killSignal != nil { + if err := process.Signal(c.killSignal); err == nil { + // Wait a few seconds for it to exit + killCh := make(chan struct{}, 1) + go func() { + defer close(killCh) + process.Wait() + }() + + select { + case <-c.stopCh: + case <-killCh: + exited = true + case <-time.After(c.killTimeout): + } + } + } + + if !exited { + process.Kill() + } + + c.cmd = nil +} + +func (c *Child) running() bool { + return c.cmd != nil && c.cmd.Process != nil +} + +func (c *Child) randomSplay() <-chan time.Time { + if c.splay == 0 { + return time.After(0) + } + + ns := c.splay.Nanoseconds() + offset := rand.Int63n(ns) + t := time.Duration(offset) + + log.Printf("[DEBUG] (child) waiting %.2fs for random splay", t.Seconds()) + + return time.After(t) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/config.go b/vendor/github.com/hashicorp/consul-template/config/config.go new file mode 100644 index 00000000000..b283ae5b82a --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/config.go @@ -0,0 +1,880 @@ +package config + +import ( + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "regexp" + "strings" + "syscall" + "time" + + "github.com/hashicorp/consul-template/signals" + "github.com/hashicorp/consul-template/watch" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" + "github.com/mitchellh/mapstructure" +) + +// The pattern to split the config template syntax on +var configTemplateRe = regexp.MustCompile("([a-zA-Z]:)?([^:]+)") + +const ( + // DefaultFilePerms are the default file permissions for templates rendered + // onto disk when a specific file permission has not already been specified. + DefaultFilePerms = 0644 + + // DefaultDedupPrefix is the default prefix used for de-duplication mode + DefaultDedupPrefix = "consul-template/dedup/" + + // DefaultCommandTimeout is the amount of time to wait for a command to return. + DefaultCommandTimeout = 30 * time.Second + + // DefaultReloadSignal is the default signal for reload. + DefaultReloadSignal = syscall.SIGHUP + + // DefaultDumpSignal is the default signal for a core dump. + DefaultDumpSignal = syscall.SIGQUIT + + // DefaultKillSignal is the default signal for termination. + DefaultKillSignal = syscall.SIGINT +) + +// Config is used to configure Consul Template +type Config struct { + // Path is the path to this configuration file on disk. This value is not + // read from disk by rather dynamically populated by the code so the Config + // has a reference to the path to the file on disk that created it. + Path string `mapstructure:"-"` + + // Consul is the location of the Consul instance to query (may be an IP + // address or FQDN) with port. 
+ Consul string `mapstructure:"consul"` + + // Token is the Consul API token. + Token string `mapstructure:"token"` + + // ReloadSignal is the signal to listen for a reload event. + ReloadSignal os.Signal `mapstructure:"reload_signal"` + + // DumpSignal is the signal to listen for a core dump event. + DumpSignal os.Signal `mapstructure:"dump_signal"` + + // KillSignal is the signal to listen for a graceful terminate event. + KillSignal os.Signal `mapstructure:"kill_signal"` + + // Auth is the HTTP basic authentication for communicating with Consul. + Auth *AuthConfig `mapstructure:"auth"` + + // Vault is the configuration for connecting to a vault server. + Vault *VaultConfig `mapstructure:"vault"` + + // SSL indicates we should use a secure connection while talking to + // Consul. This requires Consul to be configured to serve HTTPS. + SSL *SSLConfig `mapstructure:"ssl"` + + // Syslog is the configuration for syslog. + Syslog *SyslogConfig `mapstructure:"syslog"` + + // Exec is the configuration for exec/supervise mode. + Exec *ExecConfig `mapstructure:"exec"` + + // MaxStale is the maximum amount of time for staleness from Consul as given + // by LastContact. If supplied, Consul Template will query all servers instead + // of just the leader. + MaxStale time.Duration `mapstructure:"max_stale"` + + // ConfigTemplates is a slice of the ConfigTemplate objects in the config. + ConfigTemplates []*ConfigTemplate `mapstructure:"template"` + + // Retry is the duration of time to wait between Consul failures. + Retry time.Duration `mapstructure:"retry"` + + // Wait is the quiescence timers. + Wait *watch.Wait `mapstructure:"wait"` + + // PidFile is the path on disk where a PID file should be written containing + // this processes PID. + PidFile string `mapstructure:"pid_file"` + + // LogLevel is the level with which to log for this config. + LogLevel string `mapstructure:"log_level"` + + // Deduplicate is used to configure the dedup settings + Deduplicate *DeduplicateConfig `mapstructure:"deduplicate"` + + // setKeys is the list of config keys that were set by the user. + setKeys map[string]struct{} +} + +// Copy returns a deep copy of the current configuration. This is useful because +// the nested data structures may be shared. 
+func (c *Config) Copy() *Config { + config := new(Config) + config.Path = c.Path + config.Consul = c.Consul + config.Token = c.Token + config.ReloadSignal = c.ReloadSignal + config.DumpSignal = c.DumpSignal + config.KillSignal = c.KillSignal + + if c.Auth != nil { + config.Auth = &AuthConfig{ + Enabled: c.Auth.Enabled, + Username: c.Auth.Username, + Password: c.Auth.Password, + } + } + + if c.Vault != nil { + config.Vault = &VaultConfig{ + Address: c.Vault.Address, + Token: c.Vault.Token, + UnwrapToken: c.Vault.UnwrapToken, + RenewToken: c.Vault.RenewToken, + } + + if c.Vault.SSL != nil { + config.Vault.SSL = &SSLConfig{ + Enabled: c.Vault.SSL.Enabled, + Verify: c.Vault.SSL.Verify, + Cert: c.Vault.SSL.Cert, + Key: c.Vault.SSL.Key, + CaCert: c.Vault.SSL.CaCert, + } + } + } + + if c.SSL != nil { + config.SSL = &SSLConfig{ + Enabled: c.SSL.Enabled, + Verify: c.SSL.Verify, + Cert: c.SSL.Cert, + Key: c.SSL.Key, + CaCert: c.SSL.CaCert, + } + } + + if c.Syslog != nil { + config.Syslog = &SyslogConfig{ + Enabled: c.Syslog.Enabled, + Facility: c.Syslog.Facility, + } + } + + if c.Exec != nil { + config.Exec = &ExecConfig{ + Command: c.Exec.Command, + Splay: c.Exec.Splay, + ReloadSignal: c.Exec.ReloadSignal, + KillSignal: c.Exec.KillSignal, + KillTimeout: c.Exec.KillTimeout, + } + } + + config.MaxStale = c.MaxStale + + config.ConfigTemplates = make([]*ConfigTemplate, len(c.ConfigTemplates)) + for i, t := range c.ConfigTemplates { + config.ConfigTemplates[i] = &ConfigTemplate{ + Source: t.Source, + Destination: t.Destination, + EmbeddedTemplate: t.EmbeddedTemplate, + Command: t.Command, + CommandTimeout: t.CommandTimeout, + Perms: t.Perms, + Backup: t.Backup, + LeftDelim: t.LeftDelim, + RightDelim: t.RightDelim, + Wait: t.Wait, + } + } + + config.Retry = c.Retry + + if c.Wait != nil { + config.Wait = &watch.Wait{ + Min: c.Wait.Min, + Max: c.Wait.Max, + } + } + + config.PidFile = c.PidFile + config.LogLevel = c.LogLevel + + if c.Deduplicate != nil { + config.Deduplicate = &DeduplicateConfig{ + Enabled: c.Deduplicate.Enabled, + Prefix: c.Deduplicate.Prefix, + TTL: c.Deduplicate.TTL, + } + } + + config.setKeys = c.setKeys + + return config +} + +// Merge merges the values in config into this config object. Values in the +// config object overwrite the values in c. 
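The subtle part of Merge is its interaction with WasSet: a field from the incoming config only overwrites the receiver when its key was explicitly marked as set. A hypothetical sketch (the address and log level values are made up; only DefaultConfig, Set, and Merge from this package are used):

package main

import (
	"fmt"

	"github.com/hashicorp/consul-template/config"
)

func main() {
	base := config.DefaultConfig()

	override := &config.Config{Consul: "10.0.0.1:8500", LogLevel: "DEBUG"}
	override.Set("consul") // mark only "consul" as explicitly set

	base.Merge(override)

	// "consul" was marked as set, so it overwrites the base value;
	// "log_level" was not, so the base default is kept.
	fmt.Println(base.Consul, base.LogLevel)
}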
+func (c *Config) Merge(config *Config) { + if config.WasSet("path") { + c.Path = config.Path + } + + if config.WasSet("consul") { + c.Consul = config.Consul + } + + if config.WasSet("token") { + c.Token = config.Token + } + + if config.WasSet("reload_signal") { + c.ReloadSignal = config.ReloadSignal + } + + if config.WasSet("dump_signal") { + c.DumpSignal = config.DumpSignal + } + + if config.WasSet("kill_signal") { + c.KillSignal = config.KillSignal + } + + if config.WasSet("vault") { + if c.Vault == nil { + c.Vault = &VaultConfig{} + } + if config.WasSet("vault.address") { + c.Vault.Address = config.Vault.Address + } + if config.WasSet("vault.token") { + c.Vault.Token = config.Vault.Token + } + if config.WasSet("vault.unwrap_token") { + c.Vault.UnwrapToken = config.Vault.UnwrapToken + } + if config.WasSet("vault.renew_token") { + c.Vault.RenewToken = config.Vault.RenewToken + } + if config.WasSet("vault.ssl") { + if c.Vault.SSL == nil { + c.Vault.SSL = &SSLConfig{} + } + if config.WasSet("vault.ssl.verify") { + c.Vault.SSL.Verify = config.Vault.SSL.Verify + c.Vault.SSL.Enabled = true + } + if config.WasSet("vault.ssl.cert") { + c.Vault.SSL.Cert = config.Vault.SSL.Cert + c.Vault.SSL.Enabled = true + } + if config.WasSet("vault.ssl.key") { + c.Vault.SSL.Key = config.Vault.SSL.Key + c.Vault.SSL.Enabled = true + } + if config.WasSet("vault.ssl.ca_cert") { + c.Vault.SSL.CaCert = config.Vault.SSL.CaCert + c.Vault.SSL.Enabled = true + } + if config.WasSet("vault.ssl.enabled") { + c.Vault.SSL.Enabled = config.Vault.SSL.Enabled + } + } + } + + if config.WasSet("auth") { + if c.Auth == nil { + c.Auth = &AuthConfig{} + } + if config.WasSet("auth.username") { + c.Auth.Username = config.Auth.Username + c.Auth.Enabled = true + } + if config.WasSet("auth.password") { + c.Auth.Password = config.Auth.Password + c.Auth.Enabled = true + } + if config.WasSet("auth.enabled") { + c.Auth.Enabled = config.Auth.Enabled + } + } + + if config.WasSet("ssl") { + if c.SSL == nil { + c.SSL = &SSLConfig{} + } + if config.WasSet("ssl.verify") { + c.SSL.Verify = config.SSL.Verify + c.SSL.Enabled = true + } + if config.WasSet("ssl.cert") { + c.SSL.Cert = config.SSL.Cert + c.SSL.Enabled = true + } + if config.WasSet("ssl.key") { + c.SSL.Key = config.SSL.Key + c.SSL.Enabled = true + } + if config.WasSet("ssl.ca_cert") { + c.SSL.CaCert = config.SSL.CaCert + c.SSL.Enabled = true + } + if config.WasSet("ssl.enabled") { + c.SSL.Enabled = config.SSL.Enabled + } + } + + if config.WasSet("syslog") { + if c.Syslog == nil { + c.Syslog = &SyslogConfig{} + } + if config.WasSet("syslog.facility") { + c.Syslog.Facility = config.Syslog.Facility + c.Syslog.Enabled = true + } + if config.WasSet("syslog.enabled") { + c.Syslog.Enabled = config.Syslog.Enabled + } + } + + if config.WasSet("exec") { + if c.Exec == nil { + c.Exec = &ExecConfig{} + } + if config.WasSet("exec.command") { + c.Exec.Command = config.Exec.Command + } + if config.WasSet("exec.splay") { + c.Exec.Splay = config.Exec.Splay + } + if config.WasSet("exec.reload_signal") { + c.Exec.ReloadSignal = config.Exec.ReloadSignal + } + if config.WasSet("exec.kill_signal") { + c.Exec.KillSignal = config.Exec.KillSignal + } + if config.WasSet("exec.kill_timeout") { + c.Exec.KillTimeout = config.Exec.KillTimeout + } + } + + if config.WasSet("max_stale") { + c.MaxStale = config.MaxStale + } + + if len(config.ConfigTemplates) > 0 { + if c.ConfigTemplates == nil { + c.ConfigTemplates = make([]*ConfigTemplate, 0, 1) + } + for _, template := range config.ConfigTemplates { + c.ConfigTemplates 
= append(c.ConfigTemplates, &ConfigTemplate{ + Source: template.Source, + Destination: template.Destination, + EmbeddedTemplate: template.EmbeddedTemplate, + Command: template.Command, + CommandTimeout: template.CommandTimeout, + Perms: template.Perms, + Backup: template.Backup, + LeftDelim: template.LeftDelim, + RightDelim: template.RightDelim, + Wait: template.Wait, + }) + } + } + + if config.WasSet("retry") { + c.Retry = config.Retry + } + + if config.WasSet("wait") { + c.Wait = &watch.Wait{ + Min: config.Wait.Min, + Max: config.Wait.Max, + } + } + + if config.WasSet("pid_file") { + c.PidFile = config.PidFile + } + + if config.WasSet("log_level") { + c.LogLevel = config.LogLevel + } + + if config.WasSet("deduplicate") { + if c.Deduplicate == nil { + c.Deduplicate = &DeduplicateConfig{} + } + if config.WasSet("deduplicate.enabled") { + c.Deduplicate.Enabled = config.Deduplicate.Enabled + } + if config.WasSet("deduplicate.prefix") { + c.Deduplicate.Prefix = config.Deduplicate.Prefix + } + } + + if c.setKeys == nil { + c.setKeys = make(map[string]struct{}) + } + for k := range config.setKeys { + if _, ok := c.setKeys[k]; !ok { + c.setKeys[k] = struct{}{} + } + } +} + +// WasSet determines if the given key was set in the config (as opposed to just +// having the default value). +func (c *Config) WasSet(key string) bool { + if _, ok := c.setKeys[key]; ok { + return true + } + return false +} + +// Set is a helper function for marking a key as set. +func (c *Config) Set(key string) { + if c.setKeys == nil { + c.setKeys = make(map[string]struct{}) + } + if _, ok := c.setKeys[key]; !ok { + c.setKeys[key] = struct{}{} + } +} + +// ParseConfig reads the configuration file at the given path and returns a new +// Config struct with the data populated. +func ParseConfig(path string) (*Config, error) { + var errs *multierror.Error + + // Read the contents of the file + contents, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("error reading config at %q: %s", path, err) + } + + // Parse the file (could be HCL or JSON) + var shadow interface{} + if err := hcl.Decode(&shadow, string(contents)); err != nil { + return nil, fmt.Errorf("error decoding config at %q: %s", path, err) + } + + // Convert to a map and flatten the keys we want to flatten + parsed, ok := shadow.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("error converting config at %q", path) + } + flattenKeys(parsed, []string{ + "auth", + "ssl", + "syslog", + "exec", + "vault", + "deduplicate", + }) + + // Deprecations + if vault, ok := parsed["vault"].(map[string]interface{}); ok { + if val, ok := vault["renew"]; ok { + log.Println(`[WARN] vault.renew has been renamed to vault.renew_token. 
` + + `Update your configuration files and change "renew" to "renew_token".`) + vault["renew_token"] = val + delete(vault, "renew") + } + } + + // Create a new, empty config + config := new(Config) + + // Use mapstructure to populate the basic config fields + metadata := new(mapstructure.Metadata) + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + StringToFileModeFunc(), + signals.StringToSignalFunc(), + watch.StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ), + ErrorUnused: true, + Metadata: metadata, + Result: config, + }) + if err != nil { + errs = multierror.Append(errs, err) + return nil, errs.ErrorOrNil() + } + if err := decoder.Decode(parsed); err != nil { + errs = multierror.Append(errs, err) + return nil, errs.ErrorOrNil() + } + + // Store a reference to the path where this config was read from + config.Path = path + + // Explicitly check for the nil signal and set the value back to nil + if config.ReloadSignal == signals.SIGNIL { + config.ReloadSignal = nil + } + if config.DumpSignal == signals.SIGNIL { + config.DumpSignal = nil + } + if config.KillSignal == signals.SIGNIL { + config.KillSignal = nil + } + if config.Exec != nil { + if config.Exec.ReloadSignal == signals.SIGNIL { + config.Exec.ReloadSignal = nil + } + if config.Exec.KillSignal == signals.SIGNIL { + config.Exec.KillSignal = nil + } + } + + // Setup default values for templates + for _, t := range config.ConfigTemplates { + // Ensure there's a default value for the template's file permissions + if t.Perms == 0000 { + t.Perms = DefaultFilePerms + } + + // Ensure we have a default command timeout + if t.CommandTimeout == 0 { + t.CommandTimeout = DefaultCommandTimeout + } + + // Set up a default zero wait, which disables it for this + // template. + if t.Wait == nil { + t.Wait = &watch.Wait{} + } + } + + // Update the list of set keys + if config.setKeys == nil { + config.setKeys = make(map[string]struct{}) + } + for _, key := range metadata.Keys { + if _, ok := config.setKeys[key]; !ok { + config.setKeys[key] = struct{}{} + } + } + config.setKeys["path"] = struct{}{} + + d := DefaultConfig() + d.Merge(config) + config = d + + return config, errs.ErrorOrNil() +} + +// ConfigFromPath iterates and merges all configuration files in a given +// directory, returning the resulting config. +func ConfigFromPath(path string) (*Config, error) { + // Ensure the given filepath exists + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil, fmt.Errorf("config: missing file/folder: %s", path) + } + + // Check if a file was given or a path to a directory + stat, err := os.Stat(path) + if err != nil { + return nil, fmt.Errorf("config: error stating file: %s", err) + } + + // Recursively parse directories, single load files + if stat.Mode().IsDir() { + // Ensure the given filepath has at least one config file + _, err := ioutil.ReadDir(path) + if err != nil { + return nil, fmt.Errorf("config: error listing directory: %s", err) + } + + // Create a blank config to merge off of + config := DefaultConfig() + + // Potential bug: Walk does not follow symlinks! 
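+ // Concretely: filepath.Walk uses Lstat on each entry, so a directory reachable only
+ // through a symlink is not descended into, while a symlinked regular file is still
+ // read by ParseConfig below.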
+ err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + // If WalkFunc had an error, just return it + if err != nil { + return err + } + + // Do nothing for directories + if info.IsDir() { + return nil + } + + // Parse and merge the config + newConfig, err := ParseConfig(path) + if err != nil { + return err + } + config.Merge(newConfig) + + return nil + }) + + if err != nil { + return nil, fmt.Errorf("config: walk error: %s", err) + } + + return config, nil + } else if stat.Mode().IsRegular() { + return ParseConfig(path) + } + + return nil, fmt.Errorf("config: unknown filetype: %q", stat.Mode().String()) +} + +// DefaultConfig returns the default configuration struct. +func DefaultConfig() *Config { + logLevel := os.Getenv("CONSUL_TEMPLATE_LOG") + if logLevel == "" { + logLevel = "WARN" + } + + config := &Config{ + Vault: &VaultConfig{ + RenewToken: true, + SSL: &SSLConfig{ + Enabled: true, + Verify: true, + }, + }, + Auth: &AuthConfig{ + Enabled: false, + }, + ReloadSignal: DefaultReloadSignal, + DumpSignal: DefaultDumpSignal, + KillSignal: DefaultKillSignal, + SSL: &SSLConfig{ + Enabled: false, + Verify: true, + }, + Syslog: &SyslogConfig{ + Enabled: false, + Facility: "LOCAL0", + }, + Deduplicate: &DeduplicateConfig{ + Enabled: false, + Prefix: DefaultDedupPrefix, + TTL: 15 * time.Second, + }, + Exec: &ExecConfig{ + KillSignal: syscall.SIGTERM, + KillTimeout: 30 * time.Second, + }, + ConfigTemplates: make([]*ConfigTemplate, 0), + Retry: 5 * time.Second, + MaxStale: 1 * time.Second, + Wait: &watch.Wait{}, + LogLevel: logLevel, + setKeys: make(map[string]struct{}), + } + + if v := os.Getenv("CONSUL_HTTP_ADDR"); v != "" { + config.Consul = v + } + + if v := os.Getenv("CONSUL_TOKEN"); v != "" { + config.Token = v + } + + if v := os.Getenv("VAULT_ADDR"); v != "" { + config.Vault.Address = v + } + + if v := os.Getenv("VAULT_TOKEN"); v != "" { + config.Vault.Token = v + } + + if v := os.Getenv("VAULT_UNWRAP_TOKEN"); v != "" { + config.Vault.UnwrapToken = true + } + + if v := os.Getenv("VAULT_CAPATH"); v != "" { + config.Vault.SSL.Cert = v + } + + if v := os.Getenv("VAULT_CACERT"); v != "" { + config.Vault.SSL.CaCert = v + } + + if v := os.Getenv("VAULT_SKIP_VERIFY"); v != "" { + config.Vault.SSL.Verify = false + } + + return config +} + +// AuthConfig is the HTTP basic authentication data. +type AuthConfig struct { + Enabled bool `mapstructure:"enabled"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` +} + +// String is the string representation of this authentication. If authentication +// is not enabled, this returns the empty string. The username and password will +// be separated by a colon. +func (a *AuthConfig) String() string { + if !a.Enabled { + return "" + } + + if a.Password != "" { + return fmt.Sprintf("%s:%s", a.Username, a.Password) + } + + return a.Username +} + +// ExecConfig is used to configure the application when it runs in +// exec/supervise mode. +type ExecConfig struct { + // Command is the command to execute and watch as a child process. + Command string `mapstructure:"command"` + + // Splay is the maximum amount of time to wait to kill the process. + Splay time.Duration `mapstructure:"splay"` + + // ReloadSignal is the signal to send to the child process when a template + // changes. This tells the child process that templates have + ReloadSignal os.Signal `mapstructure:"reload_signal"` + + // KillSignal is the signal to send to the command to kill it gracefully. The + // default value is "SIGTERM". 
+ KillSignal os.Signal `mapstructure:"kill_signal"` + + // KillTimeout is the amount of time to give the process to cleanup before + // hard-killing it. + KillTimeout time.Duration `mapstructure:"kill_timeout"` +} + +// DeduplicateConfig is used to enable the de-duplication mode, which depends +// on electing a leader per-template and watching of a key. This is used +// to reduce the cost of many instances of CT running the same template. +type DeduplicateConfig struct { + // Controls if deduplication mode is enabled + Enabled bool `mapstructure:"enabled"` + + // Controls the KV prefix used. Defaults to defaultDedupPrefix + Prefix string `mapstructure:"prefix"` + + // TTL is the Session TTL used for lock acquisition, defaults to 15 seconds. + TTL time.Duration `mapstructure:"ttl"` +} + +// SSLConfig is the configuration for SSL. +type SSLConfig struct { + Enabled bool `mapstructure:"enabled"` + Verify bool `mapstructure:"verify"` + Cert string `mapstructure:"cert"` + Key string `mapstructure:"key"` + CaCert string `mapstructure:"ca_cert"` +} + +// SyslogConfig is the configuration for syslog. +type SyslogConfig struct { + Enabled bool `mapstructure:"enabled"` + Facility string `mapstructure:"facility"` +} + +// ConfigTemplate is the representation of an input template, output location, +// and optional command to execute when rendered +type ConfigTemplate struct { + Source string `mapstructure:"source"` + Destination string `mapstructure:"destination"` + EmbeddedTemplate string `mapstructure:"contents"` + Command string `mapstructure:"command"` + CommandTimeout time.Duration `mapstructure:"command_timeout"` + Perms os.FileMode `mapstructure:"perms"` + Backup bool `mapstructure:"backup"` + LeftDelim string `mapstructure:"left_delimiter"` + RightDelim string `mapstructure:"right_delimiter"` + Wait *watch.Wait `mapstructure:"wait"` +} + +// VaultConfig is the configuration for connecting to a vault server. +type VaultConfig struct { + Address string `mapstructure:"address"` + Token string `mapstructure:"token" json:"-"` + UnwrapToken bool `mapstructure:"unwrap_token"` + RenewToken bool `mapstructure:"renew_token"` + + // SSL indicates we should use a secure connection while talking to Vault. + SSL *SSLConfig `mapstructure:"ssl"` +} + +// ParseConfigTemplate parses a string into a ConfigTemplate struct +func ParseConfigTemplate(s string) (*ConfigTemplate, error) { + if len(strings.TrimSpace(s)) < 1 { + return nil, errors.New("cannot specify empty template declaration") + } + + var source, destination, command string + parts := configTemplateRe.FindAllString(s, -1) + + switch len(parts) { + case 1: + source = parts[0] + case 2: + source, destination = parts[0], parts[1] + case 3: + source, destination, command = parts[0], parts[1], parts[2] + default: + return nil, errors.New("invalid template declaration format") + } + + return &ConfigTemplate{ + Source: source, + Destination: destination, + Command: command, + CommandTimeout: DefaultCommandTimeout, + Perms: DefaultFilePerms, + Wait: &watch.Wait{}, + }, nil +} + +// flattenKeys is a function that takes a map[string]interface{} and recursively +// flattens any keys that are a []map[string]interface{} where the key is in the +// given list of keys. 
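HCL decodes a named block as a slice of maps, which is why these keys must be collapsed before mapstructure can populate the typed config structs. A hypothetical, package-internal illustration (flattenKeys is unexported; the map contents are made up):

m := map[string]interface{}{
	"consul": "127.0.0.1:8500",
	"syslog": []map[string]interface{}{
		{"enabled": true, "facility": "LOCAL5"},
	},
}
flattenKeys(m, []string{"syslog"})
// m["syslog"] is now map[string]interface{}{"enabled": true, "facility": "LOCAL5"},
// which mapstructure can decode directly into a SyslogConfig.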
+func flattenKeys(m map[string]interface{}, keys []string) { + keyMap := make(map[string]struct{}) + for _, key := range keys { + keyMap[key] = struct{}{} + } + + var flatten func(map[string]interface{}) + flatten = func(m map[string]interface{}) { + for k, v := range m { + if _, ok := keyMap[k]; !ok { + continue + } + + switch typed := v.(type) { + case []map[string]interface{}: + if len(typed) > 0 { + last := typed[len(typed)-1] + flatten(last) + m[k] = last + } else { + m[k] = nil + } + case map[string]interface{}: + flatten(typed) + m[k] = typed + default: + m[k] = v + } + } + } + + flatten(m) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/config_testing.go b/vendor/github.com/hashicorp/consul-template/config/config_testing.go new file mode 100644 index 00000000000..677373bce8c --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/config_testing.go @@ -0,0 +1,25 @@ +package config + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestConfig(contents string, t *testing.T) *Config { + f, err := ioutil.TempFile(os.TempDir(), "") + if err != nil { + t.Fatal(err) + } + + _, err = f.Write([]byte(contents)) + if err != nil { + t.Fatal(err) + } + + config, err := ParseConfig(f.Name()) + if err != nil { + t.Fatal(err) + } + return config +} diff --git a/vendor/github.com/hashicorp/consul-template/config/mapstructure.go b/vendor/github.com/hashicorp/consul-template/config/mapstructure.go new file mode 100644 index 00000000000..e788a0b67f7 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/mapstructure.go @@ -0,0 +1,33 @@ +package config + +import ( + "os" + "reflect" + "strconv" + + "github.com/mitchellh/mapstructure" +) + +// StringToFileModeFunc returns a function that converts strings to os.FileMode +// value. This is designed to be used with mapstructure for parsing out a +// filemode value. +func StringToFileModeFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(os.FileMode(0)) { + return data, nil + } + + // Convert it by parsing + v, err := strconv.ParseUint(data.(string), 8, 12) + if err != nil { + return data, err + } + return os.FileMode(v), nil + } +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go new file mode 100644 index 00000000000..de5925843f6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go @@ -0,0 +1,222 @@ +package dependency + +import ( + "encoding/gob" + "errors" + "fmt" + "log" + "regexp" + "sort" + "sync" + + "github.com/hashicorp/consul/api" +) + +func init() { + gob.Register([]*NodeDetail{}) + gob.Register([]*NodeService{}) +} + +// NodeDetail is a wrapper around the node and its services. +type NodeDetail struct { + Node *Node + Services NodeServiceList +} + +// NodeService is a service on a single node. +type NodeService struct { + ID string + Service string + Tags ServiceTags + Port int + Address string +} + +// CatalogNode represents a single node from the Consul catalog. +type CatalogNode struct { + sync.Mutex + + rawKey string + dataCenter string + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Consul API defined by the given client and returns a +// of NodeDetail object. 
+func (d *CatalogNode) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + consulOpts := opts.consulQueryOptions() + if d.dataCenter != "" { + consulOpts.Datacenter = d.dataCenter + } + + consul, err := clients.Consul() + if err != nil { + return nil, nil, fmt.Errorf("catalog node: error getting client: %s", err) + } + + nodeName := d.rawKey + if nodeName == "" { + log.Printf("[DEBUG] (%s) getting local agent name", d.Display()) + nodeName, err = consul.Agent().NodeName() + if err != nil { + return nil, nil, fmt.Errorf("catalog node: error getting local agent: %s", err) + } + } + + var n *api.CatalogNode + var qm *api.QueryMeta + dataCh := make(chan struct{}) + go func() { + log.Printf("[DEBUG] (%s) querying consul with %+v", d.Display(), consulOpts) + n, qm, err = consul.Catalog().Node(nodeName, consulOpts) + close(dataCh) + }() + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-dataCh: + } + + if err != nil { + return nil, nil, fmt.Errorf("catalog node: error fetching: %s", err) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + if n == nil { + log.Printf("[WARN] (%s) could not find node by that name", d.Display()) + var node *NodeDetail + return node, rm, nil + } + + services := make(NodeServiceList, 0, len(n.Services)) + for _, v := range n.Services { + services = append(services, &NodeService{ + ID: v.ID, + Service: v.Service, + Tags: ServiceTags(deepCopyAndSortTags(v.Tags)), + Port: v.Port, + Address: v.Address, + }) + } + sort.Stable(services) + + node := &NodeDetail{ + Node: &Node{ + Node: n.Node.Node, + Address: n.Node.Address, + }, + Services: services, + } + + return node, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogNode) CanShare() bool { + return false +} + +// HashCode returns a unique identifier. +func (d *CatalogNode) HashCode() string { + if d.dataCenter != "" { + return fmt.Sprintf("NodeDetail|%s@%s", d.rawKey, d.dataCenter) + } + return fmt.Sprintf("NodeDetail|%s", d.rawKey) +} + +// Display prints the human-friendly output. +func (d *CatalogNode) Display() string { + if d.dataCenter != "" { + return fmt.Sprintf("node(%s@%s)", d.rawKey, d.dataCenter) + } + return fmt.Sprintf(`"node(%s)"`, d.rawKey) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogNode) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseCatalogNode parses a name name and optional datacenter value. +// If the name is empty or not provided then the current agent is used. 
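// Illustrative usage of ParseCatalogNode (editor's sketch, not part of the
// vendored source; the node name and datacenter are example values):
//
//	cn, _ := ParseCatalogNode()                // the local agent's node
//	cn, _ = ParseCatalogNode("web-01")         // node "web-01" in the local datacenter
//	cn, _ = ParseCatalogNode("web-01", "@dc1") // node "web-01" in datacenter "dc1"
//	fmt.Println(cn.HashCode())                 // NodeDetail|web-01@dc1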
+func ParseCatalogNode(s ...string) (*CatalogNode, error) { + switch len(s) { + case 0: + cn := &CatalogNode{stopCh: make(chan struct{})} + return cn, nil + case 1: + cn := &CatalogNode{ + rawKey: s[0], + stopCh: make(chan struct{}), + } + return cn, nil + case 2: + dc := s[1] + + re := regexp.MustCompile(`\A` + + `(@(?P[[:word:]\.\-]+))?` + + `\z`) + names := re.SubexpNames() + match := re.FindAllStringSubmatch(dc, -1) + + if len(match) == 0 { + return nil, errors.New("invalid node dependency format") + } + + r := match[0] + + m := map[string]string{} + for i, n := range r { + if names[i] != "" { + m[names[i]] = n + } + } + + nd := &CatalogNode{ + rawKey: s[0], + dataCenter: m["datacenter"], + stopCh: make(chan struct{}), + } + + return nd, nil + default: + return nil, fmt.Errorf("expected 0, 1, or 2 arguments, got %d", len(s)) + } +} + +// Sorting + +// NodeServiceList is a sortable list of node service names. +type NodeServiceList []*NodeService + +func (s NodeServiceList) Len() int { return len(s) } +func (s NodeServiceList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s NodeServiceList) Less(i, j int) bool { + if s[i].Service == s[j].Service { + return s[i].ID <= s[j].ID + } + return s[i].Service <= s[j].Service +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go new file mode 100644 index 00000000000..b9ac07722c6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go @@ -0,0 +1,180 @@ +package dependency + +import ( + "encoding/gob" + "errors" + "fmt" + "log" + "regexp" + "sort" + "sync" + + "github.com/hashicorp/consul/api" +) + +func init() { + gob.Register([]*Node{}) +} + +// Node is a node entry in Consul +type Node struct { + Node string + Address string +} + +// CatalogNodes is the representation of all registered nodes in Consul. +type CatalogNodes struct { + sync.Mutex + + rawKey string + DataCenter string + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of Node objects +func (d *CatalogNodes) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + consulOpts := opts.consulQueryOptions() + if d.DataCenter != "" { + consulOpts.Datacenter = d.DataCenter + } + + consul, err := clients.Consul() + if err != nil { + return nil, nil, fmt.Errorf("catalog nodes: error getting client: %s", err) + } + + var n []*api.Node + var qm *api.QueryMeta + dataCh := make(chan struct{}) + go func() { + log.Printf("[DEBUG] (%s) querying Consul with %+v", d.Display(), consulOpts) + n, qm, err = consul.Catalog().Nodes(consulOpts) + close(dataCh) + }() + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-dataCh: + } + + if err != nil { + return nil, nil, fmt.Errorf("catalog nodes: error fetching: %s", err) + } + + log.Printf("[DEBUG] (%s) Consul returned %d nodes", d.Display(), len(n)) + + nodes := make([]*Node, 0, len(n)) + for _, node := range n { + nodes = append(nodes, &Node{ + Node: node.Node, + Address: node.Address, + }) + } + sort.Stable(NodeList(nodes)) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return nodes, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. 
+func (d *CatalogNodes) CanShare() bool { + return true +} + +// HashCode returns a unique identifier. +func (d *CatalogNodes) HashCode() string { + return fmt.Sprintf("CatalogNodes|%s", d.rawKey) +} + +// Display prints the human-friendly output. +func (d *CatalogNodes) Display() string { + if d.rawKey == "" { + return fmt.Sprintf(`"nodes"`) + } + + return fmt.Sprintf(`"nodes(%s)"`, d.rawKey) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogNodes) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseCatalogNodes parses a string of the format @dc. +func ParseCatalogNodes(s ...string) (*CatalogNodes, error) { + switch len(s) { + case 0: + cn := &CatalogNodes{ + rawKey: "", + stopCh: make(chan struct{}), + } + return cn, nil + case 1: + dc := s[0] + + re := regexp.MustCompile(`\A` + + `(@(?P[[:word:]\.\-]+))?` + + `\z`) + names := re.SubexpNames() + match := re.FindAllStringSubmatch(dc, -1) + + if len(match) == 0 { + return nil, errors.New("invalid node dependency format") + } + + r := match[0] + + m := map[string]string{} + for i, n := range r { + if names[i] != "" { + m[names[i]] = n + } + } + + cn := &CatalogNodes{ + rawKey: dc, + DataCenter: m["datacenter"], + stopCh: make(chan struct{}), + } + + return cn, nil + default: + return nil, fmt.Errorf("expected 0 or 1 arguments, got %d", len(s)) + } +} + +// NodeList is a sortable list of node objects by name and then IP address. +type NodeList []*Node + +func (s NodeList) Len() int { return len(s) } +func (s NodeList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s NodeList) Less(i, j int) bool { + if s[i].Node == s[j].Node { + return s[i].Address <= s[j].Address + } + return s[i].Node <= s[j].Node +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go new file mode 100644 index 00000000000..33dc1527e3d --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go @@ -0,0 +1,187 @@ +package dependency + +import ( + "encoding/gob" + "errors" + "fmt" + "log" + "regexp" + "sort" + "sync" + + "github.com/hashicorp/consul/api" +) + +func init() { + gob.Register([]*CatalogService{}) +} + +// CatalogService is a catalog entry in Consul. +type CatalogService struct { + Name string + Tags ServiceTags +} + +// CatalogServices is the representation of a requested catalog service +// dependency from inside a template. +type CatalogServices struct { + sync.Mutex + + rawKey string + Name string + Tags []string + DataCenter string + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of CatalogService objects. 
+func (d *CatalogServices) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + consulOpts := opts.consulQueryOptions() + if d.DataCenter != "" { + consulOpts.Datacenter = d.DataCenter + } + + consul, err := clients.Consul() + if err != nil { + return nil, nil, fmt.Errorf("catalog services: error getting client: %s", err) + } + + var entries map[string][]string + var qm *api.QueryMeta + dataCh := make(chan struct{}) + go func() { + log.Printf("[DEBUG] (%s) querying Consul with %+v", d.Display(), consulOpts) + entries, qm, err = consul.Catalog().Services(consulOpts) + close(dataCh) + }() + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-dataCh: + } + + if err != nil { + return nil, nil, fmt.Errorf("catalog services: error fetching: %s", err) + } + + log.Printf("[DEBUG] (%s) Consul returned %d catalog services", d.Display(), len(entries)) + + var catalogServices []*CatalogService + for name, tags := range entries { + tags = deepCopyAndSortTags(tags) + catalogServices = append(catalogServices, &CatalogService{ + Name: name, + Tags: ServiceTags(tags), + }) + } + + sort.Stable(CatalogServicesList(catalogServices)) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return catalogServices, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogServices) CanShare() bool { + return true +} + +// HashCode returns a unique identifier. +func (d *CatalogServices) HashCode() string { + return fmt.Sprintf("CatalogServices|%s", d.rawKey) +} + +// Display prints the human-friendly output. +func (d *CatalogServices) Display() string { + if d.rawKey == "" { + return fmt.Sprintf(`"services"`) + } + + return fmt.Sprintf(`"services(%s)"`, d.rawKey) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogServices) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseCatalogServices parses a string of the format @dc. +func ParseCatalogServices(s ...string) (*CatalogServices, error) { + switch len(s) { + case 0: + cs := &CatalogServices{ + rawKey: "", + stopCh: make(chan struct{}), + } + return cs, nil + case 1: + dc := s[0] + + re := regexp.MustCompile(`\A` + + `(@(?P[[:word:]\.\-]+))?` + + `\z`) + names := re.SubexpNames() + match := re.FindAllStringSubmatch(dc, -1) + + if len(match) == 0 { + return nil, errors.New("invalid catalog service dependency format") + } + + r := match[0] + + m := map[string]string{} + for i, n := range r { + if names[i] != "" { + m[names[i]] = n + } + } + + nd := &CatalogServices{ + rawKey: dc, + DataCenter: m["datacenter"], + stopCh: make(chan struct{}), + } + + return nd, nil + default: + return nil, fmt.Errorf("expected 0 or 1 arguments, got %d", len(s)) + } +} + +/// --- Sorting + +// CatalogServicesList is a sortable slice of CatalogService structs. 
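// Illustrative usage of ParseCatalogServices above (editor's sketch, not part
// of the vendored source; "dc1" is an example datacenter):
//
//	cs, _ := ParseCatalogServices()      // all services in the local datacenter
//	cs, _ = ParseCatalogServices("@dc1") // all services in datacenter "dc1"
//	fmt.Println(cs.Display())            // prints: "services(@dc1)"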
+type CatalogServicesList []*CatalogService + +func (s CatalogServicesList) Len() int { return len(s) } +func (s CatalogServicesList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s CatalogServicesList) Less(i, j int) bool { + if s[i].Name <= s[j].Name { + return true + } + return false +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/client_set.go b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go new file mode 100644 index 00000000000..a1f9bd3887d --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go @@ -0,0 +1,312 @@ +package dependency + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "log" + "net/http" + "sync" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-cleanhttp" + vaultapi "github.com/hashicorp/vault/api" +) + +// ClientSet is a collection of clients that dependencies use to communicate +// with remote services like Consul or Vault. +type ClientSet struct { + sync.RWMutex + + vault *vaultClient + consul *consulClient +} + +// consulClient is a wrapper around a real Consul API client. +type consulClient struct { + client *consulapi.Client + httpClient *http.Client +} + +// vaultClient is a wrapper around a real Vault API client. +type vaultClient struct { + client *vaultapi.Client + httpClient *http.Client +} + +// CreateConsulClientInput is used as input to the CreateConsulClient function. +type CreateConsulClientInput struct { + Address string + Token string + AuthEnabled bool + AuthUsername string + AuthPassword string + SSLEnabled bool + SSLVerify bool + SSLCert string + SSLKey string + SSLCACert string +} + +// CreateVaultClientInput is used as input to the CreateVaultClient function. +type CreateVaultClientInput struct { + Address string + Token string + UnwrapToken bool + SSLEnabled bool + SSLVerify bool + SSLCert string + SSLKey string + SSLCACert string +} + +// NewClientSet creates a new client set that is ready to accept clients. +func NewClientSet() *ClientSet { + return &ClientSet{} +} + +// CreateConsulClient creates a new Consul API client from the given input. +func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { + log.Printf("[INFO] (clients) creating consul/api client") + + // Generate the default config + consulConfig := consulapi.DefaultConfig() + + // Set the address + if i.Address != "" { + log.Printf("[DEBUG] (clients) setting consul address to %q", i.Address) + consulConfig.Address = i.Address + } + + // Configure the token + if i.Token != "" { + log.Printf("[DEBUG] (clients) setting consul token") + consulConfig.Token = i.Token + } + + // Add basic auth + if i.AuthEnabled { + log.Printf("[DEBUG] (clients) setting basic auth") + consulConfig.HttpAuth = &consulapi.HttpBasicAuth{ + Username: i.AuthUsername, + Password: i.AuthPassword, + } + } + + // This transport will attempt to keep connections open to the Consul server. 
+ transport := cleanhttp.DefaultPooledTransport() + + // Configure SSL + if i.SSLEnabled { + log.Printf("[DEBUG] (clients) enabling consul SSL") + consulConfig.Scheme = "https" + + var tlsConfig tls.Config + + // Custom certificate or certificate and key + if i.SSLCert != "" && i.SSLKey != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } else if i.SSLCert != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + // Custom CA certificate + if i.SSLCACert != "" { + cacert, err := ioutil.ReadFile(i.SSLCACert) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(cacert) + + tlsConfig.RootCAs = caCertPool + } + + // Construct all the certificates now + tlsConfig.BuildNameToCertificate() + + // SSL verification + if !i.SSLVerify { + log.Printf("[WARN] (clients) disabling consul SSL verification") + tlsConfig.InsecureSkipVerify = true + } + + // Save the TLS config on our transport + transport.TLSClientConfig = &tlsConfig + } + + // Setup the new transport + consulConfig.HttpClient.Transport = transport + + // Create the API client + client, err := consulapi.NewClient(consulConfig) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + + // Save the data on ourselves + c.consul = &consulClient{ + client: client, + httpClient: consulConfig.HttpClient, + } + + return nil +} + +func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error { + log.Printf("[INFO] (clients) creating vault/api client") + + // Generate the default config + vaultConfig := vaultapi.DefaultConfig() + + // Set the address + if i.Address != "" { + log.Printf("[DEBUG] (clients) setting vault address to %q", i.Address) + vaultConfig.Address = i.Address + } + + // This transport will attempt to keep connections open to the Vault server. 
+ transport := cleanhttp.DefaultPooledTransport() + + // Configure SSL + if i.SSLEnabled { + log.Printf("[DEBUG] (clients) enabling vault SSL") + var tlsConfig tls.Config + + // Custom certificate or certificate and key + if i.SSLCert != "" && i.SSLKey != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } else if i.SSLCert != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + // Custom CA certificate + if i.SSLCACert != "" { + cacert, err := ioutil.ReadFile(i.SSLCACert) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(cacert) + + tlsConfig.RootCAs = caCertPool + } + + // Construct all the certificates now + tlsConfig.BuildNameToCertificate() + + // SSL verification + if !i.SSLVerify { + log.Printf("[WARN] (clients) disabling vault SSL verification") + tlsConfig.InsecureSkipVerify = true + } + + // Save the TLS config on our transport + transport.TLSClientConfig = &tlsConfig + } + + // Setup the new transport + vaultConfig.HttpClient.Transport = transport + + // Create the client + client, err := vaultapi.NewClient(vaultConfig) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + + // Set the token if given + if i.Token != "" { + log.Printf("[DEBUG] (clients) setting vault token") + client.SetToken(i.Token) + } + + // Check if we are unwrapping + if i.UnwrapToken { + log.Printf("[INFO] (clients) unwrapping vault token") + secret, err := client.Logical().Unwrap(i.Token) + if err != nil { + return fmt.Errorf("client set: vault unwrap: %s", err) + } + + if secret == nil { + return fmt.Errorf("client set: vault unwrap: no secret") + } + + if secret.Auth == nil { + return fmt.Errorf("client set: vault unwrap: no secret auth") + } + + if secret.Auth.ClientToken == "" { + return fmt.Errorf("client set: vault unwrap: no token returned") + } + + client.SetToken(secret.Auth.ClientToken) + } + + // Save the data on ourselves + c.vault = &vaultClient{ + client: client, + httpClient: vaultConfig.HttpClient, + } + + return nil +} + +// Consul returns the Consul client for this clientset, or an error if no +// Consul client has been set. +func (c *ClientSet) Consul() (*consulapi.Client, error) { + c.RLock() + defer c.RUnlock() + + if c.consul == nil { + return nil, fmt.Errorf("clientset: missing consul client") + } + cp := new(consulapi.Client) + *cp = *c.consul.client + return cp, nil +} + +// Vault returns the Vault client for this clientset, or an error if no +// Vault client has been set. +func (c *ClientSet) Vault() (*vaultapi.Client, error) { + c.RLock() + defer c.RUnlock() + + if c.vault == nil { + return nil, fmt.Errorf("clientset: missing vault client") + } + cp := new(vaultapi.Client) + *cp = *c.vault.client + return cp, nil +} + +// Stop closes all idle connections for any attached clients. 
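// Typical lifecycle of a ClientSet (editor's illustrative sketch, not part of
// the vendored source; the address is an example value): build the set, attach
// the clients that are needed, and Stop it when the watcher shuts down.
//
//	clients := NewClientSet()
//	if err := clients.CreateConsulClient(&CreateConsulClientInput{
//	    Address: "127.0.0.1:8500",
//	}); err != nil {
//	    log.Fatal(err)
//	}
//	defer clients.Stop()
//
//	consul, _ := clients.Consul() // a copy of the underlying consul/api client
//	_ = consul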
+func (c *ClientSet) Stop() { + c.Lock() + defer c.Unlock() + + if c.consul != nil { + c.consul.httpClient.Transport.(*http.Transport).CloseIdleConnections() + } + + if c.vault != nil { + c.vault.httpClient.Transport.(*http.Transport).CloseIdleConnections() + } +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/datacenters.go b/vendor/github.com/hashicorp/consul-template/dependency/datacenters.go new file mode 100644 index 00000000000..25cc28e9026 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/datacenters.go @@ -0,0 +1,118 @@ +package dependency + +import ( + "fmt" + "log" + "sort" + "sync" + "time" +) + +var sleepTime = 15 * time.Second + +// Datacenters is the dependency to query all datacenters +type Datacenters struct { + sync.Mutex + + rawKey string + + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of strings representing the datacenters +func (d *Datacenters) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + log.Printf("[DEBUG] (%s) querying Consul with %+v", d.Display(), opts) + + // This is pretty ghetto, but the datacenters endpoint does not support + // blocking queries, so we are going to "fake it until we make it". When we + // first query, the LastIndex will be "0", meaning we should immediately + // return data, but future calls will include a LastIndex. If we have a + // LastIndex in the query metadata, sleep for 15 seconds before asking Consul + // again. + // + // This is probably okay given the frequency in which datacenters actually + // change, but is technically not edge-triggering. + if opts.WaitIndex != 0 { + log.Printf("[DEBUG] (%s) pretending to long-poll", d.Display()) + select { + case <-d.stopCh: + log.Printf("[DEBUG] (%s) received interrupt", d.Display()) + return nil, nil, ErrStopped + case <-time.After(sleepTime): + } + } + + consul, err := clients.Consul() + if err != nil { + return nil, nil, fmt.Errorf("datacenters: error getting client: %s", err) + } + + catalog := consul.Catalog() + result, err := catalog.Datacenters() + if err != nil { + return nil, nil, fmt.Errorf("datacenters: error fetching: %s", err) + } + + log.Printf("[DEBUG] (%s) Consul returned %d datacenters", d.Display(), len(result)) + sort.Strings(result) + + return respWithMetadata(result) +} + +// CanShare returns if this dependency is shareable. +func (d *Datacenters) CanShare() bool { + return true +} + +// HashCode returns the hash code for this dependency. +func (d *Datacenters) HashCode() string { + return fmt.Sprintf("Datacenters|%s", d.rawKey) +} + +// Display returns a string that should be displayed to the user in output (for +// example). +func (d *Datacenters) Display() string { + if d.rawKey == "" { + return fmt.Sprintf(`"datacenters"`) + } + + return fmt.Sprintf(`"datacenters(%s)"`, d.rawKey) +} + +// Stop terminates this dependency's execution early. +func (d *Datacenters) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseDatacenters creates a new datacenter dependency. 
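// Illustrative usage (editor's sketch, not part of the vendored source): the
// datacenters dependency takes no arguments; passing any returns an error.
//
//	dcs, err := ParseDatacenters()
//	if err != nil {
//	    log.Fatal(err)
//	}
//	fmt.Println(dcs.Display()) // prints: "datacenters"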
+func ParseDatacenters(s ...string) (*Datacenters, error) { + switch len(s) { + case 0: + dcs := &Datacenters{ + rawKey: "", + stopCh: make(chan struct{}, 0), + } + return dcs, nil + default: + return nil, fmt.Errorf("expected 0 arguments, got %d", len(s)) + } +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/dependency.go b/vendor/github.com/hashicorp/consul-template/dependency/dependency.go new file mode 100644 index 00000000000..f750257f17b --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/dependency.go @@ -0,0 +1,115 @@ +package dependency + +import ( + "errors" + "fmt" + "sort" + "time" + + consulapi "github.com/hashicorp/consul/api" +) + +// ErrStopped is a special error that is returned when a dependency is +// prematurely stopped, usually due to a configuration reload or a process +// interrupt. +var ErrStopped = errors.New("dependency stopped") + +// Dependency is an interface for a dependency that Consul Template is capable +// of watching. +type Dependency interface { + Fetch(*ClientSet, *QueryOptions) (interface{}, *ResponseMetadata, error) + CanShare() bool + HashCode() string + Display() string + Stop() +} + +// FetchError is a special kind of error returned by the Fetch method that +// contains additional metadata which informs the caller how to respond. This +// error implements the standard Error interface, so it can be passed as a +// regular error down the stack. +type FetchError struct { + originalError error + shouldExit bool +} + +func (e *FetchError) Error() string { + return e.originalError.Error() +} + +func (e *FetchError) OriginalError() error { + return e.originalError +} + +func (e *FetchError) ShouldExit() bool { + return e.shouldExit +} + +func ErrWithExit(err error) *FetchError { + return &FetchError{ + originalError: err, + shouldExit: true, + } +} + +func ErrWithExitf(s string, i ...interface{}) *FetchError { + return ErrWithExit(fmt.Errorf(s, i...)) +} + +// ServiceTags is a slice of tags assigned to a Service +type ServiceTags []string + +// Contains returns true if the tags exists in the ServiceTags slice. +func (t ServiceTags) Contains(s string) bool { + for _, v := range t { + if v == s { + return true + } + } + return false +} + +// QueryOptions is a list of options to send with the query. These options are +// client-agnostic, and the dependency determines which, if any, of the options +// to use. +type QueryOptions struct { + AllowStale bool + WaitIndex uint64 + WaitTime time.Duration +} + +// Converts the query options to Consul API ready query options. +func (r *QueryOptions) consulQueryOptions() *consulapi.QueryOptions { + return &consulapi.QueryOptions{ + AllowStale: r.AllowStale, + WaitIndex: r.WaitIndex, + WaitTime: r.WaitTime, + } +} + +// ResponseMetadata is a struct that contains metadata about the response. This +// is returned from a Fetch function call. +type ResponseMetadata struct { + LastIndex uint64 + LastContact time.Duration +} + +// deepCopyAndSortTags deep copies the tags in the given string slice and then +// sorts and returns the copied result. +func deepCopyAndSortTags(tags []string) []string { + newTags := make([]string, 0, len(tags)) + for _, tag := range tags { + newTags = append(newTags, tag) + } + sort.Strings(newTags) + return newTags +} + +// respWithMetadata is a short wrapper to return the given interface with fake +// response metadata for non-Consul dependencies. 
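// Illustrative pattern (editor's sketch, not part of the vendored source):
// a dependency that produces its data locally returns it through
// respWithMetadata rather than building ResponseMetadata by hand, as the file
// dependency below does. The d.path field here is hypothetical.
//
//	contents, err := ioutil.ReadFile(d.path)
//	if err != nil {
//	    return nil, nil, err
//	}
//	return respWithMetadata(string(contents))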
+func respWithMetadata(i interface{}) (interface{}, *ResponseMetadata, error) { + return i, &ResponseMetadata{ + LastContact: 0, + LastIndex: uint64(time.Now().Unix()), + }, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/file.go b/vendor/github.com/hashicorp/consul-template/dependency/file.go new file mode 100644 index 00000000000..0c34ae7521f --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/file.go @@ -0,0 +1,135 @@ +package dependency + +import ( + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "sync" + "time" +) + +// File represents a local file dependency. +type File struct { + sync.Mutex + mutex sync.RWMutex + rawKey string + lastStat os.FileInfo + stopped bool + stopCh chan struct{} +} + +// Fetch retrieves this dependency and returns the result or any errors that +// occur in the process. +func (d *File) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + var err error + var newStat os.FileInfo + var data []byte + + dataCh := make(chan struct{}) + go func() { + log.Printf("[DEBUG] (%s) querying file", d.Display()) + newStat, err = d.watch() + close(dataCh) + }() + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-dataCh: + } + + if err != nil { + return "", nil, fmt.Errorf("file: error watching: %s", err) + } + + d.mutex.Lock() + defer d.mutex.Unlock() + d.lastStat = newStat + + if data, err = ioutil.ReadFile(d.rawKey); err == nil { + return respWithMetadata(string(data)) + } + return nil, nil, fmt.Errorf("file: error reading: %s", err) +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *File) CanShare() bool { + return false +} + +// HashCode returns a unique identifier. +func (d *File) HashCode() string { + return fmt.Sprintf("StoreKeyPrefix|%s", d.rawKey) +} + +// Display prints the human-friendly output. +func (d *File) Display() string { + return fmt.Sprintf(`"file(%s)"`, d.rawKey) +} + +// Stop halts the dependency's fetch function. +func (d *File) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// watch watchers the file for changes +func (d *File) watch() (os.FileInfo, error) { + for { + stat, err := os.Stat(d.rawKey) + if err != nil { + return nil, err + } + + changed := func(d *File, stat os.FileInfo) bool { + d.mutex.RLock() + defer d.mutex.RUnlock() + + if d.lastStat == nil { + return true + } + if d.lastStat.Size() != stat.Size() { + return true + } + + if d.lastStat.ModTime() != stat.ModTime() { + return true + } + + return false + }(d, stat) + + if changed { + return stat, nil + } + time.Sleep(3 * time.Second) + } +} + +// ParseFile creates a file dependency from the given path. 
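// Illustrative usage of ParseFile (editor's sketch, not part of the vendored
// source; the path is an example value):
//
//	f, err := ParseFile("/etc/myapp/feature-flags.json")
//	if err != nil {
//	    log.Fatal(err)
//	}
//	fmt.Println(f.Display()) // prints: "file(/etc/myapp/feature-flags.json)"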
+func ParseFile(s string) (*File, error) { + if len(s) == 0 { + return nil, errors.New("cannot specify empty file dependency") + } + + kd := &File{ + rawKey: s, + stopCh: make(chan struct{}), + } + + return kd, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/health_service.go b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go new file mode 100644 index 00000000000..ac6541f6120 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go @@ -0,0 +1,414 @@ +package dependency + +import ( + "encoding/gob" + "errors" + "fmt" + "log" + "regexp" + "sort" + "strings" + "sync" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-multierror" +) + +func init() { + gob.Register([]*HealthService{}) +} + +const ( + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthUnknown = "unknown" + HealthCritical = "critical" + HealthMaint = "maintenance" + + NodeMaint = "_node_maintenance" + ServiceMaint = "_service_maintenance:" +) + +// HealthService is a service entry in Consul. +type HealthService struct { + Node string + NodeAddress string + Address string + ID string + Name string + Tags ServiceTags + Checks []*api.HealthCheck + Status string + Port uint64 +} + +// HealthServices is the struct that is formed from the dependency inside a +// template. +type HealthServices struct { + sync.Mutex + + rawKey string + Name string + Tag string + DataCenter string + StatusFilter ServiceStatusFilter + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of HealthService objects. +func (d *HealthServices) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + consulOpts := opts.consulQueryOptions() + if d.DataCenter != "" { + consulOpts.Datacenter = d.DataCenter + } + + onlyHealthy := false + if d.StatusFilter == nil { + onlyHealthy = true + } + + consul, err := clients.Consul() + if err != nil { + return nil, nil, fmt.Errorf("health services: error getting client: %s", err) + } + + var entries []*api.ServiceEntry + var qm *api.QueryMeta + dataCh := make(chan struct{}) + go func() { + log.Printf("[DEBUG] (%s) querying consul with %+v", d.Display(), consulOpts) + entries, qm, err = consul.Health().Service(d.Name, d.Tag, onlyHealthy, consulOpts) + close(dataCh) + }() + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-dataCh: + } + + if err != nil { + return nil, nil, fmt.Errorf("health services: error fetching: %s", err) + } + + log.Printf("[DEBUG] (%s) Consul returned %d services", d.Display(), len(entries)) + + services := make([]*HealthService, 0, len(entries)) + + for _, entry := range entries { + // Get the status of this service from its checks. + status, err := statusFromChecks(entry.Checks) + if err != nil { + return nil, nil, fmt.Errorf("health services: "+ + "error getting status from checks: %s", err) + } + + // If we are not checking only healthy services, filter out services that do + // not match the given filter. + if d.StatusFilter != nil && !d.StatusFilter.Accept(status) { + continue + } + + // Sort the tags. + tags := deepCopyAndSortTags(entry.Service.Tags) + + // Get the address of the service, falling back to the address of the node. 
+ var address string + if entry.Service.Address != "" { + address = entry.Service.Address + } else { + address = entry.Node.Address + } + + services = append(services, &HealthService{ + Node: entry.Node.Node, + NodeAddress: entry.Node.Address, + Address: address, + ID: entry.Service.ID, + Name: entry.Service.Service, + Tags: tags, + Status: status, + Checks: entry.Checks, + Port: uint64(entry.Service.Port), + }) + } + + log.Printf("[DEBUG] (%s) %d services after health check status filtering", d.Display(), len(services)) + + sort.Stable(HealthServiceList(services)) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return services, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *HealthServices) CanShare() bool { + return true +} + +// HashCode returns a unique identifier. +func (d *HealthServices) HashCode() string { + return fmt.Sprintf("HealthServices|%s", d.rawKey) +} + +// Display prints the human-friendly output. +func (d *HealthServices) Display() string { + return fmt.Sprintf(`"service(%s)"`, d.rawKey) +} + +// Stop halts the dependency's fetch function. +func (d *HealthServices) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseHealthServices processes the incoming strings to build a service dependency. +// +// Supported arguments +// ParseHealthServices("service_id") +// ParseHealthServices("service_id", "health_check") +// +// Where service_id is in the format of service(.tag(@datacenter)) +// and health_check is either "any" or "passing". +// +// If no health_check is provided then its the same as "passing". +func ParseHealthServices(s ...string) (*HealthServices, error) { + var query string + var filter ServiceStatusFilter + var err error + + switch len(s) { + case 1: + query = s[0] + filter, err = NewServiceStatusFilter("") + if err != nil { + return nil, err + } + case 2: + query = s[0] + filter, err = NewServiceStatusFilter(s[1]) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("expected 1 or 2 arguments, got %d", len(s)) + } + + if len(query) == 0 { + return nil, errors.New("cannot specify empty health service dependency") + } + + re := regexp.MustCompile(`\A` + + `((?P[[:word:]\-.]+)\.)?` + + `((?P[[:word:]\-/_]+))` + + `(@(?P[[:word:]\.\-]+))?(:(?P[0-9]+))?` + + `\z`) + names := re.SubexpNames() + match := re.FindAllStringSubmatch(query, -1) + + if len(match) == 0 { + return nil, errors.New("invalid health service dependency format") + } + + r := match[0] + + m := map[string]string{} + for i, n := range r { + if names[i] != "" { + m[names[i]] = n + } + } + + tag, name, datacenter, port := m["tag"], m["name"], m["datacenter"], m["port"] + + if name == "" { + return nil, errors.New("name part is required") + } + + if port != "" { + log.Printf("[WARN] specifying a port in a 'service' query is not "+ + "supported - please remove the port from the query %q", query) + } + + var key string + if filter == nil { + key = query + } else { + key = fmt.Sprintf("%s %s", query, filter) + } + + sd := &HealthServices{ + rawKey: key, + Name: name, + Tag: tag, + DataCenter: datacenter, + StatusFilter: filter, + stopCh: make(chan struct{}), + } + + return sd, nil +} + +// statusFromChecks accepts a list of checks and returns the most likely status +// given those checks. Any "critical" statuses will automatically mark the +// service as critical. After that, any "unknown" statuses will mark as +// "unknown". 
If any warning checks exist, the status will be marked as +// "warning", and finally "passing". If there are no checks, the service will be +// marked as "passing". +func statusFromChecks(checks []*api.HealthCheck) (string, error) { + var passing, warning, unknown, critical, maintenance bool + for _, check := range checks { + if check.CheckID == NodeMaint || strings.HasPrefix(check.CheckID, ServiceMaint) { + maintenance = true + continue + } + + switch check.Status { + case "passing": + passing = true + case "warning": + warning = true + case "unknown": + unknown = true + case "critical": + critical = true + default: + return "", fmt.Errorf("unknown status: %q", check.Status) + } + } + + switch { + case maintenance: + return HealthMaint, nil + case critical: + return HealthCritical, nil + case unknown: + return HealthUnknown, nil + case warning: + return HealthWarning, nil + case passing: + return HealthPassing, nil + default: + // No checks? + return HealthPassing, nil + } +} + +// ServiceStatusFilter is used to specify a list of service statuses that you want filter by. +type ServiceStatusFilter []string + +// String returns the string representation of this status filter +func (f ServiceStatusFilter) String() string { + return fmt.Sprintf("[%s]", strings.Join(f, ",")) +} + +// NewServiceStatusFilter creates a status filter from the given string in the +// format `[key[,key[,key...]]]`. Each status is split on the comma character +// and must match one of the valid status names. +// +// If the empty string is given, it is assumed only "passing" statuses are to +// be returned. +// +// If the user specifies "any" with other keys, an error will be returned. +func NewServiceStatusFilter(s string) (ServiceStatusFilter, error) { + // If no statuses were given, use the default status of "all passing". + if len(s) == 0 || len(strings.TrimSpace(s)) == 0 { + return nil, nil + } + + var errs *multierror.Error + var hasAny bool + + raw := strings.Split(s, ",") + trimmed := make(ServiceStatusFilter, 0, len(raw)) + for _, r := range raw { + trim := strings.TrimSpace(r) + + // Ignore the empty string. + if len(trim) == 0 { + continue + } + + // Record the case where we have the "any" status - it will be used later. + if trim == HealthAny { + hasAny = true + } + + // Validate that the service is actually a valid name. + switch trim { + case HealthAny, HealthUnknown, HealthPassing, HealthWarning, HealthCritical, HealthMaint: + trimmed = append(trimmed, trim) + default: + errs = multierror.Append(errs, fmt.Errorf("service filter: invalid filter %q", trim)) + } + } + + // If the user specified "any" with additional keys, that is invalid. + if hasAny && len(trimmed) != 1 { + errs = multierror.Append(errs, fmt.Errorf("service filter: cannot specify extra keys when using %q", "any")) + } + + return trimmed, errs.ErrorOrNil() +} + +// Accept allows us to check if a slice of health checks pass this filter. +func (f ServiceStatusFilter) Accept(s string) bool { + // If the any filter is activated, pass everything. + if f.any() { + return true + } + + // Iterate over each status and see if the given status is any of those + // statuses. + for _, status := range f { + if status == s { + return true + } + } + + return false +} + +// any is a helper method to determine if this is an "any" service status +// filter. If "any" was given, it must be the only item in the list. 
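// Illustrative usage of the status filter (editor's sketch, not part of the
// vendored source; service and datacenter names are example values):
//
//	f, _ := NewServiceStatusFilter("passing,warning")
//	f.Accept(HealthPassing)  // true
//	f.Accept(HealthCritical) // false
//
//	// The same filter string is accepted as the optional second argument to
//	// ParseHealthServices:
//	hs, _ := ParseHealthServices("primary.redis@dc1", "passing,warning")
//	// hs.Name == "redis", hs.Tag == "primary", hs.DataCenter == "dc1"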
+func (f ServiceStatusFilter) any() bool { + return len(f) == 1 && f[0] == HealthAny +} + +// HealthServiceList is a sortable slice of Service +type HealthServiceList []*HealthService + +// Len, Swap, and Less are used to implement the sort.Sort interface. +func (s HealthServiceList) Len() int { return len(s) } +func (s HealthServiceList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s HealthServiceList) Less(i, j int) bool { + if s[i].Node < s[j].Node { + return true + } else if s[i].Node == s[j].Node { + return s[i].ID <= s[j].ID + } + return false +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/store_key.go b/vendor/github.com/hashicorp/consul-template/dependency/store_key.go new file mode 100644 index 00000000000..e14c3cb85a8 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/store_key.go @@ -0,0 +1,171 @@ +package dependency + +import ( + "errors" + "fmt" + "log" + "regexp" + "sync" + + api "github.com/hashicorp/consul/api" +) + +// StoreKey represents a single item in Consul's KV store. +type StoreKey struct { + sync.Mutex + + rawKey string + Path string + DataCenter string + + defaultValue string + defaultGiven bool + + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Consul API defined by the given client and returns string +// of the value to Path. +func (d *StoreKey) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + consulOpts := opts.consulQueryOptions() + if d.DataCenter != "" { + consulOpts.Datacenter = d.DataCenter + } + + consul, err := clients.Consul() + if err != nil { + return nil, nil, fmt.Errorf("store key: error getting client: %s", err) + } + + var pair *api.KVPair + var qm *api.QueryMeta + dataCh := make(chan struct{}) + go func() { + log.Printf("[DEBUG] (%s) querying consul with %+v", d.Display(), consulOpts) + pair, qm, err = consul.KV().Get(d.Path, consulOpts) + close(dataCh) + }() + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-dataCh: + } + + if err != nil { + return "", nil, fmt.Errorf("store key: error fetching: %s", err) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + if pair == nil { + if d.defaultGiven { + log.Printf("[DEBUG] (%s) Consul returned no data (using default of %q)", + d.Display(), d.defaultValue) + return d.defaultValue, rm, nil + } + + log.Printf("[WARN] (%s) Consul returned no data (does the path exist?)", + d.Display()) + return "", rm, nil + } + + log.Printf("[DEBUG] (%s) Consul returned %s", d.Display(), pair.Value) + + return string(pair.Value), rm, nil +} + +// SetDefault is used to set the default value. +func (d *StoreKey) SetDefault(s string) { + d.defaultGiven = true + d.defaultValue = s +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *StoreKey) CanShare() bool { + return true +} + +// HashCode returns a unique identifier. +func (d *StoreKey) HashCode() string { + if d.defaultGiven { + return fmt.Sprintf("StoreKey|%s|%s", d.rawKey, d.defaultValue) + } + return fmt.Sprintf("StoreKey|%s", d.rawKey) +} + +// Display prints the human-friendly output. 
+func (d *StoreKey) Display() string { + if d.defaultGiven { + return fmt.Sprintf(`"key_or_default(%s, %q)"`, d.rawKey, d.defaultValue) + } + return fmt.Sprintf(`"key(%s)"`, d.rawKey) +} + +// Stop halts the dependency's fetch function. +func (d *StoreKey) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseStoreKey parses a string of the format a(/b(/c...)) +func ParseStoreKey(s string) (*StoreKey, error) { + if len(s) == 0 { + return nil, errors.New("cannot specify empty key dependency") + } + + re := regexp.MustCompile(`\A` + + `(?P[^@]+)` + + `(@(?P.+))?` + + `\z`) + names := re.SubexpNames() + match := re.FindAllStringSubmatch(s, -1) + + if len(match) == 0 { + return nil, errors.New("invalid key dependency format") + } + + r := match[0] + + m := map[string]string{} + for i, n := range r { + if names[i] != "" { + m[names[i]] = n + } + } + + key, datacenter := m["key"], m["datacenter"] + + if key == "" { + return nil, errors.New("key part is required") + } + + kd := &StoreKey{ + rawKey: s, + Path: key, + DataCenter: datacenter, + stopCh: make(chan struct{}), + } + + return kd, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/store_key_prefix.go b/vendor/github.com/hashicorp/consul-template/dependency/store_key_prefix.go new file mode 100644 index 00000000000..14f2cf6ed46 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/store_key_prefix.go @@ -0,0 +1,184 @@ +package dependency + +import ( + "encoding/gob" + "errors" + "fmt" + "log" + "regexp" + "strings" + "sync" + + "github.com/hashicorp/consul/api" +) + +func init() { + gob.Register([]*KeyPair{}) +} + +// KeyPair is a simple Key-Value pair +type KeyPair struct { + Path string + Key string + Value string + + // Lesser-used, but still valuable keys from api.KV + CreateIndex uint64 + ModifyIndex uint64 + LockIndex uint64 + Flags uint64 + Session string +} + +// StoreKeyPrefix is the representation of a requested key dependency +// from inside a template. 
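// Illustrative usage of ParseStoreKey above (editor's sketch, not part of the
// vendored source; the key path and default are example values):
//
//	k, _ := ParseStoreKey("config/redis/maxconns@dc1")
//	// k.Path == "config/redis/maxconns", k.DataCenter == "dc1"
//	k.SetDefault("5")
//	fmt.Println(k.HashCode()) // StoreKey|config/redis/maxconns@dc1|5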
+type StoreKeyPrefix struct { + sync.Mutex + + rawKey string + Prefix string + DataCenter string + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of KeyPair objects +func (d *StoreKeyPrefix) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + consulOpts := opts.consulQueryOptions() + if d.DataCenter != "" { + consulOpts.Datacenter = d.DataCenter + } + + consul, err := clients.Consul() + if err != nil { + return nil, nil, fmt.Errorf("store key prefix: error getting client: %s", err) + } + + var prefixes api.KVPairs + var qm *api.QueryMeta + dataCh := make(chan struct{}) + go func() { + log.Printf("[DEBUG] (%s) querying consul with %+v", d.Display(), consulOpts) + prefixes, qm, err = consul.KV().List(d.Prefix, consulOpts) + close(dataCh) + }() + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-dataCh: + } + + if err != nil { + return nil, nil, fmt.Errorf("store key prefix: error fetching: %s", err) + } + + log.Printf("[DEBUG] (%s) Consul returned %d key pairs", d.Display(), len(prefixes)) + + keyPairs := make([]*KeyPair, 0, len(prefixes)) + for _, pair := range prefixes { + key := strings.TrimPrefix(pair.Key, d.Prefix) + key = strings.TrimLeft(key, "/") + + keyPairs = append(keyPairs, &KeyPair{ + Path: pair.Key, + Key: key, + Value: string(pair.Value), + CreateIndex: pair.CreateIndex, + ModifyIndex: pair.ModifyIndex, + LockIndex: pair.LockIndex, + Flags: pair.Flags, + Session: pair.Session, + }) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return keyPairs, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *StoreKeyPrefix) CanShare() bool { + return true +} + +// HashCode returns a unique identifier. +func (d *StoreKeyPrefix) HashCode() string { + return fmt.Sprintf("StoreKeyPrefix|%s", d.rawKey) +} + +// Display prints the human-friendly output. +func (d *StoreKeyPrefix) Display() string { + return fmt.Sprintf(`"storeKeyPrefix(%s)"`, d.rawKey) +} + +// Stop halts the dependency's fetch function. 
+func (d *StoreKeyPrefix) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseStoreKeyPrefix parses a string of the format a(/b(/c...)) +func ParseStoreKeyPrefix(s string) (*StoreKeyPrefix, error) { + // a(/b(/c))(@datacenter) + re := regexp.MustCompile(`\A` + + `(?P[[:word:],\.\:\-\/]+)?` + + `(@(?P[[:word:]\.\-]+))?` + + `\z`) + names := re.SubexpNames() + match := re.FindAllStringSubmatch(s, -1) + + if len(match) == 0 { + return nil, errors.New("invalid key prefix dependency format") + } + + r := match[0] + + m := map[string]string{} + for i, n := range r { + if names[i] != "" { + m[names[i]] = n + } + } + + prefix, datacenter := m["prefix"], m["datacenter"] + + // Empty prefix or nil prefix should default to "/" + if len(prefix) == 0 { + prefix = "/" + } + + // Remove leading slash + if len(prefix) > 1 && prefix[0] == '/' { + prefix = prefix[1:len(prefix)] + } + + kpd := &StoreKeyPrefix{ + rawKey: s, + Prefix: prefix, + DataCenter: datacenter, + stopCh: make(chan struct{}), + } + + return kpd, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/test.go b/vendor/github.com/hashicorp/consul-template/dependency/test.go new file mode 100644 index 00000000000..d3f40313a78 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/test.go @@ -0,0 +1,126 @@ +package dependency + +import ( + "fmt" + "sync" + "time" +) + +// Test is a special dependency that does not actually speaks to a server. +type Test struct { + Name string +} + +func (d *Test) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + time.Sleep(10 * time.Millisecond) + data := "this is some data" + rm := &ResponseMetadata{LastIndex: 1} + return data, rm, nil +} + +func (d *Test) CanShare() bool { + return true +} + +func (d *Test) HashCode() string { + return fmt.Sprintf("Test|%s", d.Name) +} + +func (d *Test) Display() string { return "fakedep" } + +func (d *Test) Stop() {} + +// TestStale is a special dependency that can be used to test what happens when +// stale data is permitted. +type TestStale struct { + Name string +} + +// Fetch is used to implement the dependency interface. +func (d *TestStale) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + time.Sleep(10 * time.Millisecond) + + if opts == nil { + opts = &QueryOptions{} + } + + if opts.AllowStale { + data := "this is some stale data" + rm := &ResponseMetadata{LastIndex: 1, LastContact: 50 * time.Millisecond} + return data, rm, nil + } else { + data := "this is some fresh data" + rm := &ResponseMetadata{LastIndex: 1} + return data, rm, nil + } +} + +func (d *TestStale) CanShare() bool { + return true +} + +func (d *TestStale) HashCode() string { + return fmt.Sprintf("TestStale|%s", d.Name) +} + +func (d *TestStale) Display() string { return "fakedep" } + +func (d *TestStale) Stop() {} + +// TestFetchError is a special dependency that returns an error while fetching. 
+type TestFetchError struct { + Name string +} + +func (d *TestFetchError) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + time.Sleep(10 * time.Millisecond) + return nil, nil, fmt.Errorf("failed to contact server") +} + +func (d *TestFetchError) CanShare() bool { + return true +} + +func (d *TestFetchError) HashCode() string { + return fmt.Sprintf("TestFetchError|%s", d.Name) +} + +func (d *TestFetchError) Display() string { return "fakedep" } + +func (d *TestFetchError) Stop() {} + +// TestRetry is a special dependency that errors on the first fetch and +// succeeds on subsequent fetches. +type TestRetry struct { + sync.Mutex + Name string + retried bool +} + +func (d *TestRetry) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + time.Sleep(10 * time.Millisecond) + + d.Lock() + defer d.Unlock() + + if d.retried { + data := "this is some data" + rm := &ResponseMetadata{LastIndex: 1} + return data, rm, nil + } else { + d.retried = true + return nil, nil, fmt.Errorf("failed to contact server (try again)") + } +} + +func (d *TestRetry) CanShare() bool { + return true +} + +func (d *TestRetry) HashCode() string { + return fmt.Sprintf("TestRetry|%s", d.Name) +} + +func (d *TestRetry) Display() string { return "fakedep" } + +func (d *TestRetry) Stop() {} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_secret.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_secret.go new file mode 100644 index 00000000000..9c024e1bd7a --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_secret.go @@ -0,0 +1,197 @@ +package dependency + +import ( + "fmt" + "log" + "strings" + "sync" + "time" + + vaultapi "github.com/hashicorp/vault/api" +) + +// Secret is a vault secret. +type Secret struct { + LeaseID string + LeaseDuration int + Renewable bool + + // Data is the actual contents of the secret. The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} +} + +// VaultSecret is the dependency to Vault for a secret +type VaultSecret struct { + sync.Mutex + + Path string + data map[string]interface{} + secret *Secret + + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Vault API +func (d *VaultSecret) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + log.Printf("[DEBUG] (%s) querying vault with %+v", d.Display(), opts) + + // If this is not the first query and we have a lease duration, sleep until we + // try to renew. + if opts.WaitIndex != 0 && d.secret != nil && d.secret.LeaseDuration != 0 { + duration := time.Duration(d.secret.LeaseDuration/2.0) * time.Second + log.Printf("[DEBUG] (%s) pretending to long-poll for %q", + d.Display(), duration) + select { + case <-d.stopCh: + log.Printf("[DEBUG] (%s) received interrupt", d.Display()) + return nil, nil, ErrStopped + case <-time.After(duration): + } + } + + // Grab the vault client + vault, err := clients.Vault() + if err != nil { + return nil, nil, ErrWithExitf("vault secret: %s", err) + } + + // Attempt to renew the secret. If we do not have a secret or if that secret + // is not renewable, we will attempt a (re-)read later. 
+ if d.secret != nil && d.secret.LeaseID != "" && d.secret.Renewable { + renewal, err := vault.Sys().Renew(d.secret.LeaseID, 0) + if err == nil { + log.Printf("[DEBUG] (%s) successfully renewed", d.Display()) + + log.Printf("[DEBUG] (%s) %#v", d.Display(), renewal) + + secret := &Secret{ + LeaseID: renewal.LeaseID, + LeaseDuration: d.secret.LeaseDuration, + Renewable: renewal.Renewable, + Data: d.secret.Data, + } + + d.Lock() + d.secret = secret + d.Unlock() + + return respWithMetadata(secret) + } + + // The renewal failed for some reason. + log.Printf("[WARN] (%s) failed to renew, re-obtaining: %s", d.Display(), err) + } + + // If we got this far, we either didn't have a secret to renew, the secret was + // not renewable, or the renewal failed, so attempt a fresh read. + var vaultSecret *vaultapi.Secret + if len(d.data) == 0 { + vaultSecret, err = vault.Logical().Read(d.Path) + } else { + vaultSecret, err = vault.Logical().Write(d.Path, d.data) + } + if err != nil { + return nil, nil, ErrWithExitf("error obtaining from vault: %s", err) + } + + // The secret could be nil (maybe it does not exist yet). This is not an error + // to Vault, but it is an error to Consul Template, so return an error + // instead. + if vaultSecret == nil { + return nil, nil, fmt.Errorf("no secret exists at path %q", d.Display()) + } + + // Create our cloned secret + secret := &Secret{ + LeaseID: vaultSecret.LeaseID, + LeaseDuration: leaseDurationOrDefault(vaultSecret.LeaseDuration), + Renewable: vaultSecret.Renewable, + Data: vaultSecret.Data, + } + + d.Lock() + d.secret = secret + d.Unlock() + + log.Printf("[DEBUG] (%s) vault returned the secret", d.Display()) + + return respWithMetadata(secret) +} + +// CanShare returns if this dependency is shareable. +func (d *VaultSecret) CanShare() bool { + return false +} + +// HashCode returns the hash code for this dependency. +func (d *VaultSecret) HashCode() string { + return fmt.Sprintf("VaultSecret|%s", d.Path) +} + +// Display returns a string that should be displayed to the user in output (for +// example). +func (d *VaultSecret) Display() string { + return fmt.Sprintf(`"secret(%s)"`, d.Path) +} + +// Stop halts the given dependency's fetch. +func (d *VaultSecret) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseVaultSecret creates a new datacenter dependency. 
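+//
+// It accepts a Vault path followed by optional key=value pairs and builds a
+// *VaultSecret from them. A minimal illustrative sketch (not part of the
+// vendored source):
+//
+//     d, err := ParseVaultSecret("secret/foo", "ttl=1h")
+//     // d.Path == "secret/foo"
+//     // d.data == map[string]interface{}{"ttl": "1h"}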
+func ParseVaultSecret(s ...string) (*VaultSecret, error) { + if len(s) == 0 { + return nil, fmt.Errorf("expected 1 or more arguments, got %d", len(s)) + } + + path, rest := s[0], s[1:len(s)] + + if len(path) == 0 { + return nil, fmt.Errorf("vault path must be at least one character") + } + + data := make(map[string]interface{}) + for _, str := range rest { + parts := strings.SplitN(str, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid value %q - must be key=value", str) + } + + k, v := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + data[k] = v + } + + vs := &VaultSecret{ + Path: path, + data: data, + stopCh: make(chan struct{}), + } + return vs, nil +} + +func leaseDurationOrDefault(d int) int { + if d == 0 { + return 5 * 60 + } + return d +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_secrets.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_secrets.go new file mode 100644 index 00000000000..b4173351e44 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_secrets.go @@ -0,0 +1,134 @@ +package dependency + +import ( + "fmt" + "log" + "sort" + "sync" + "time" +) + +// VaultSecrets is the dependency to list secrets in Vault. +type VaultSecrets struct { + sync.Mutex + + Path string + + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Vault API +func (d *VaultSecrets) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + log.Printf("[DEBUG] (%s) querying vault with %+v", d.Display(), opts) + + // If this is not the first query and we have a lease duration, sleep until we + // try to renew. + if opts.WaitIndex != 0 { + log.Printf("[DEBUG] (%s) pretending to long-poll", d.Display()) + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(sleepTime): + } + } + + // Grab the vault client + vault, err := clients.Vault() + if err != nil { + return nil, nil, ErrWithExitf("vault secrets: %s", err) + } + + // Get the list as a secret + vaultSecret, err := vault.Logical().List(d.Path) + if err != nil { + return nil, nil, ErrWithExitf("error listing secrets from vault: %s", err) + } + + // If the secret or data data is nil, return an empty list of strings. + if vaultSecret == nil || vaultSecret.Data == nil { + return respWithMetadata(make([]string, 0)) + } + + // If there are no keys at that path, return the empty list. + keys, ok := vaultSecret.Data["keys"] + if !ok { + return respWithMetadata(make([]string, 0)) + } + + // Convert the interface into a list of interfaces. + list, ok := keys.([]interface{}) + if !ok { + return nil, nil, ErrWithExitf("vault returned an unexpected payload for %q", d.Display()) + } + + // Pull each item out of the list and safely cast to a string. + result := make([]string, len(list)) + for i, v := range list { + typed, ok := v.(string) + if !ok { + return nil, nil, ErrWithExitf("vault returned a non-string when listing secrets for %q", d.Display()) + } + result[i] = typed + } + sort.Strings(result) + + log.Printf("[DEBUG] (%s) vault listed %d secrets(s)", d.Display(), len(result)) + + return respWithMetadata(result) +} + +// CanShare returns if this dependency is shareable. +func (d *VaultSecrets) CanShare() bool { + return false +} + +// HashCode returns the hash code for this dependency. 
+func (d *VaultSecrets) HashCode() string { + return fmt.Sprintf("VaultSecrets|%s", d.Path) +} + +// Display returns a string that should be displayed to the user in output (for +// example). +func (d *VaultSecrets) Display() string { + return fmt.Sprintf(`"secrets(%s)"`, d.Path) +} + +// Stop halts the dependency's fetch function. +func (d *VaultSecrets) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseVaultSecrets creates a new datacenter dependency. +func ParseVaultSecrets(s string) (*VaultSecrets, error) { + // Ensure a trailing slash, always. + if len(s) == 0 { + s = "/" + } + if s[len(s)-1] != '/' { + s = fmt.Sprintf("%s/", s) + } + + vs := &VaultSecrets{ + Path: s, + stopCh: make(chan struct{}), + } + return vs, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go new file mode 100644 index 00000000000..7c1ca20a369 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go @@ -0,0 +1,119 @@ +package dependency + +import ( + "log" + "sync" + "time" +) + +// VaultToken is the dependency to Vault for a secret +type VaultToken struct { + sync.Mutex + + leaseID string + leaseDuration int + + stopped bool + stopCh chan struct{} +} + +// Fetch queries the Vault API +func (d *VaultToken) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + d.Lock() + if d.stopped { + defer d.Unlock() + return nil, nil, ErrStopped + } + d.Unlock() + + if opts == nil { + opts = &QueryOptions{} + } + + log.Printf("[DEBUG] (%s) renewing vault token", d.Display()) + + // If this is not the first query and we have a lease duration, sleep until we + // try to renew. + if opts.WaitIndex != 0 && d.leaseDuration != 0 { + duration := time.Duration(d.leaseDuration/2.0) * time.Second + + if duration < 1*time.Second { + log.Printf("[DEBUG] (%s) increasing sleep to 1s (was %q)", + d.Display(), duration) + duration = 1 * time.Second + } + + log.Printf("[DEBUG] (%s) sleeping for %q", d.Display(), duration) + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(duration): + } + } + + // Grab the vault client + vault, err := clients.Vault() + if err != nil { + return nil, nil, ErrWithExitf("vault_token: %s", err) + } + + token, err := vault.Auth().Token().RenewSelf(0) + if err != nil { + return nil, nil, ErrWithExitf("error renewing vault token: %s", err) + } + + // Create our cloned secret + secret := &Secret{ + LeaseID: token.LeaseID, + LeaseDuration: token.Auth.LeaseDuration, + Renewable: token.Auth.Renewable, + Data: token.Data, + } + + leaseDuration := token.Auth.LeaseDuration + if leaseDuration == 0 { + log.Printf("[WARN] (%s) lease duration is 0, setting to 5s", d.Display()) + leaseDuration = 5 + } + + d.Lock() + d.leaseID = secret.LeaseID + d.leaseDuration = leaseDuration + d.Unlock() + + log.Printf("[DEBUG] (%s) successfully renewed token", d.Display()) + + return respWithMetadata(secret) +} + +// CanShare returns if this dependency is shareable. +func (d *VaultToken) CanShare() bool { + return false +} + +// HashCode returns the hash code for this dependency. +func (d *VaultToken) HashCode() string { + return "VaultToken" +} + +// Display returns a string that should be displayed to the user in output (for +// example). 
+func (d *VaultToken) Display() string { + return "vault_token" +} + +// Stop halts the dependency's fetch function. +func (d *VaultToken) Stop() { + d.Lock() + defer d.Unlock() + + if !d.stopped { + close(d.stopCh) + d.stopped = true + } +} + +// ParseVaultToken creates a new VaultToken dependency. +func ParseVaultToken() (*VaultToken, error) { + return &VaultToken{stopCh: make(chan struct{})}, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/dedup.go b/vendor/github.com/hashicorp/consul-template/manager/dedup.go new file mode 100644 index 00000000000..dc79015d071 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/manager/dedup.go @@ -0,0 +1,453 @@ +package manager + +import ( + "bytes" + "compress/lzw" + "crypto/md5" + "encoding/gob" + "fmt" + "log" + "path" + "sync" + "time" + + "github.com/hashicorp/consul-template/config" + dep "github.com/hashicorp/consul-template/dependency" + "github.com/hashicorp/consul-template/template" + consulapi "github.com/hashicorp/consul/api" +) + +const ( + // sessionCreateRetry is the amount of time we wait + // to recreate a session when lost. + sessionCreateRetry = 15 * time.Second + + // lockRetry is the interval on which we try to re-acquire locks + lockRetry = 10 * time.Second + + // listRetry is the interval on which we retry listing a data path + listRetry = 10 * time.Second + + // templateDataFlag is added as a flag to the shared data values + // so that we can use it as a sanity check + templateDataFlag = 0x22b9a127a2c03520 +) + +// templateData is GOB encoded share the depdency values +type templateData struct { + Data map[string]interface{} +} + +// DedupManager is used to de-duplicate which instance of Consul-Template +// is handling each template. For each template, a lock path is determined +// using the MD5 of the template. This path is used to elect a "leader" +// instance. +// +// The leader instance operations like usual, but any time a template is +// rendered, any of the data required for rendering is stored in the +// Consul KV store under the lock path. +// +// The follower instances depend on the leader to do the primary watching +// and rendering, and instead only watch the aggregated data in the KV. +// Followers wait for updates and re-render the template. +// +// If a template depends on 50 views, and is running on 50 machines, that +// would normally require 2500 blocking queries. Using deduplication, one +// instance has 50 view queries, plus 50 additional queries on the lock +// path for a total of 100. 
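+//
+// As a rough sketch of the resulting Consul KV layout (the paths are built
+// from the configured Deduplicate.Prefix and each template's MD5 hash, as in
+// the code below):
+//
+//     <Deduplicate.Prefix>/<template MD5>/lock  - leader election lock, held via a Consul session
+//     <Deduplicate.Prefix>/<template MD5>/data  - GOB-encoded, LZW-compressed templateData written by the leader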
+// +type DedupManager struct { + // config is the consul-template configuration + config *config.Config + + // clients is used to access the underlying clinets + clients *dep.ClientSet + + // Brain is where we inject udpates + brain *template.Brain + + // templates is the set of templates we are trying to dedup + templates []*template.Template + + // leader tracks if we are currently the leader + leader map[*template.Template]<-chan struct{} + leaderLock sync.RWMutex + + // lastWrite tracks the hash of the data paths + lastWrite map[*template.Template][]byte + lastWriteLock sync.RWMutex + + // updateCh is used to indicate an update watched data + updateCh chan struct{} + + // wg is used to wait for a clean shutdown + wg sync.WaitGroup + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewDedupManager creates a new Dedup manager +func NewDedupManager(config *config.Config, clients *dep.ClientSet, brain *template.Brain, templates []*template.Template) (*DedupManager, error) { + d := &DedupManager{ + config: config, + clients: clients, + brain: brain, + templates: templates, + leader: make(map[*template.Template]<-chan struct{}), + lastWrite: make(map[*template.Template][]byte), + updateCh: make(chan struct{}, 1), + stopCh: make(chan struct{}), + } + return d, nil +} + +// Start is used to start the de-duplication manager +func (d *DedupManager) Start() error { + log.Printf("[INFO] (dedup) starting de-duplication manager") + + client, err := d.clients.Consul() + if err != nil { + return err + } + go d.createSession(client) + + // Start to watch each template + for _, t := range d.templates { + go d.watchTemplate(client, t) + } + return nil +} + +// Stop is used to stop the de-duplication manager +func (d *DedupManager) Stop() error { + d.stopLock.Lock() + defer d.stopLock.Unlock() + if d.stop { + return nil + } + + log.Printf("[INFO] (dedup) stopping de-duplication manager") + d.stop = true + close(d.stopCh) + d.wg.Wait() + return nil +} + +// createSession is used to create and maintain a session to Consul +func (d *DedupManager) createSession(client *consulapi.Client) { +START: + log.Printf("[INFO] (dedup) attempting to create session") + session := client.Session() + sessionCh := make(chan struct{}) + ttl := fmt.Sprintf("%ds", d.config.Deduplicate.TTL/time.Second) + se := &consulapi.SessionEntry{ + Name: "Consul-Template de-duplication", + Behavior: "delete", + TTL: ttl, + } + id, _, err := session.Create(se, nil) + if err != nil { + log.Printf("[ERR] (dedup) failed to create session: %v", err) + goto WAIT + } + log.Printf("[INFO] (dedup) created session %s", id) + + // Attempt to lock each template + for _, t := range d.templates { + d.wg.Add(1) + go d.attemptLock(client, id, sessionCh, t) + } + + // Renew our session periodically + if err := session.RenewPeriodic("15s", id, nil, d.stopCh); err != nil { + log.Printf("[ERR] (dedup) failed to renew session: %v", err) + d.wg.Wait() + } + close(sessionCh) + +WAIT: + select { + case <-time.After(sessionCreateRetry): + goto START + case <-d.stopCh: + return + } +} + +// IsLeader checks if we are currently the leader instance +func (d *DedupManager) IsLeader(tmpl *template.Template) bool { + d.leaderLock.RLock() + defer d.leaderLock.RUnlock() + + lockCh, ok := d.leader[tmpl] + if !ok { + return false + } + select { + case <-lockCh: + return false + default: + return true + } +} + +// UpdateDeps is used to update the values of the dependencies for a template +func (d *DedupManager) UpdateDeps(t *template.Template, deps 
[]dep.Dependency) error { + // Calculate the path to write updates to + dataPath := path.Join(d.config.Deduplicate.Prefix, t.HexMD5, "data") + + // Package up the dependency data + td := templateData{ + Data: make(map[string]interface{}), + } + for _, dp := range deps { + // Skip any dependencies that can't be shared + if !dp.CanShare() { + continue + } + + // Pull the current value from the brain + val, ok := d.brain.Recall(dp) + if ok { + td.Data[dp.HashCode()] = val + } + } + + // Encode via GOB and LZW compress + var buf bytes.Buffer + compress := lzw.NewWriter(&buf, lzw.LSB, 8) + enc := gob.NewEncoder(compress) + if err := enc.Encode(&td); err != nil { + return fmt.Errorf("encode failed: %v", err) + } + compress.Close() + + // Compute MD5 of the buffer + hash := md5.Sum(buf.Bytes()) + d.lastWriteLock.RLock() + existing, ok := d.lastWrite[t] + d.lastWriteLock.RUnlock() + if ok && bytes.Equal(existing, hash[:]) { + log.Printf("[INFO] (dedup) de-duplicate data '%s' already current", + dataPath) + return nil + } + + // Write the KV update + kvPair := consulapi.KVPair{ + Key: dataPath, + Value: buf.Bytes(), + Flags: templateDataFlag, + } + client, err := d.clients.Consul() + if err != nil { + return fmt.Errorf("failed to get consul client: %v", err) + } + if _, err := client.KV().Put(&kvPair, nil); err != nil { + return fmt.Errorf("failed to write '%s': %v", dataPath, err) + } + log.Printf("[INFO] (dedup) updated de-duplicate data '%s'", dataPath) + d.lastWriteLock.Lock() + d.lastWrite[t] = hash[:] + d.lastWriteLock.Unlock() + return nil +} + +// UpdateCh returns a channel to watch for depedency updates +func (d *DedupManager) UpdateCh() <-chan struct{} { + return d.updateCh +} + +// setLeader sets if we are currently the leader instance +func (d *DedupManager) setLeader(tmpl *template.Template, lockCh <-chan struct{}) { + // Update the lock state + d.leaderLock.Lock() + if lockCh != nil { + d.leader[tmpl] = lockCh + } else { + delete(d.leader, tmpl) + } + d.leaderLock.Unlock() + + // Clear the lastWrite hash if we've lost leadership + if lockCh == nil { + d.lastWriteLock.Lock() + delete(d.lastWrite, tmpl) + d.lastWriteLock.Unlock() + } + + // Do an async notify of an update + select { + case d.updateCh <- struct{}{}: + default: + } +} + +func (d *DedupManager) watchTemplate(client *consulapi.Client, t *template.Template) { + log.Printf("[INFO] (dedup) starting watch for template hash %s", t.HexMD5) + path := path.Join(d.config.Deduplicate.Prefix, t.HexMD5, "data") + + // Determine if stale queries are allowed + var allowStale bool + if d.config.MaxStale != 0 { + allowStale = true + } + + // Setup our query options + opts := &consulapi.QueryOptions{ + AllowStale: allowStale, + WaitTime: 60 * time.Second, + } + +START: + // Stop listening if we're stopped + select { + case <-d.stopCh: + return + default: + } + + // If we are current the leader, wait for leadership lost + d.leaderLock.RLock() + lockCh, ok := d.leader[t] + d.leaderLock.RUnlock() + if ok { + select { + case <-lockCh: + goto START + case <-d.stopCh: + return + } + } + + // Block for updates on the data key + log.Printf("[INFO] (dedup) listing data for template hash %s", t.HexMD5) + pair, meta, err := client.KV().Get(path, opts) + if err != nil { + log.Printf("[ERR] (dedup) failed to get '%s': %v", path, err) + select { + case <-time.After(listRetry): + goto START + case <-d.stopCh: + return + } + } + opts.WaitIndex = meta.LastIndex + + // If we've exceeded the maximum staleness, retry without stale + if allowStale && 
meta.LastContact > d.config.MaxStale { + allowStale = false + log.Printf("[DEBUG] (dedup) %s stale data (last contact exceeded max_stale)", path) + goto START + } + + // Re-enable stale queries if allowed + if d.config.MaxStale != 0 { + allowStale = true + } + + // Stop listening if we're stopped + select { + case <-d.stopCh: + return + default: + } + + // If we are current the leader, wait for leadership lost + d.leaderLock.RLock() + lockCh, ok = d.leader[t] + d.leaderLock.RUnlock() + if ok { + select { + case <-lockCh: + goto START + case <-d.stopCh: + return + } + } + + // Parse the data file + if pair != nil && pair.Flags == templateDataFlag { + d.parseData(pair.Key, pair.Value) + } + goto START +} + +// parseData is used to update brain from a KV data pair +func (d *DedupManager) parseData(path string, raw []byte) { + // Setup the decompression and decoders + r := bytes.NewReader(raw) + decompress := lzw.NewReader(r, lzw.LSB, 8) + defer decompress.Close() + dec := gob.NewDecoder(decompress) + + // Decode the data + var td templateData + if err := dec.Decode(&td); err != nil { + log.Printf("[ERR] (dedup) failed to decode '%s': %v", + path, err) + return + } + log.Printf("[INFO] (dedup) loading %d dependencies from '%s'", + len(td.Data), path) + + // Update the data in the brain + for hashCode, value := range td.Data { + d.brain.ForceSet(hashCode, value) + } + + // Trigger the updateCh + select { + case d.updateCh <- struct{}{}: + default: + } +} + +func (d *DedupManager) attemptLock(client *consulapi.Client, session string, sessionCh chan struct{}, t *template.Template) { + defer d.wg.Done() +START: + log.Printf("[INFO] (dedup) attempting lock for template hash %s", t.HexMD5) + basePath := path.Join(d.config.Deduplicate.Prefix, t.HexMD5) + lopts := &consulapi.LockOptions{ + Key: path.Join(basePath, "lock"), + Session: session, + MonitorRetries: 3, + MonitorRetryTime: 3 * time.Second, + } + lock, err := client.LockOpts(lopts) + if err != nil { + log.Printf("[ERR] (dedup) failed to create lock '%s': %v", + lopts.Key, err) + return + } + + var retryCh <-chan time.Time + leaderCh, err := lock.Lock(sessionCh) + if err != nil { + log.Printf("[ERR] (dedup) failed to acquire lock '%s': %v", + lopts.Key, err) + retryCh = time.After(lockRetry) + } else { + log.Printf("[INFO] (dedup) acquired lock '%s'", lopts.Key) + d.setLeader(t, leaderCh) + } + + select { + case <-retryCh: + retryCh = nil + goto START + case <-leaderCh: + log.Printf("[WARN] (dedup) lost lock ownership '%s'", lopts.Key) + d.setLeader(t, nil) + goto START + case <-sessionCh: + log.Printf("[INFO] (dedup) releasing lock '%s'", lopts.Key) + d.setLeader(t, nil) + lock.Unlock() + case <-d.stopCh: + log.Printf("[INFO] (dedup) releasing lock '%s'", lopts.Key) + lock.Unlock() + } +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/errors.go b/vendor/github.com/hashicorp/consul-template/manager/errors.go new file mode 100644 index 00000000000..dbb84c36e43 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/manager/errors.go @@ -0,0 +1,31 @@ +package manager + +import "fmt" + +// ErrExitable is an interface that defines an integer ExitStatus() function. +type ErrExitable interface { + ExitStatus() int +} + +var _ error = new(ErrChildDied) +var _ ErrExitable = new(ErrChildDied) + +// ErrChildDied is the error returned when the child process prematurely dies. +type ErrChildDied struct { + code int +} + +// NewErrChildDied creates a new error with the given exit code. 
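+//
+// Illustrative usage (not part of the vendored source):
+//
+//     err := NewErrChildDied(2)
+//     err.Error()      // "child process died with exit code 2"
+//     err.ExitStatus() // 2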
+func NewErrChildDied(c int) *ErrChildDied { + return &ErrChildDied{code: c} +} + +// Error implements the error interface. +func (e *ErrChildDied) Error() string { + return fmt.Sprintf("child process died with exit code %d", e.code) +} + +// ExitStatus implements the ErrExitable interface. +func (e *ErrChildDied) ExitStatus() int { + return e.code +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/runner.go b/vendor/github.com/hashicorp/consul-template/manager/runner.go new file mode 100644 index 00000000000..ddb842ca3e1 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/manager/runner.go @@ -0,0 +1,1203 @@ +package manager + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/hashicorp/consul-template/child" + "github.com/hashicorp/consul-template/config" + dep "github.com/hashicorp/consul-template/dependency" + "github.com/hashicorp/consul-template/template" + "github.com/hashicorp/consul-template/watch" + "github.com/hashicorp/go-multierror" + "github.com/mattn/go-shellwords" +) + +const ( + // saneViewLimit is the number of views that we consider "sane" before we + // warn the user that they might be DDoSing their Consul cluster. + saneViewLimit = 128 +) + +// Runner responsible rendering Templates and invoking Commands. +type Runner struct { + // ErrCh and DoneCh are channels where errors and finish notifications occur. + ErrCh chan error + DoneCh chan struct{} + + // config is the Config that created this Runner. It is used internally to + // construct other objects and pass data. + config *config.Config + + // dry signals that output should be sent to stdout instead of committed to + // disk. once indicates the runner should execute each template exactly one + // time and then stop. + dry, once bool + + // outStream and errStream are the io.Writer streams where the runner will + // write information. These streams can be set using the SetOutStream() + // and SetErrStream() functions. + + // inStream is the ioReader where the runner will read information. + outStream, errStream io.Writer + inStream io.Reader + + // ctemplatesMap is a map of each template ID to the ConfigTemplates + // that made it. + ctemplatesMap map[string][]*config.ConfigTemplate + + // templates is the list of calculated templates. + templates []*template.Template + + // renderEvents is a mapping of a template ID to the render event. + renderEvents map[string]*RenderEvent + + // renderEventLock protects access into the renderEvents map + renderEventsLock sync.RWMutex + + // renderedCh is used to signal that a template has been rendered + renderedCh chan struct{} + + // dependencies is the list of dependencies this runner is watching. + dependencies map[string]dep.Dependency + + // watcher is the watcher this runner is using. + watcher *watch.Watcher + + // brain is the internal storage database of returned dependency data. + brain *template.Brain + + // child is the child process under management. This may be nil if not running + // in exec mode. + child *child.Child + + // childLock is the internal lock around the child process. + childLock sync.RWMutex + + // quiescenceMap is the map of templates to their quiescence timers. + // quiescenceCh is the channel where templates report returns from quiescence + // fires. 
+ quiescenceMap map[string]*quiescence + quiescenceCh chan *template.Template + + // dedup is the deduplication manager if enabled + dedup *DedupManager +} + +// RenderEvent captures the time and events that occurred for a template +// rendering. +type RenderEvent struct { + // LastWouldRender marks the last time the template would have rendered. + LastWouldRender time.Time + + // LastDidRender marks the last time the template was written to disk. + LastDidRender time.Time +} + +// NewRunner accepts a slice of ConfigTemplates and returns a pointer to the new +// Runner and any error that occurred during creation. +func NewRunner(config *config.Config, dry, once bool) (*Runner, error) { + log.Printf("[INFO] (runner) creating new runner (dry: %v, once: %v)", dry, once) + + runner := &Runner{ + config: config, + dry: dry, + once: once, + } + + if err := runner.init(); err != nil { + return nil, err + } + + return runner, nil +} + +// Start begins the polling for this runner. Any errors that occur will cause +// this function to push an item onto the runner's error channel and the halt +// execution. This function is blocking and should be called as a goroutine. +func (r *Runner) Start() { + log.Printf("[INFO] (runner) starting") + + // Create the pid before doing anything. + if err := r.storePid(); err != nil { + r.ErrCh <- err + return + } + + // Start the de-duplication manager + var dedupCh <-chan struct{} + if r.dedup != nil { + if err := r.dedup.Start(); err != nil { + r.ErrCh <- err + return + } + dedupCh = r.dedup.UpdateCh() + } + + // Setup the child process exit channel + var childExitCh <-chan int + + // Fire an initial run to parse all the templates and setup the first-pass + // dependencies. This also forces any templates that have no dependencies to + // be rendered immediately (since they are already renderable). + log.Printf("[DEBUG] (runner) running initial templates") + if err := r.Run(); err != nil { + r.ErrCh <- err + return + } + + for { + // Enable quiescence for all templates if we have specified wait + // intervals. + NEXT_Q: + for _, t := range r.templates { + if _, ok := r.quiescenceMap[t.ID()]; ok { + continue NEXT_Q + } + + for _, c := range r.configTemplatesFor(t) { + if c.Wait.IsActive() { + log.Printf("[DEBUG] (runner) enabling template-specific quiescence for %q", t.ID()) + r.quiescenceMap[t.ID()] = newQuiescence( + r.quiescenceCh, c.Wait.Min, c.Wait.Max, t) + continue NEXT_Q + } + } + + if r.config.Wait.IsActive() { + log.Printf("[DEBUG] (runner) enabling global quiescence for %q", t.ID()) + r.quiescenceMap[t.ID()] = newQuiescence( + r.quiescenceCh, r.config.Wait.Min, r.config.Wait.Max, t) + continue NEXT_Q + } + } + + // Warn the user if they are watching too many dependencies. + if r.watcher.Size() > saneViewLimit { + log.Printf("[WARN] (runner) watching %d dependencies - watching this "+ + "many dependencies could DDoS your consul cluster", r.watcher.Size()) + } else { + log.Printf("[INFO] (runner) watching %d dependencies", r.watcher.Size()) + } + + if r.allTemplatesRendered() { + // If an exec command was given and a command is not currently running, + // spawn the child process for supervision. + if r.config.Exec.Command != "" { + if r.child == nil { + if err := r.spawnChild(); err != nil { + r.ErrCh <- err + return + } + } + + // It's possible that we didn't start a process, in which case no + // channel is returned. If we did get a new exitCh, that means a child + // was spawned, so we need to watch a new exitCh. 
It is also possible + // that during a run, the child process was restarted, which means a + // new exit channel should be used. + nexitCh := r.child.ExitCh() + if nexitCh != nil { + childExitCh = nexitCh + } + } + + // If we are running in once mode and all our templates are rendered, + // then we should exit here. + if r.once { + log.Printf("[INFO] (runner) once mode and all templates rendered") + + if r.child != nil { + r.stopDedup() + r.stopWatcher() + + log.Printf("[INFO] (runner) waiting for child process to exit") + select { + case c := <-childExitCh: + log.Printf("[INFO] (runner) child process died") + r.ErrCh <- NewErrChildDied(c) + return + case <-r.DoneCh: + } + } + + r.Stop() + return + } + } + + OUTER: + select { + case view := <-r.watcher.DataCh: + // Receive this update + r.Receive(view.Dependency, view.Data()) + + // Drain all dependency data. Given a large number of dependencies, it is + // feasible that we have data for more than one of them. Instead of + // wasting CPU cycles rendering templates when we have more dependencies + // waiting to be added to the brain, we drain the entire buffered channel + // on the watcher and then reports when it is done receiving new data + // which the parent select listens for. + // + // Please see https://github.com/hashicorp/consul-template/issues/168 for + // more information about this optimization and the entire backstory. + for { + select { + case view := <-r.watcher.DataCh: + r.Receive(view.Dependency, view.Data()) + default: + break OUTER + } + } + + case <-dedupCh: + // We may get triggered by the de-duplication manager for either a change + // in leadership (acquired or lost lock), or an update of data for a template + // that we are watching. + log.Printf("[INFO] (runner) watcher triggered by de-duplication manager") + break OUTER + + case err := <-r.watcher.ErrCh: + // If this is our own internal error, see if we should hard exit. + if derr, ok := err.(*dep.FetchError); ok { + log.Printf("[DEBUG] (runner) detected custom error type") + if derr.ShouldExit() { + log.Printf("[DEBUG] (runner) custom error asked for hard exit") + r.ErrCh <- derr.OriginalError() + return + } + } + + // Intentionally do not send the error back up to the runner. Eventually, + // once Consul API implements errwrap and multierror, we can check the + // "type" of error and conditionally alert back. + // + // if err.Contains(Something) { + // errCh <- err + // } + log.Printf("[ERR] (runner) watcher reported error: %s", err) + if r.once { + r.ErrCh <- err + return + } + + case tmpl := <-r.quiescenceCh: + // Remove the quiescence for this template from the map. This will force + // the upcoming Run call to actually evaluate and render the template. + log.Printf("[INFO] (runner) received template %q from quiescence", tmpl.ID()) + delete(r.quiescenceMap, tmpl.ID()) + + case c := <-childExitCh: + log.Printf("[INFO] (runner) child process died") + r.ErrCh <- NewErrChildDied(c) + return + + case <-r.DoneCh: + log.Printf("[INFO] (runner) received finish") + return + } + + // If we got this far, that means we got new data or one of the timers fired, + // so attempt to re-render. + if err := r.Run(); err != nil { + r.ErrCh <- err + return + } + } +} + +// Stop halts the execution of this runner and its subprocesses. 
+func (r *Runner) Stop() { + log.Printf("[INFO] (runner) stopping") + r.stopDedup() + r.stopWatcher() + r.stopChild() + + if err := r.deletePid(); err != nil { + log.Printf("[WARN] (runner) could not remove pid at %q: %s", + r.config.PidFile, err) + } + + close(r.DoneCh) +} + +// TemplateRenderedCh returns a channel that will return the path of the +// template when it is rendered. +func (r *Runner) TemplateRenderedCh() <-chan struct{} { + return r.renderedCh +} + +// RenderEvents returns the render events for each template was rendered. The +// map is keyed by template ID. +func (r *Runner) RenderEvents() map[string]*RenderEvent { + r.renderEventsLock.RLock() + defer r.renderEventsLock.RUnlock() + + times := make(map[string]*RenderEvent, len(r.renderEvents)) + for k, v := range r.renderEvents { + times[k] = v + } + return times +} + +func (r *Runner) stopDedup() { + if r.dedup != nil { + log.Printf("[DEBUG] (runner) stopping de-duplication manager") + r.dedup.Stop() + } else { + log.Printf("[DEBUG] (runner) de-duplication manager is not running") + } +} + +func (r *Runner) stopWatcher() { + if r.watcher != nil { + log.Printf("[DEBUG] (runner) stopping watcher") + r.watcher.Stop() + } else { + log.Printf("[DEBUG] (runner) watcher is not running") + } +} + +func (r *Runner) stopChild() { + r.childLock.RLock() + defer r.childLock.RUnlock() + + if r.child != nil { + log.Printf("[DEBUG] (runner) stopping child process") + r.child.Stop() + } else { + log.Printf("[DEBUG] (runner) child is not running") + } +} + +// Receive accepts a Dependency and data for that dep. This data is +// cached on the Runner. This data is then used to determine if a Template +// is "renderable" (i.e. all its Dependencies have been downloaded at least +// once). +func (r *Runner) Receive(d dep.Dependency, data interface{}) { + // Just because we received data, it does not mean that we are actually + // watching for that data. How is that possible you may ask? Well, this + // Runner's data channel is pooled, meaning it accepts multiple data views + // before actually blocking. Whilest this runner is performing a Run() and + // executing diffs, it may be possible that more data was pushed onto the + // data channel pool for a dependency that we no longer care about. + // + // Accepting this dependency would introduce stale data into the brain, and + // that is simply unacceptable. In fact, it is a fun little bug: + // + // https://github.com/hashicorp/consul-template/issues/198 + // + // and by "little" bug, I mean really big bug. + if _, ok := r.dependencies[d.HashCode()]; ok { + log.Printf("[DEBUG] (runner) receiving dependency %s", d.Display()) + r.brain.Remember(d, data) + } +} + +// Signal sends a signal to the child process, if it exists. Any errors that +// occur are returned. +func (r *Runner) Signal(s os.Signal) error { + r.childLock.RLock() + defer r.childLock.RUnlock() + if r.child == nil { + return nil + } + return r.child.Signal(s) +} + +// Run iterates over each template in this Runner and conditionally executes +// the template rendering and command execution. +// +// The template is rendered atomicly. If and only if the template render +// completes successfully, the optional commands will be executed, if given. +// Please note that all templates are rendered **and then** any commands are +// executed. 
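+//
+// As a rough summary of the body below, a single pass does the following:
+//
+//     1. execute every template against the brain and collect its dependencies
+//     2. add watches for unwatched dependencies and skip templates that are
+//        still missing data
+//     3. render the remaining templates to disk, honoring dry mode, quiescence,
+//        and de-duplication leadership
+//     4. run the accumulated commands in order and reload the child process if
+//        anything was actually written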
+func (r *Runner) Run() error { + log.Printf("[INFO] (runner) running") + + var renderedAny bool + var commands []*config.ConfigTemplate + depsMap := make(map[string]dep.Dependency) + + for _, tmpl := range r.templates { + log.Printf("[DEBUG] (runner) checking template %s", tmpl.ID()) + + // Check if we are currently the leader instance + isLeader := true + if r.dedup != nil { + isLeader = r.dedup.IsLeader(tmpl) + } + + // If we are in once mode and this template was already rendered, move + // onto the next one. We do not want to re-render the template if we are + // in once mode, and we certainly do not want to re-run any commands. + if r.once { + r.renderEventsLock.RLock() + _, rendered := r.renderEvents[tmpl.ID()] + r.renderEventsLock.RUnlock() + if rendered { + log.Printf("[DEBUG] (runner) once mode and already rendered") + continue + } + } + + // Attempt to render the template, returning any missing dependencies and + // the rendered contents. If there are any missing dependencies, the + // contents cannot be rendered or trusted! + used, missing, contents, err := tmpl.Execute(r.brain) + if err != nil { + return err + } + + // Add the dependency to the list of dependencies for this runner. + for _, d := range used { + // If we've taken over leadership for a template, we may have data + // that is cached, but not have the watcher. We must treat this as + // missing so that we create the watcher and re-run the template. + if isLeader && !r.watcher.Watching(d) { + missing = append(missing, d) + } + if _, ok := depsMap[d.HashCode()]; !ok { + depsMap[d.HashCode()] = d + } + } + + // Diff any missing dependencies the template reported with dependencies + // the watcher is watching. + var unwatched []dep.Dependency + for _, d := range missing { + if !r.watcher.Watching(d) { + unwatched = append(unwatched, d) + } + } + + // If there are unwatched dependencies, start the watcher and move onto the + // next one. + if len(unwatched) > 0 { + log.Printf("[INFO] (runner) was not watching %d dependencies", len(unwatched)) + for _, d := range unwatched { + // If we are deduplicating, we must still handle non-sharable + // dependencies, since those will be ignored. + if isLeader || !d.CanShare() { + r.watcher.Add(d) + } + } + continue + } + + // If the template is missing data for some dependencies then we are not + // ready to render and need to move on to the next one. + if len(missing) > 0 { + log.Printf("[INFO] (runner) missing data for %d dependencies", len(missing)) + continue + } + + // Trigger an update of the de-duplicaiton manager + if r.dedup != nil && isLeader { + if err := r.dedup.UpdateDeps(tmpl, used); err != nil { + log.Printf("[ERR] (runner) failed to update dependency data for de-duplication: %v", err) + } + } + + // If quiescence is activated, start/update the timers and loop back around. + // We do not want to render the templates yet. + if q, ok := r.quiescenceMap[tmpl.ID()]; ok { + q.tick() + continue + } + + // For each configuration template that is tied to this template, attempt to + // render it to disk and accumulate commands for later use. 
+ for _, ctemplate := range r.configTemplatesFor(tmpl) { + log.Printf("[DEBUG] (runner) checking ctemplate %+v", ctemplate) + + // Render the template, taking dry mode into account + wouldRender, didRender, err := r.render(contents, ctemplate.Destination, ctemplate.Perms, ctemplate.Backup) + if err != nil { + log.Printf("[DEBUG] (runner) error rendering %s", tmpl.ID()) + return err + } + + log.Printf("[DEBUG] (runner) wouldRender: %t, didRender: %t", wouldRender, didRender) + + // If we would have rendered this template (but we did not because the + // contents were the same or something), we should consider this template + // rendered even though the contents on disk have not been updated. We + // will not fire commands unless the template was _actually_ rendered to + // disk though. + if wouldRender { + // Make a note that we have rendered this template (required for once + // mode and just generally nice for debugging purposes). + r.markRenderTime(tmpl.ID(), false) + } + + // If we _actually_ rendered the template to disk, we want to run the + // appropriate commands. + if didRender { + // Record that at least one template was rendered. + renderedAny = true + + // Store the render time + r.markRenderTime(tmpl.ID(), true) + + if !r.dry { + // If the template was rendered (changed) and we are not in dry-run mode, + // aggregate commands, ignoring previously known commands + // + // Future-self Q&A: Why not use a map for the commands instead of an + // array with an expensive lookup option? Well I'm glad you asked that + // future-self! One of the API promises is that commands are executed + // in the order in which they are provided in the ConfigTemplate + // definitions. If we inserted commands into a map, we would lose that + // relative ordering and people would be unhappy. + if ctemplate.Command != "" && !commandExists(ctemplate, commands) { + log.Printf("[DEBUG] (runner) appending command: %s", ctemplate.Command) + commands = append(commands, ctemplate) + } + } + } + } + } + + // Check if we need to deliver any rendered signals + if renderedAny { + // Send the signal that a template got rendered + select { + case r.renderedCh <- struct{}{}: + default: + } + } + + // Perform the diff and update the known dependencies. + r.diffAndUpdateDeps(depsMap) + + // Execute each command in sequence, collecting any errors that occur - this + // ensures all commands execute at least once. + var errs []error + for _, t := range commands { + log.Printf("[DEBUG] (runner) running command: `%s`, timeout: %s", + t.Command, t.CommandTimeout) + if err := r.execute(t.Command, t.CommandTimeout); err != nil { + log.Printf("[ERR] (runner) error running command: %s", err) + errs = append(errs, err) + } + } + + // If we got this far and have a child process, we need to send the reload + // signal to the child process. + if renderedAny && r.child != nil { + r.childLock.RLock() + if err := r.child.Reload(); err != nil { + errs = append(errs, err) + } + r.childLock.RUnlock() + } + + // If any errors were returned, convert them to an ErrorList for human + // readability. + if len(errs) != 0 { + var result *multierror.Error + for _, err := range errs { + result = multierror.Append(result, err) + } + return result.ErrorOrNil() + } + + return nil +} + +// init() creates the Runner's underlying data structures and returns an error +// if any problems occur. 
+func (r *Runner) init() error { + // Ensure we have default vaults + conf := config.DefaultConfig() + conf.Merge(r.config) + r.config = conf + + // Print the final config for debugging + result, err := json.MarshalIndent(r.config, "", " ") + if err != nil { + return err + } + log.Printf("[DEBUG] (runner) final config (tokens suppressed):\n\n%s\n\n", + result) + + // Create the clientset + clients, err := newClientSet(r.config) + if err != nil { + return fmt.Errorf("runner: %s", err) + } + + // Create the watcher + watcher, err := newWatcher(r.config, clients, r.once) + if err != nil { + return fmt.Errorf("runner: %s", err) + } + r.watcher = watcher + + numTemplates := len(r.config.ConfigTemplates) + templates := make([]*template.Template, 0, numTemplates) + ctemplatesMap := make(map[string][]*config.ConfigTemplate) + + // Iterate over each ConfigTemplate, creating a new Template resource for each + // entry. Templates are parsed and saved, and a map of templates to their + // config templates is kept so templates can lookup their commands and output + // destinations. + for _, ctmpl := range r.config.ConfigTemplates { + tmpl, err := template.NewTemplate(ctmpl.Source, ctmpl.EmbeddedTemplate, ctmpl.LeftDelim, ctmpl.RightDelim) + if err != nil { + return err + } + + if _, ok := ctemplatesMap[tmpl.ID()]; !ok { + templates = append(templates, tmpl) + } + + if _, ok := ctemplatesMap[tmpl.ID()]; !ok { + ctemplatesMap[tmpl.ID()] = make([]*config.ConfigTemplate, 0, 1) + } + ctemplatesMap[tmpl.ID()] = append(ctemplatesMap[tmpl.ID()], ctmpl) + } + + // Convert the map of templates (which was only used to ensure uniqueness) + // back into an array of templates. + r.templates = templates + + r.renderEvents = make(map[string]*RenderEvent, numTemplates) + r.dependencies = make(map[string]dep.Dependency) + + r.renderedCh = make(chan struct{}, 1) + + r.ctemplatesMap = ctemplatesMap + r.inStream = os.Stdin + r.outStream = os.Stdout + r.errStream = os.Stderr + r.brain = template.NewBrain() + + r.ErrCh = make(chan error) + r.DoneCh = make(chan struct{}) + + r.quiescenceMap = make(map[string]*quiescence) + r.quiescenceCh = make(chan *template.Template) + + // Setup the dedup manager if needed. This is + if r.config.Deduplicate.Enabled { + if r.once { + log.Printf("[INFO] (runner) disabling de-duplication in once mode") + } else { + r.dedup, err = NewDedupManager(r.config, clients, r.brain, r.templates) + if err != nil { + return err + } + } + } + + return nil +} + +// diffAndUpdateDeps iterates through the current map of dependencies on this +// runner and stops the watcher for any deps that are no longer required. +// +// At the end of this function, the given depsMap is converted to a slice and +// stored on the runner. +func (r *Runner) diffAndUpdateDeps(depsMap map[string]dep.Dependency) { + // Diff and up the list of dependencies, stopping any unneeded watchers. 
+ log.Printf("[INFO] (runner) diffing and updating dependencies") + + for key, d := range r.dependencies { + if _, ok := depsMap[key]; !ok { + log.Printf("[DEBUG] (runner) %s is no longer needed", d.Display()) + r.watcher.Remove(d) + r.brain.Forget(d) + } else { + log.Printf("[DEBUG] (runner) %s is still needed", d.Display()) + } + } + + r.dependencies = depsMap +} + +// ConfigTemplateFor returns the ConfigTemplate for the given Template +func (r *Runner) configTemplatesFor(tmpl *template.Template) []*config.ConfigTemplate { + return r.ctemplatesMap[tmpl.ID()] +} + +// ConfigTemplateMapping returns a mapping between the template ID and the set +// of ConfigTemplate represented by the template ID +func (r *Runner) ConfigTemplateMapping() map[string][]config.ConfigTemplate { + m := make(map[string][]config.ConfigTemplate, len(r.ctemplatesMap)) + + for id, set := range r.ctemplatesMap { + ctmpls := make([]config.ConfigTemplate, len(set)) + m[id] = ctmpls + for i, ctmpl := range set { + ctmpls[i] = *ctmpl + } + } + + return m +} + +// allTemplatesRendered returns true if all the templates in this Runner have +// been rendered at least one time. +func (r *Runner) allTemplatesRendered() bool { + r.renderEventsLock.RLock() + defer r.renderEventsLock.RUnlock() + + for _, tmpl := range r.templates { + if _, rendered := r.renderEvents[tmpl.ID()]; !rendered { + return false + } + } + + return true +} + +// markRenderTime stores the render time for the given template. If didRender is +// true, it stores the time for the template having been rendered, otherwise it +// stores it as would have been rendered. +func (r *Runner) markRenderTime(tmplID string, didRender bool) { + r.renderEventsLock.Lock() + defer r.renderEventsLock.Unlock() + + // Get the current time + now := time.Now() + + // Create the event for the template ID if it is the first time + event, ok := r.renderEvents[tmplID] + if !ok { + event = &RenderEvent{} + r.renderEvents[tmplID] = event + } + + if didRender { + event.LastDidRender = now + } else { + event.LastWouldRender = now + } +} + +// Render accepts a Template and a destination on disk. The first return +// parameter is a boolean that indicates if the template would have been +// rendered. Since this function is idempotent (meaning it does not write the +// template if the contents are the same), it is possible that a template is +// renderable, but never actually rendered because the contents are already +// present on disk in the correct state. In this situation, we want to inform +// the parent that the template would have been rendered, but was not. The +// second return value indicates if the template was actually committed to disk. +// By the associative property, if the second return value is true, the first +// return value must also be true (but not necessarily the other direction). The +// second return value indicates whether the caller should take action given a +// template on disk has changed. 
+// +// No template exists on disk: true, true, nil +// Template exists, but contents are different: true, true, nil +// Template exists, but contents are the same: true, false, nil +func (r *Runner) render(contents []byte, dest string, perms os.FileMode, backup bool) (bool, bool, error) { + existingContents, err := ioutil.ReadFile(dest) + if err != nil && !os.IsNotExist(err) { + return false, false, err + } + + if bytes.Equal(contents, existingContents) { + return true, false, nil + } + + if r.dry { + fmt.Fprintf(r.outStream, "> %s\n%s", dest, contents) + } else { + if err := atomicWrite(dest, contents, perms, backup); err != nil { + return false, false, err + } + } + + return true, true, nil +} + +// execute accepts a command string and runs that command string on the current +// system. +func (r *Runner) execute(command string, timeout time.Duration) error { + var shell, flag string + if runtime.GOOS == "windows" { + shell, flag = "cmd", "/C" + } else { + shell, flag = "/bin/sh", "-c" + } + + // Copy the current environment as well as some custom environment variables + // that are read by other Consul tools (like Consul's HTTP address). This + // allows the user to specify these values once (in the Consul Template config + // or command line), instead of in multiple places. + var customEnv = make(map[string]string) + + if r.config.Consul != "" { + customEnv["CONSUL_HTTP_ADDR"] = r.config.Consul + } + + if r.config.Token != "" { + customEnv["CONSUL_HTTP_TOKEN"] = r.config.Token + } + + if r.config.Auth.Enabled { + customEnv["CONSUL_HTTP_AUTH"] = r.config.Auth.String() + } + + customEnv["CONSUL_HTTP_SSL"] = strconv.FormatBool(r.config.SSL.Enabled) + customEnv["CONSUL_HTTP_SSL_VERIFY"] = strconv.FormatBool(r.config.SSL.Verify) + + if r.config.Vault.Address != "" { + customEnv["VAULT_ADDR"] = r.config.Vault.Address + } + + if !r.config.Vault.SSL.Verify { + customEnv["VAULT_SKIP_VERIFY"] = "true" + } + + if r.config.Vault.SSL.Cert != "" { + customEnv["VAULT_CAPATH"] = r.config.Vault.SSL.Cert + } + + if r.config.Vault.SSL.CaCert != "" { + customEnv["VAULT_CACERT"] = r.config.Vault.SSL.CaCert + } + + currentEnv := os.Environ() + cmdEnv := make([]string, len(currentEnv), len(currentEnv)+len(customEnv)) + copy(cmdEnv, currentEnv) + for k, v := range customEnv { + cmdEnv = append(cmdEnv, fmt.Sprintf("%s=%s", k, v)) + } + + // Create and invoke the command + cmd := exec.Command(shell, flag, command) + cmd.Stdout = r.outStream + cmd.Stderr = r.errStream + cmd.Env = cmdEnv + if err := cmd.Start(); err != nil { + return err + } + + done := make(chan error, 1) + go func() { + done <- cmd.Wait() + }() + + select { + case <-time.After(timeout): + if cmd.Process != nil { + if err := cmd.Process.Kill(); err != nil { + return fmt.Errorf("failed to kill %q in %s: %s", + command, timeout, err) + } + } + <-done // Allow the goroutine to finish + return fmt.Errorf( + "command %q\n"+ + "did not return for %s - if your command does not return, please\n"+ + "make sure to background it", + command, timeout) + case err := <-done: + return err + } +} + +// storePid is used to write out a PID file to disk. 
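+//
+// For example (illustrative only): with PidFile set to
+// "/var/run/consul-template.pid", the file will contain just the process ID
+// digits, e.g. "12345".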
+func (r *Runner) storePid() error { + path := r.config.PidFile + if path == "" { + return nil + } + + log.Printf("[INFO] creating pid file at %q", path) + + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + return fmt.Errorf("runner: could not open pid file: %s", err) + } + defer f.Close() + + pid := os.Getpid() + _, err = f.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("runner: could not write to pid file: %s", err) + } + return nil +} + +// deletePid is used to remove the PID on exit. +func (r *Runner) deletePid() error { + path := r.config.PidFile + if path == "" { + return nil + } + + log.Printf("[DEBUG] removing pid file at %q", path) + + stat, err := os.Stat(path) + if err != nil { + return fmt.Errorf("runner: could not remove pid file: %s", err) + } + if stat.IsDir() { + return fmt.Errorf("runner: specified pid file path is directory") + } + + err = os.Remove(path) + if err != nil { + return fmt.Errorf("runner: could not remove pid file: %s", err) + } + return nil +} + +// spawnChild creates a new child process and stores it on the runner object. +func (r *Runner) spawnChild() error { + r.childLock.Lock() + defer r.childLock.Unlock() + + p := shellwords.NewParser() + p.ParseEnv = true + p.ParseBacktick = true + args, err := p.Parse(r.config.Exec.Command) + if err != nil { + return err + } + + child, err := child.New(&child.NewInput{ + Stdin: r.inStream, + Stdout: r.outStream, + Stderr: r.errStream, + Command: args[0], + Args: args[1:], + ReloadSignal: r.config.Exec.ReloadSignal, + KillSignal: r.config.Exec.KillSignal, + KillTimeout: r.config.Exec.KillTimeout, + Splay: r.config.Exec.Splay, + }) + if err != nil { + return fmt.Errorf("error creating child: %s", err) + } + r.child = child + + if err := r.child.Start(); err != nil { + return fmt.Errorf("error starting child: %s", err) + } + return nil +} + +// quiescence is an internal representation of a single template's quiescence +// state. +type quiescence struct { + template *template.Template + min time.Duration + max time.Duration + ch chan *template.Template + timer *time.Timer + deadline time.Time +} + +// newQuiescence creates a new quiescence timer for the given template. +func newQuiescence(ch chan *template.Template, min, max time.Duration, t *template.Template) *quiescence { + return &quiescence{ + template: t, + min: min, + max: max, + ch: ch, + } +} + +// tick updates the minimum quiescence timer. +func (q *quiescence) tick() { + now := time.Now() + + // If this is the first tick, set up the timer and calculate the max + // deadline. + if q.timer == nil { + q.timer = time.NewTimer(q.min) + go func() { + select { + case <-q.timer.C: + q.ch <- q.template + } + }() + + q.deadline = now.Add(q.max) + return + } + + // Snooze the timer for the min time, or snooze less if we are coming + // up against the max time. If the timer has already fired and the reset + // doesn't work that's ok because we guarantee that the channel gets our + // template which means that we are obsolete and a fresh quiescence will + // be set up. + if now.Add(q.min).Before(q.deadline) { + q.timer.Reset(q.min) + } else if dur := q.deadline.Sub(now); dur > 0 { + q.timer.Reset(dur) + } +} + +// atomicWrite accepts a destination path and the template contents. It writes +// the template contents to a TempFile on disk, returning if any errors occur. +// +// If the parent destination directory does not exist, it will be created +// automatically with permissions 0755. 
To use a different permission, create +// the directory first or use `chmod` in a Command. +// +// If the destination path exists, all attempts will be made to preserve the +// existing file permissions. If those permissions cannot be read, an error is +// returned. If the file does not exist, it will be created automatically with +// permissions 0644. To use a different permission, create the destination file +// first or use `chmod` in a Command. +// +// If no errors occur, the Tempfile is "renamed" (moved) to the destination +// path. +func atomicWrite(path string, contents []byte, perms os.FileMode, backup bool) error { + parent := filepath.Dir(path) + if _, err := os.Stat(parent); os.IsNotExist(err) { + if err := os.MkdirAll(parent, 0755); err != nil { + return err + } + } + + f, err := ioutil.TempFile(parent, "") + if err != nil { + return err + } + defer os.Remove(f.Name()) + + if _, err := f.Write(contents); err != nil { + return err + } + + if err := f.Sync(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + if err := os.Chmod(f.Name(), perms); err != nil { + return err + } + + // If we got this far, it means we are about to save the file. Copy the + // current contents of the file onto disk (if it exists) so we have a backup. + if backup { + if _, err := os.Stat(path); !os.IsNotExist(err) { + if err := copyFile(path, path+".bak"); err != nil { + return err + } + } + } + + if err := os.Rename(f.Name(), path); err != nil { + return err + } + + return nil +} + +// copyFile copies the file at src to the path at dst. Any errors that occur +// are returned. +func copyFile(src, dst string) error { + s, err := os.Open(src) + if err != nil { + return err + } + defer s.Close() + + stat, err := s.Stat() + if err != nil { + return err + } + + d, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, stat.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(d, s); err != nil { + d.Close() + return err + } + return d.Close() +} + +// Checks if a ConfigTemplate with the given data exists in the list of Config +// Templates. +func commandExists(c *config.ConfigTemplate, templates []*config.ConfigTemplate) bool { + needle := strings.TrimSpace(c.Command) + for _, t := range templates { + if needle == strings.TrimSpace(t.Command) { + return true + } + } + + return false +} + +// newClientSet creates a new client set from the given config. +func newClientSet(config *config.Config) (*dep.ClientSet, error) { + clients := dep.NewClientSet() + + if err := clients.CreateConsulClient(&dep.CreateConsulClientInput{ + Address: config.Consul, + Token: config.Token, + AuthEnabled: config.Auth.Enabled, + AuthUsername: config.Auth.Username, + AuthPassword: config.Auth.Password, + SSLEnabled: config.SSL.Enabled, + SSLVerify: config.SSL.Verify, + SSLCert: config.SSL.Cert, + SSLKey: config.SSL.Key, + SSLCACert: config.SSL.CaCert, + }); err != nil { + return nil, fmt.Errorf("runner: %s", err) + } + + if err := clients.CreateVaultClient(&dep.CreateVaultClientInput{ + Address: config.Vault.Address, + Token: config.Vault.Token, + UnwrapToken: config.Vault.UnwrapToken, + SSLEnabled: config.Vault.SSL.Enabled, + SSLVerify: config.Vault.SSL.Verify, + SSLCert: config.Vault.SSL.Cert, + SSLKey: config.Vault.SSL.Key, + SSLCACert: config.Vault.SSL.CaCert, + }); err != nil { + return nil, fmt.Errorf("runner: %s", err) + } + + return clients, nil +} + +// newWatcher creates a new watcher. 
+func newWatcher(config *config.Config, clients *dep.ClientSet, once bool) (*watch.Watcher, error) { + log.Printf("[INFO] (runner) creating Watcher") + + watcher, err := watch.NewWatcher(&watch.WatcherConfig{ + Clients: clients, + Once: once, + MaxStale: config.MaxStale, + RetryFunc: func(current time.Duration) time.Duration { + return config.Retry + }, + RenewVault: config.Vault.Token != "" && config.Vault.RenewToken, + }) + if err != nil { + return nil, err + } + + return watcher, err +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/mapstructure.go b/vendor/github.com/hashicorp/consul-template/signals/mapstructure.go new file mode 100644 index 00000000000..f21cbd5d60a --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/mapstructure.go @@ -0,0 +1,32 @@ +package signals + +import ( + "reflect" + + "github.com/mitchellh/mapstructure" +) + +// StringToSignalFunc parses a string as a signal based on the signal lookup +// table. If the user supplied an empty string or nil, a special "nil signal" +// is returned. Clients should check for this value and set the response back +// nil after mapstructure finishes parsing. +func StringToSignalFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + + if t.String() != "os.Signal" { + return data, nil + } + + if data == nil || data.(string) == "" { + return SIGNIL, nil + } + + return Parse(data.(string)) + } +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/nil.go b/vendor/github.com/hashicorp/consul-template/signals/nil.go new file mode 100644 index 00000000000..2c20645b3be --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/nil.go @@ -0,0 +1,7 @@ +package signals + +// NilSignal is a special signal that is blank or "nil" +type NilSignal int + +func (s *NilSignal) String() string { return "SIGNIL" } +func (s *NilSignal) Signal() {} diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals.go b/vendor/github.com/hashicorp/consul-template/signals/signals.go new file mode 100644 index 00000000000..a26261e379e --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/signals.go @@ -0,0 +1,32 @@ +package signals + +import ( + "fmt" + "os" + "sort" + "strings" +) + +var SIGNIL os.Signal = new(NilSignal) + +var ValidSignals []string + +func init() { + valid := make([]string, 0, len(SignalLookup)) + for k, _ := range SignalLookup { + valid = append(valid, k) + } + sort.Strings(valid) + ValidSignals = valid +} + +// Parse parses the given string as a signal. If the signal is not found, +// an error is returned. 
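StringToSignalFunc is meant to be plugged into a mapstructure decoder so that config strings like "SIGHUP" decode straight into os.Signal fields. A hedged sketch of that wiring; the exec struct and its field name below are illustrative, not the real consul-template config type.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/consul-template/signals"
	"github.com/mitchellh/mapstructure"
)

// exec is an illustrative config struct, not part of this patch.
type exec struct {
	ReloadSignal os.Signal `mapstructure:"reload_signal"`
}

func main() {
	var out exec
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: signals.StringToSignalFunc(),
		Result:     &out,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(map[string]interface{}{"reload_signal": "SIGHUP"}); err != nil {
		log.Fatal(err)
	}
	// Per the hook's contract, an empty string would have produced SIGNIL,
	// which callers are expected to translate back to nil after decoding.
	if out.ReloadSignal == signals.SIGNIL {
		out.ReloadSignal = nil
	}
	fmt.Println(out.ReloadSignal) // hangup
}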
+func Parse(s string) (os.Signal, error) { + sig, ok := SignalLookup[strings.ToUpper(s)] + if !ok { + return nil, fmt.Errorf("invalid signal %q - valid signals are %q", + sig, ValidSignals) + } + return sig, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals_unix.go b/vendor/github.com/hashicorp/consul-template/signals/signals_unix.go new file mode 100644 index 00000000000..77b70d85d73 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/signals_unix.go @@ -0,0 +1,39 @@ +// +build linux darwin freebsd openbsd solaris netbsd + +package signals + +import ( + "os" + "syscall" +) + +var SignalLookup = map[string]os.Signal{ + "SIGABRT": syscall.SIGABRT, + "SIGALRM": syscall.SIGALRM, + "SIGBUS": syscall.SIGBUS, + "SIGCHLD": syscall.SIGCHLD, + "SIGCONT": syscall.SIGCONT, + "SIGFPE": syscall.SIGFPE, + "SIGHUP": syscall.SIGHUP, + "SIGILL": syscall.SIGILL, + "SIGINT": syscall.SIGINT, + "SIGIO": syscall.SIGIO, + "SIGIOT": syscall.SIGIOT, + "SIGKILL": syscall.SIGKILL, + "SIGPIPE": syscall.SIGPIPE, + "SIGPROF": syscall.SIGPROF, + "SIGQUIT": syscall.SIGQUIT, + "SIGSEGV": syscall.SIGSEGV, + "SIGSTOP": syscall.SIGSTOP, + "SIGSYS": syscall.SIGSYS, + "SIGTERM": syscall.SIGTERM, + "SIGTRAP": syscall.SIGTRAP, + "SIGTSTP": syscall.SIGTSTP, + "SIGTTIN": syscall.SIGTTIN, + "SIGTTOU": syscall.SIGTTOU, + "SIGURG": syscall.SIGURG, + "SIGUSR1": syscall.SIGUSR1, + "SIGUSR2": syscall.SIGUSR2, + "SIGXCPU": syscall.SIGXCPU, + "SIGXFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals_windows.go b/vendor/github.com/hashicorp/consul-template/signals/signals_windows.go new file mode 100644 index 00000000000..e1204a67dea --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/signals_windows.go @@ -0,0 +1,24 @@ +// +build windows + +package signals + +import ( + "os" + "syscall" +) + +var SignalLookup = map[string]os.Signal{ + "SIGABRT": syscall.SIGABRT, + "SIGALRM": syscall.SIGALRM, + "SIGBUS": syscall.SIGBUS, + "SIGFPE": syscall.SIGFPE, + "SIGHUP": syscall.SIGHUP, + "SIGILL": syscall.SIGILL, + "SIGINT": syscall.SIGINT, + "SIGKILL": syscall.SIGKILL, + "SIGPIPE": syscall.SIGPIPE, + "SIGQUIT": syscall.SIGQUIT, + "SIGSEGV": syscall.SIGSEGV, + "SIGTERM": syscall.SIGTERM, + "SIGTRAP": syscall.SIGTRAP, +} diff --git a/vendor/github.com/hashicorp/consul-template/template/brain.go b/vendor/github.com/hashicorp/consul-template/template/brain.go new file mode 100644 index 00000000000..ca3f6633030 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/brain.go @@ -0,0 +1,74 @@ +package template + +import ( + "sync" + + dep "github.com/hashicorp/consul-template/dependency" +) + +// Brain is what Template uses to determine the values that are +// available for template parsing. +type Brain struct { + sync.RWMutex + + // data is the map of individual dependencies (by HashCode()) and the most + // recent data for that dependency. + data map[string]interface{} + + // receivedData is an internal tracker of which dependencies have stored data + // in the brain. + receivedData map[string]struct{} +} + +// NewBrain creates a new Brain with empty values for each +// of the key structs. +func NewBrain() *Brain { + return &Brain{ + data: make(map[string]interface{}), + receivedData: make(map[string]struct{}), + } +} + +// Remember accepts a dependency and the data to store associated with that +// dep. 
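Parse is case-insensitive thanks to the ToUpper above, and unknown names fail with an error that lists the platform's ValidSignals. A small usage sketch:

package main

import (
	"fmt"
	"syscall"

	"github.com/hashicorp/consul-template/signals"
)

func main() {
	sig, err := signals.Parse("sighup") // case-insensitive: matched as "SIGHUP"
	fmt.Println(sig == syscall.SIGHUP, err) // true <nil>

	_, err = signals.Parse("SIGBOGUS")
	fmt.Println(err) // error mentioning the sorted ValidSignals for this platform
}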
This function converts the given data to a proper type and stores +// it interally. +func (b *Brain) Remember(d dep.Dependency, data interface{}) { + b.Lock() + defer b.Unlock() + + b.data[d.HashCode()] = data + b.receivedData[d.HashCode()] = struct{}{} +} + +// Recall gets the current value for the given dependency in the Brain. +func (b *Brain) Recall(d dep.Dependency) (interface{}, bool) { + b.RLock() + defer b.RUnlock() + + // If we have not received data for this dependency, return now. + if _, ok := b.receivedData[d.HashCode()]; !ok { + return nil, false + } + + return b.data[d.HashCode()], true +} + +// ForceSet is used to force set the value of a dependency +// for a given hash code +func (b *Brain) ForceSet(hashCode string, data interface{}) { + b.Lock() + defer b.Unlock() + + b.data[hashCode] = data + b.receivedData[hashCode] = struct{}{} +} + +// Forget accepts a dependency and removes all associated data with this +// dependency. It also resets the "receivedData" internal map. +func (b *Brain) Forget(d dep.Dependency) { + b.Lock() + defer b.Unlock() + + delete(b.data, d.HashCode()) + delete(b.receivedData, d.HashCode()) +} diff --git a/vendor/github.com/hashicorp/consul-template/template/template.go b/vendor/github.com/hashicorp/consul-template/template/template.go new file mode 100644 index 00000000000..811d4c2e9b7 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/template.go @@ -0,0 +1,173 @@ +package template + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "errors" + "fmt" + "io/ioutil" + "path/filepath" + "text/template" + + dep "github.com/hashicorp/consul-template/dependency" +) + +type Template struct { + // Path is the path to this template on disk. + Path string + + // LeftDelim and RightDelim are the left and right delimiters to use. + LeftDelim, RightDelim string + + // Contents is the string contents for the template. It is either given + // during template creation or read from disk when initialized. + Contents string + + // HexMD5 stores the hex version of the MD5 + HexMD5 string +} + +// NewTemplate creates and parses a new Consul Template template at the given +// path. If the template does not exist, an error is returned. During +// initialization, the template is read and is parsed for dependencies. Any +// errors that occur are returned. +func NewTemplate(path, contents, leftDelim, rightDelim string) (*Template, error) { + // Validate that we are either given the path or the explicit contents + pathEmpty, contentsEmpty := path == "", contents == "" + if !pathEmpty && !contentsEmpty { + return nil, errors.New("Either specify template path or content, not both") + } else if pathEmpty && contentsEmpty { + return nil, errors.New("Must specify template path or content") + } + + template := &Template{ + Path: path, + Contents: contents, + LeftDelim: leftDelim, + RightDelim: rightDelim, + } + if err := template.init(); err != nil { + return nil, err + } + + return template, nil +} + +// ID returns an identifier for the template +func (t *Template) ID() string { + return t.HexMD5 +} + +// Execute evaluates this template in the context of the given brain. +// +// The first return value is the list of used dependencies. +// The second return value is the list of missing dependencies. +// The third return value is the rendered text. +// The fourth return value any error that occurs. 
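Putting Template and Brain together: the first Execute reports the key dependency as missing and renders an empty value; once a value is Remembered for that dependency (normally by the runner's watcher), a second Execute renders it. A hedged sketch with error handling elided for brevity:

package main

import (
	"fmt"

	dep "github.com/hashicorp/consul-template/dependency"
	"github.com/hashicorp/consul-template/template"
)

func main() {
	// Inline contents with default delimiters; path and contents are mutually exclusive.
	tmpl, _ := template.NewTemplate("", `{{ key "service/port" }}`, "", "")
	brain := template.NewBrain()

	_, missing, out, _ := tmpl.Execute(brain)
	fmt.Printf("first pass: %d missing, output %q\n", len(missing), out) // 1 missing, ""

	// Normally the watcher fetches the value from Consul; here we store it directly.
	d, _ := dep.ParseStoreKey("service/port")
	brain.Remember(d, "8080")

	_, _, out, _ = tmpl.Execute(brain)
	fmt.Printf("second pass: %q\n", out) // "8080"
}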
+func (t *Template) Execute(brain *Brain) ([]dep.Dependency, []dep.Dependency, []byte, error) { + usedMap := make(map[string]dep.Dependency) + missingMap := make(map[string]dep.Dependency) + name := filepath.Base(t.Path) + funcs := funcMap(brain, usedMap, missingMap) + + tmpl, err := template.New(name). + Delims(t.LeftDelim, t.RightDelim). + Funcs(funcs). + Parse(t.Contents) + if err != nil { + return nil, nil, nil, fmt.Errorf("template: %s", err) + } + + // TODO: accept an io.Writer instead + buff := new(bytes.Buffer) + if err := tmpl.Execute(buff, nil); err != nil { + return nil, nil, nil, fmt.Errorf("template: %s", err) + } + + // Update this list of this template's dependencies + var used []dep.Dependency + for _, dep := range usedMap { + used = append(used, dep) + } + + // Compile the list of missing dependencies + var missing []dep.Dependency + for _, dep := range missingMap { + missing = append(missing, dep) + } + + return used, missing, buff.Bytes(), nil +} + +// init reads the template file and initializes required variables. +func (t *Template) init() error { + // Render the template + if t.Path != "" { + contents, err := ioutil.ReadFile(t.Path) + if err != nil { + return err + } + t.Contents = string(contents) + } + + // Compute the MD5, encode as hex + hash := md5.Sum([]byte(t.Contents)) + t.HexMD5 = hex.EncodeToString(hash[:]) + + return nil +} + +// funcMap is the map of template functions to their respective functions. +func funcMap(brain *Brain, used, missing map[string]dep.Dependency) template.FuncMap { + return template.FuncMap{ + // API functions + "datacenters": datacentersFunc(brain, used, missing), + "file": fileFunc(brain, used, missing), + "key": keyFunc(brain, used, missing), + "key_or_default": keyWithDefaultFunc(brain, used, missing), + "ls": lsFunc(brain, used, missing), + "node": nodeFunc(brain, used, missing), + "nodes": nodesFunc(brain, used, missing), + "secret": secretFunc(brain, used, missing), + "secrets": secretsFunc(brain, used, missing), + "service": serviceFunc(brain, used, missing), + "services": servicesFunc(brain, used, missing), + "tree": treeFunc(brain, used, missing), + "vault": vaultFunc(brain, used, missing), + + // Helper functions + "byKey": byKey, + "byTag": byTag, + "contains": contains, + "env": env, + "explode": explode, + "in": in, + "loop": loop, + "join": join, + "trimSpace": trimSpace, + "parseBool": parseBool, + "parseFloat": parseFloat, + "parseInt": parseInt, + "parseJSON": parseJSON, + "parseUint": parseUint, + "plugin": plugin, + "regexReplaceAll": regexReplaceAll, + "regexMatch": regexMatch, + "replaceAll": replaceAll, + "timestamp": timestamp, + "toLower": toLower, + "toJSON": toJSON, + "toJSONPretty": toJSONPretty, + "toTitle": toTitle, + "toUpper": toUpper, + "toYAML": toYAML, + "split": split, + + // Math functions + "add": add, + "subtract": subtract, + "multiply": multiply, + "divide": divide, + } +} diff --git a/vendor/github.com/hashicorp/consul-template/template/template_functions.go b/vendor/github.com/hashicorp/consul-template/template/template_functions.go new file mode 100644 index 00000000000..1adf807a547 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/template_functions.go @@ -0,0 +1,989 @@ +package template + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/burntsushi/toml" + dep "github.com/hashicorp/consul-template/dependency" + yaml 
"gopkg.in/yaml.v2" +) + +// now is function that represents the current time in UTC. This is here +// primarily for the tests to override times. +var now = func() time.Time { return time.Now().UTC() } + +// datacentersFunc returns or accumulates datacenter dependencies. +func datacentersFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(...string) ([]string, error) { + return func(s ...string) ([]string, error) { + result := []string{} + + d, err := dep.ParseDatacenters(s...) + if err != nil { + return result, err + } + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + return value.([]string), nil + } + + addDependency(missing, d) + + return result, nil + } +} + +// fileFunc returns or accumulates file dependencies. +func fileFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(string) (string, error) { + return func(s string) (string, error) { + if len(s) == 0 { + return "", nil + } + + d, err := dep.ParseFile(s) + if err != nil { + return "", err + } + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + if value == nil { + return "", nil + } + return value.(string), nil + } + + addDependency(missing, d) + + return "", nil + } +} + +// keyFunc returns or accumulates key dependencies. +func keyFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(string) (string, error) { + return func(s string) (string, error) { + if len(s) == 0 { + return "", nil + } + + d, err := dep.ParseStoreKey(s) + if err != nil { + return "", err + } + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + if value == nil { + return "", nil + } + return value.(string), nil + } + + addDependency(missing, d) + + return "", nil + } +} + +// keyWithDefaultFunc returns or accumulates key dependencies that have a +// default value. +func keyWithDefaultFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(string, string) (string, error) { + return func(s, def string) (string, error) { + if len(s) == 0 { + return def, nil + } + + d, err := dep.ParseStoreKey(s) + if err != nil { + return "", err + } + d.SetDefault(def) + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + if value == nil { + return def, nil + } + return value.(string), nil + } + + addDependency(missing, d) + + return def, nil + } +} + +// lsFunc returns or accumulates keyPrefix dependencies. +func lsFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(string) ([]*dep.KeyPair, error) { + return func(s string) ([]*dep.KeyPair, error) { + result := []*dep.KeyPair{} + + if len(s) == 0 { + return result, nil + } + + d, err := dep.ParseStoreKeyPrefix(s) + if err != nil { + return result, err + } + + addDependency(used, d) + + // Only return non-empty top-level keys + if value, ok := brain.Recall(d); ok { + for _, pair := range value.([]*dep.KeyPair) { + if pair.Key != "" && !strings.Contains(pair.Key, "/") { + result = append(result, pair) + } + } + return result, nil + } + + addDependency(missing, d) + + return result, nil + } +} + +// nodeFunc returns or accumulates catalog node dependency. +func nodeFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(...string) (*dep.NodeDetail, error) { + return func(s ...string) (*dep.NodeDetail, error) { + + d, err := dep.ParseCatalogNode(s...) 
+ if err != nil { + return nil, err + } + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + return value.(*dep.NodeDetail), nil + } + + addDependency(missing, d) + + return nil, nil + } +} + +// nodesFunc returns or accumulates catalog node dependencies. +func nodesFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(...string) ([]*dep.Node, error) { + return func(s ...string) ([]*dep.Node, error) { + result := []*dep.Node{} + + d, err := dep.ParseCatalogNodes(s...) + if err != nil { + return nil, err + } + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + return value.([]*dep.Node), nil + } + + addDependency(missing, d) + + return result, nil + } +} + +// secretFunc returns or accumulates secret dependencies from Vault. +func secretFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(...string) (*dep.Secret, error) { + return func(s ...string) (*dep.Secret, error) { + result := &dep.Secret{} + + if len(s) == 0 { + return result, nil + } + + d, err := dep.ParseVaultSecret(s...) + if err != nil { + return result, nil + } + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + result = value.(*dep.Secret) + return result, nil + } + + addDependency(missing, d) + + return result, nil + } +} + +// secretsFunc returns or accumulates a list of secret dependencies from Vault. +func secretsFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(string) ([]string, error) { + return func(s string) ([]string, error) { + result := []string{} + + if len(s) == 0 { + return result, nil + } + + d, err := dep.ParseVaultSecrets(s) + if err != nil { + return result, nil + } + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + result = value.([]string) + return result, nil + } + + addDependency(missing, d) + + return result, nil + } +} + +// serviceFunc returns or accumulates health service dependencies. +func serviceFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(...string) ([]*dep.HealthService, error) { + return func(s ...string) ([]*dep.HealthService, error) { + result := []*dep.HealthService{} + + if len(s) == 0 || s[0] == "" { + return result, nil + } + + d, err := dep.ParseHealthServices(s...) + if err != nil { + return nil, err + } + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + return value.([]*dep.HealthService), nil + } + + addDependency(missing, d) + + return result, nil + } +} + +// servicesFunc returns or accumulates catalog services dependencies. +func servicesFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(...string) ([]*dep.CatalogService, error) { + return func(s ...string) ([]*dep.CatalogService, error) { + result := []*dep.CatalogService{} + + d, err := dep.ParseCatalogServices(s...) + if err != nil { + return nil, err + } + + addDependency(used, d) + + if value, ok := brain.Recall(d); ok { + return value.([]*dep.CatalogService), nil + } + + addDependency(missing, d) + + return result, nil + } +} + +// treeFunc returns or accumulates keyPrefix dependencies. 
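Every *Func helper in this file follows the same shape: parse the argument into a dependency, record it as used, return the Brain's value if one is cached, and otherwise record it as missing and return a zero value so the template still renders. Condensed, the pattern looks like this; exampleFunc is illustrative only and simply restates what keyFunc and serviceFunc above already do.

// exampleFunc is not part of the patch; it restates the shared shape.
func exampleFunc(brain *Brain,
	used, missing map[string]dep.Dependency) func(string) (string, error) {
	return func(s string) (string, error) {
		d, err := dep.ParseStoreKey(s) // 1. parse the argument into a Dependency
		if err != nil {
			return "", err
		}

		addDependency(used, d) // 2. always mark it as used by this template

		if value, ok := brain.Recall(d); ok { // 3. serve cached data when available
			return value.(string), nil
		}

		addDependency(missing, d) // 4. otherwise report it missing, return a zero value
		return "", nil
	}
}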
+func treeFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(string) ([]*dep.KeyPair, error) { + return func(s string) ([]*dep.KeyPair, error) { + result := []*dep.KeyPair{} + + if len(s) == 0 { + return result, nil + } + + d, err := dep.ParseStoreKeyPrefix(s) + if err != nil { + return result, err + } + + addDependency(used, d) + + // Only return non-empty top-level keys + if value, ok := brain.Recall(d); ok { + for _, pair := range value.([]*dep.KeyPair) { + parts := strings.Split(pair.Key, "/") + if parts[len(parts)-1] != "" { + result = append(result, pair) + } + } + return result, nil + } + + addDependency(missing, d) + + return result, nil + } +} + +// vaultFunc is deprecated. Use secretFunc instead. +func vaultFunc(brain *Brain, + used, missing map[string]dep.Dependency) func(string) (*dep.Secret, error) { + return func(s string) (*dep.Secret, error) { + log.Printf("[WARN] the `vault' template function has been deprecated. " + + "Please use `secret` instead!") + return secretFunc(brain, used, missing)(s) + } +} + +// byKey accepts a slice of KV pairs and returns a map of the top-level +// key to all its subkeys. For example: +// +// elasticsearch/a //=> "1" +// elasticsearch/b //=> "2" +// redis/a/b //=> "3" +// +// Passing the result from Consul through byTag would yield: +// +// map[string]map[string]string{ +// "elasticsearch": &dep.KeyPair{"a": "1"}, &dep.KeyPair{"b": "2"}, +// "redis": &dep.KeyPair{"a/b": "3"} +// } +// +// Note that the top-most key is stripped from the Key value. Keys that have no +// prefix after stripping are removed from the list. +func byKey(pairs []*dep.KeyPair) (map[string]map[string]*dep.KeyPair, error) { + m := make(map[string]map[string]*dep.KeyPair) + for _, pair := range pairs { + parts := strings.Split(pair.Key, "/") + top := parts[0] + key := strings.Join(parts[1:], "/") + + if key == "" { + // Do not add a key if it has no prefix after stripping. + continue + } + + if _, ok := m[top]; !ok { + m[top] = make(map[string]*dep.KeyPair) + } + + newPair := *pair + newPair.Key = key + m[top][key] = &newPair + } + + return m, nil +} + +// byTag is a template func that takes the provided services and +// produces a map based on Service tags. +// +// The map key is a string representing the service tag. The map value is a +// slice of Services which have the tag assigned. +func byTag(in interface{}) (map[string][]interface{}, error) { + m := make(map[string][]interface{}) + + switch typed := in.(type) { + case nil: + case []*dep.CatalogService: + for _, s := range typed { + for _, t := range s.Tags { + m[t] = append(m[t], s) + } + } + case []*dep.HealthService: + for _, s := range typed { + for _, t := range s.Tags { + m[t] = append(m[t], s) + } + } + default: + return nil, fmt.Errorf("byTag: wrong argument type %T", in) + } + + return m, nil +} + +// contains is a function that have reverse arguments of "in" and is designed to +// be used as a pipe instead of a function: +// +// {{ l | contains "thing" }} +// +func contains(v, l interface{}) (bool, error) { + return in(l, v) +} + +// env returns the value of the environment variable set +func env(s string) (string, error) { + return os.Getenv(s), nil +} + +// explode is used to expand a list of keypairs into a deeply-nested hash. 
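A short, package-internal sketch of byKey in action, showing the top-level key being stripped from each entry and keys with no remainder being dropped; this would live alongside the package's tests rather than in this file.

// byKeyExample is illustrative only.
func byKeyExample() {
	pairs := []*dep.KeyPair{
		{Key: "elasticsearch/a", Value: "1"},
		{Key: "elasticsearch/b", Value: "2"},
		{Key: "redis/a/b", Value: "3"},
		{Key: "lonely", Value: "4"}, // no remainder after stripping, so it is dropped
	}

	m, _ := byKey(pairs)
	fmt.Println(m["elasticsearch"]["a"].Value) // 1
	fmt.Println(m["redis"]["a/b"].Value)       // 3
	_, ok := m["lonely"]
	fmt.Println(ok) // false
}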
+func explode(pairs []*dep.KeyPair) (map[string]interface{}, error) { + m := make(map[string]interface{}) + for _, pair := range pairs { + if err := explodeHelper(m, pair.Key, pair.Value, pair.Key); err != nil { + return nil, err + } + } + return m, nil +} + +// explodeHelper is a recursive helper for explode. +func explodeHelper(m map[string]interface{}, k, v, p string) error { + if strings.Contains(k, "/") { + parts := strings.Split(k, "/") + top := parts[0] + key := strings.Join(parts[1:], "/") + + if _, ok := m[top]; !ok { + m[top] = make(map[string]interface{}) + } + nest, ok := m[top].(map[string]interface{}) + if !ok { + return fmt.Errorf("not a map: %q: %q already has value %q", p, top, m[top]) + } + return explodeHelper(nest, key, v, k) + } else { + if k != "" { + m[k] = v + } + } + + return nil +} + +// in seaches for a given value in a given interface. +func in(l, v interface{}) (bool, error) { + lv := reflect.ValueOf(l) + vv := reflect.ValueOf(v) + + switch lv.Kind() { + case reflect.Array, reflect.Slice: + // if the slice contains 'interface' elements, then the element needs to be extracted directly to examine its type, + // otherwise it will just resolve to 'interface'. + var interfaceSlice []interface{} + if reflect.TypeOf(l).Elem().Kind() == reflect.Interface { + interfaceSlice = l.([]interface{}) + } + + for i := 0; i < lv.Len(); i++ { + var lvv reflect.Value + if interfaceSlice != nil { + lvv = reflect.ValueOf(interfaceSlice[i]) + } else { + lvv = lv.Index(i) + } + + switch lvv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch vv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if vv.Int() == lvv.Int() { + return true, nil + } + } + case reflect.Float32, reflect.Float64: + switch vv.Kind() { + case reflect.Float32, reflect.Float64: + if vv.Float() == lvv.Float() { + return true, nil + } + } + case reflect.String: + if vv.Type() == lvv.Type() && vv.String() == lvv.String() { + return true, nil + } + } + } + case reflect.String: + if vv.Type() == lv.Type() && strings.Contains(lv.String(), vv.String()) { + return true, nil + } + } + + return false, nil +} + +// loop accepts varying parameters and differs its behavior. If given one +// parameter, loop will return a goroutine that begins at 0 and loops until the +// given int, increasing the index by 1 each iteration. If given two parameters, +// loop will return a goroutine that begins at the first parameter and loops +// up to but not including the second parameter. 
+// +// // Prints 0 1 2 3 4 +// for _, i := range loop(5) { +// print(i) +// } +// +// // Prints 5 6 7 +// for _, i := range loop(5, 8) { +// print(i) +// } +// +func loop(ints ...int64) (<-chan int64, error) { + var start, stop int64 + switch len(ints) { + case 1: + start, stop = 0, ints[0] + case 2: + start, stop = ints[0], ints[1] + default: + return nil, fmt.Errorf("loop: wrong number of arguments, expected 1 or 2"+ + ", but got %d", len(ints)) + } + + ch := make(chan int64) + + go func() { + for i := start; i < stop; i++ { + ch <- i + } + close(ch) + }() + + return ch, nil +} + +// join is a version of strings.Join that can be piped +func join(sep string, a []string) (string, error) { + return strings.Join(a, sep), nil +} + +// TrimSpace is a version of strings.TrimSpace that can be piped +func trimSpace(s string) (string, error) { + return strings.TrimSpace(s), nil +} + +// parseBool parses a string into a boolean +func parseBool(s string) (bool, error) { + if s == "" { + return false, nil + } + + result, err := strconv.ParseBool(s) + if err != nil { + return false, fmt.Errorf("parseBool: %s", err) + } + return result, nil +} + +// parseFloat parses a string into a base 10 float +func parseFloat(s string) (float64, error) { + if s == "" { + return 0.0, nil + } + + result, err := strconv.ParseFloat(s, 10) + if err != nil { + return 0, fmt.Errorf("parseFloat: %s", err) + } + return result, nil +} + +// parseInt parses a string into a base 10 int +func parseInt(s string) (int64, error) { + if s == "" { + return 0, nil + } + + result, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("parseInt: %s", err) + } + return result, nil +} + +// parseJSON returns a structure for valid JSON +func parseJSON(s string) (interface{}, error) { + if s == "" { + return map[string]interface{}{}, nil + } + + var data interface{} + if err := json.Unmarshal([]byte(s), &data); err != nil { + return nil, err + } + return data, nil +} + +// parseUint parses a string into a base 10 int +func parseUint(s string) (uint64, error) { + if s == "" { + return 0, nil + } + + result, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("parseUint: %s", err) + } + return result, nil +} + +// plugin executes a subprocess as the given command string. It is assumed the +// resulting command returns JSON which is then parsed and returned as the +// value for use in the template. +func plugin(name string, args ...string) (string, error) { + if name == "" { + return "", nil + } + + stdout, stderr := new(bytes.Buffer), new(bytes.Buffer) + + // Strip and trim each arg or else some plugins get confused with the newline + // characters + jsons := make([]string, 0, len(args)) + for _, arg := range args { + if v := strings.TrimSpace(arg); v != "" { + jsons = append(jsons, v) + } + } + + cmd := exec.Command(name, jsons...) 
+ cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Start(); err != nil { + return "", fmt.Errorf("exec %q: %s\n\nstdout:\n\n%s\n\nstderr:\n\n%s", + name, err, stdout.Bytes(), stderr.Bytes()) + } + + done := make(chan error, 1) + go func() { + done <- cmd.Wait() + }() + + select { + case <-time.After(5 * time.Second): + if cmd.Process != nil { + if err := cmd.Process.Kill(); err != nil { + return "", fmt.Errorf("exec %q: failed to kill", name) + } + } + <-done // Allow the goroutine to exit + return "", fmt.Errorf("exec %q: did not finish", name) + case err := <-done: + if err != nil { + return "", fmt.Errorf("exec %q: %s\n\nstdout:\n\n%s\n\nstderr:\n\n%s", + name, err, stdout.Bytes(), stderr.Bytes()) + } + } + + return strings.TrimSpace(stdout.String()), nil +} + +// replaceAll replaces all occurrences of a value in a string with the given +// replacement value. +func replaceAll(f, t, s string) (string, error) { + return strings.Replace(s, f, t, -1), nil +} + +// regexReplaceAll replaces all occurrences of a regular expression with +// the given replacement value. +func regexReplaceAll(re, pl, s string) (string, error) { + compiled, err := regexp.Compile(re) + if err != nil { + return "", err + } + return compiled.ReplaceAllString(s, pl), nil +} + +// regexMatch returns true or false if the string matches +// the given regular expression +func regexMatch(re, s string) (bool, error) { + compiled, err := regexp.Compile(re) + if err != nil { + return false, err + } + return compiled.MatchString(s), nil +} + +// split is a version of strings.Split that can be piped +func split(sep, s string) ([]string, error) { + s = strings.TrimSpace(s) + if s == "" { + return []string{}, nil + } + return strings.Split(s, sep), nil +} + +// timestamp returns the current UNIX timestamp in UTC. If an argument is +// specified, it will be used to format the timestamp. +func timestamp(s ...string) (string, error) { + switch len(s) { + case 0: + return now().Format(time.RFC3339), nil + case 1: + if s[0] == "unix" { + return strconv.FormatInt(now().Unix(), 10), nil + } + return now().Format(s[0]), nil + default: + return "", fmt.Errorf("timestamp: wrong number of arguments, expected 0 or 1"+ + ", but got %d", len(s)) + } +} + +// toLower converts the given string (usually by a pipe) to lowercase. +func toLower(s string) (string, error) { + return strings.ToLower(s), nil +} + +// toJSON converts the given structure into a deeply nested JSON string. +func toJSON(i interface{}) (string, error) { + result, err := json.Marshal(i) + if err != nil { + return "", fmt.Errorf("toJSON: %s", err) + } + return string(bytes.TrimSpace(result)), err +} + +// toJSONPretty converts the given structure into a deeply nested pretty JSON +// string. +func toJSONPretty(m map[string]interface{}) (string, error) { + result, err := json.MarshalIndent(m, "", " ") + if err != nil { + return "", fmt.Errorf("toJSONPretty: %s", err) + } + return string(bytes.TrimSpace(result)), err +} + +// toTitle converts the given string (usually by a pipe) to titlecase. +func toTitle(s string) (string, error) { + return strings.Title(s), nil +} + +// toUpper converts the given string (usually by a pipe) to uppercase. +func toUpper(s string) (string, error) { + return strings.ToUpper(s), nil +} + +// toYAML converts the given structure into a deeply nested YAML string. 
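plugin runs its subprocess with a five-second guard: Wait happens in a goroutine, and a select races it against a timer, killing the process if the timer wins. The same pattern in isolation, as a minimal standalone sketch:

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// runWithTimeout starts cmd and kills it if it does not finish in time,
// mirroring the Wait/select/Kill sequence in plugin above.
func runWithTimeout(cmd *exec.Cmd, timeout time.Duration) error {
	if err := cmd.Start(); err != nil {
		return err
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case <-time.After(timeout):
		if cmd.Process != nil {
			_ = cmd.Process.Kill()
		}
		<-done // allow the Wait goroutine to exit
		return fmt.Errorf("command did not finish within %s", timeout)
	case err := <-done:
		return err
	}
}

func main() {
	fmt.Println(runWithTimeout(exec.Command("sleep", "10"), 1*time.Second))
}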
+func toYAML(m map[string]interface{}) (string, error) { + result, err := yaml.Marshal(m) + if err != nil { + return "", fmt.Errorf("toYAML: %s", err) + } + return string(bytes.TrimSpace(result)), nil +} + +// toTOML converts the given structure into a deeply nested TOML string. +func toTOML(m map[string]interface{}) (string, error) { + buf := bytes.NewBuffer([]byte{}) + enc := toml.NewEncoder(buf) + if err := enc.Encode(m); err != nil { + return "", fmt.Errorf("toTOML: %s", err) + } + result, err := ioutil.ReadAll(buf) + if err != nil { + return "", fmt.Errorf("toTOML: %s", err) + } + return string(bytes.TrimSpace(result)), nil +} + +// add returns the sum of a and b. +func add(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() + bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() + int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) + bv.Float(), nil + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) + bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() + bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) + bv.Float(), nil + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() + float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() + float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() + bv.Float(), nil + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", av, a) + } +} + +// subtract returns the difference of b from a. 
+func subtract(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() - bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() - int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) - bv.Float(), nil + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) - bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() - bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) - bv.Float(), nil + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() - float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() - float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() - bv.Float(), nil + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", av, a) + } +} + +// multiply returns the product of a and b. +func multiply(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() * bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() * int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) * bv.Float(), nil + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) * bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() * bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) * bv.Float(), nil + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() * float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() * float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() * bv.Float(), nil + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", av, a) + } +} + +// divide returns the division of b from a. 
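These math helpers take their arguments in reverse order (b first, then a) so that a template pipe reads naturally: the piped value lands in a and the inline argument in b. A package-internal sketch of the effect:

// In a template the piped value is passed last, so it becomes a:
//
//	{{ 10 | subtract 3 }}   renders 7  (a=10, b=3, a-b)
//	{{ 10 | divide 2 }}     renders 5  (a=10, b=2, a/b)
//	{{ 2 | add 5 }}         renders 7
//
// Calling the helper directly the way text/template would:
v, _ := subtract(3, 10) // b=3, a=10, so the result is 10 - 3 = 7
fmt.Println(v)          // 7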
+func divide(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() / bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() / int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) / bv.Float(), nil + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) / bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() / bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) / bv.Float(), nil + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() / float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() / float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() / bv.Float(), nil + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", av, a) + } +} + +// addDependency adds the given Dependency to the map. +func addDependency(m map[string]dep.Dependency, d dep.Dependency) { + if _, ok := m[d.HashCode()]; !ok { + m[d.HashCode()] = d + } +} diff --git a/vendor/github.com/hashicorp/consul-template/watch/mapstructure.go b/vendor/github.com/hashicorp/consul-template/watch/mapstructure.go new file mode 100644 index 00000000000..ef4e6d2d04c --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/watch/mapstructure.go @@ -0,0 +1,27 @@ +package watch + +import ( + "reflect" + + "github.com/mitchellh/mapstructure" +) + +// StringToWaitDurationHookFunc returns a function that converts strings to wait +// value. This is designed to be used with mapstructure for parsing out a wait +// value. +func StringToWaitDurationHookFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(new(Wait)) { + return data, nil + } + + // Convert it by parsing + return ParseWait(data.(string)) + } +} diff --git a/vendor/github.com/hashicorp/consul-template/watch/view.go b/vendor/github.com/hashicorp/consul-template/watch/view.go new file mode 100644 index 00000000000..9cea79a73ae --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/watch/view.go @@ -0,0 +1,218 @@ +package watch + +import ( + "fmt" + "log" + "reflect" + "sync" + "time" + + dep "github.com/hashicorp/consul-template/dependency" +) + +const ( + // The amount of time to do a blocking query for + defaultWaitTime = 60 * time.Second +) + +// View is a representation of a Dependency and the most recent data it has +// received from Consul. 
+type View struct { + // Dependency is the dependency that is associated with this View + Dependency dep.Dependency + + // config is the configuration for the watcher that created this view and + // contains important information about how this view should behave when + // polling including retry functions and handling stale queries. + config *WatcherConfig + + // Data is the most-recently-received data from Consul for this View + dataLock sync.RWMutex + data interface{} + receivedData bool + lastIndex uint64 + + // stopCh is used to stop polling on this View + stopCh chan struct{} +} + +// NewView creates a new view object from the given Consul API client and +// Dependency. If an error occurs, it will be returned. +func NewView(config *WatcherConfig, d dep.Dependency) (*View, error) { + if config == nil { + return nil, fmt.Errorf("view: missing config") + } + + if d == nil { + return nil, fmt.Errorf("view: missing dependency") + } + + return &View{ + Dependency: d, + config: config, + stopCh: make(chan struct{}), + }, nil +} + +// Data returns the most-recently-received data from Consul for this View. +func (v *View) Data() interface{} { + v.dataLock.RLock() + defer v.dataLock.RUnlock() + return v.data +} + +// DataAndLastIndex returns the most-recently-received data from Consul for +// this view, along with the last index. This is atomic so you will get the +// index that goes with the data you are fetching. +func (v *View) DataAndLastIndex() (interface{}, uint64) { + v.dataLock.RLock() + defer v.dataLock.RUnlock() + return v.data, v.lastIndex +} + +// poll queries the Consul instance for data using the fetch function, but also +// accounts for interrupts on the interrupt channel. This allows the poll +// function to be fired in a goroutine, but then halted even if the fetch +// function is in the middle of a blocking query. +func (v *View) poll(viewCh chan<- *View, errCh chan<- error) { + defaultRetry := v.config.RetryFunc(1 * time.Second) + currentRetry := defaultRetry + + for { + doneCh, fetchErrCh := make(chan struct{}, 1), make(chan error, 1) + go v.fetch(doneCh, fetchErrCh) + + select { + case <-doneCh: + // Reset the retry to avoid exponentially incrementing retries when we + // have some successful requests + currentRetry = defaultRetry + + log.Printf("[INFO] (view) %s received data", v.display()) + select { + case <-v.stopCh: + case viewCh <- v: + } + + // If we are operating in once mode, do not loop - we received data at + // least once which is the API promise here. + if v.config.Once { + return + } + case err := <-fetchErrCh: + log.Printf("[ERR] (view) %s %s", v.display(), err) + + // Push the error back up to the watcher + select { + case <-v.stopCh: + case errCh <- err: + } + + // Sleep and retry + if v.config.RetryFunc != nil { + currentRetry = v.config.RetryFunc(currentRetry) + } + log.Printf("[INFO] (view) %s errored, retrying in %s", v.display(), currentRetry) + time.Sleep(currentRetry) + continue + case <-v.stopCh: + log.Printf("[DEBUG] (view) %s stopping poll (received on view stopCh)", v.display()) + return + } + } +} + +// fetch queries the Consul instance for the attached dependency. This API +// promises that either data will be written to doneCh or an error will be +// written to errCh. It is designed to be run in a goroutine that selects the +// result of doneCh and errCh. It is assumed that only one instance of fetch +// is running per View and therefore no locking or mutexes are used. 
+func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error) { + log.Printf("[DEBUG] (view) %s starting fetch", v.display()) + + var allowStale bool + if v.config.MaxStale != 0 { + allowStale = true + } + + for { + // If the view was stopped, short-circuit this loop. This prevents a bug + // where a view can get "lost" in the event Consul Template is reloaded. + select { + case <-v.stopCh: + return + default: + } + + opts := &dep.QueryOptions{ + AllowStale: allowStale, + WaitTime: defaultWaitTime, + WaitIndex: v.lastIndex, + } + data, rm, err := v.Dependency.Fetch(v.config.Clients, opts) + if err != nil { + // ErrStopped is returned by a dependency when it prematurely stopped + // because the upstream process asked for a reload or termination. The + // most likely cause is that the view was stopped due to a configuration + // reload or process interrupt, so we do not want to propagate this error + // to the runner, but we want to stop the fetch routine for this view. + if err != dep.ErrStopped { + errCh <- err + } + return + } + + if rm == nil { + errCh <- fmt.Errorf("consul returned nil response metadata; this " + + "should never happen and is probably a bug in consul-template") + return + } + + if allowStale && rm.LastContact > v.config.MaxStale { + allowStale = false + log.Printf("[DEBUG] (view) %s stale data (last contact exceeded max_stale)", v.display()) + continue + } + + if v.config.MaxStale != 0 { + allowStale = true + } + + if rm.LastIndex == v.lastIndex { + log.Printf("[DEBUG] (view) %s no new data (index was the same)", v.display()) + continue + } + + v.dataLock.Lock() + if rm.LastIndex < v.lastIndex { + log.Printf("[DEBUG] (view) %s had a lower index, resetting", v.display()) + v.lastIndex = 0 + v.dataLock.Unlock() + continue + } + v.lastIndex = rm.LastIndex + + if v.receivedData && reflect.DeepEqual(data, v.data) { + log.Printf("[DEBUG] (view) %s no new data (contents were the same)", v.display()) + v.dataLock.Unlock() + continue + } + v.data = data + v.receivedData = true + v.dataLock.Unlock() + + close(doneCh) + return + } +} + +// display returns a string that represents this view. +func (v *View) display() string { + return v.Dependency.Display() +} + +// stop halts polling of this view. +func (v *View) stop() { + v.Dependency.Stop() + close(v.stopCh) +} diff --git a/vendor/github.com/hashicorp/consul-template/watch/wait.go b/vendor/github.com/hashicorp/consul-template/watch/wait.go new file mode 100644 index 00000000000..72933a88459 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/watch/wait.go @@ -0,0 +1,87 @@ +package watch + +import ( + "errors" + "fmt" + "strings" + "time" +) + +// Wait is the Min/Max duration used by the Watcher +type Wait struct { + // Min and Max are the minimum and maximum time, respectively, to wait for + // data changes before rendering a new template to disk. + Min time.Duration `json:"min" mapstructure:"min"` + Max time.Duration `json:"max" mapstructure:"max"` +} + +// ParseWait parses a string of the format `minimum(:maximum)` into a Wait +// struct. 
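ParseWait accepts either a single duration, in which case the implementation below defaults max to four times min, or an explicit min:max pair. A short usage sketch:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul-template/watch"
)

func main() {
	w, err := watch.ParseWait("5s") // max defaults to 4 * min
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(w.Min, w.Max) // 5s 20s

	w, err = watch.ParseWait("5s:10s")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(w.Min, w.Max, w.IsActive()) // 5s 10s true
}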
+func ParseWait(s string) (*Wait, error) { + if len(strings.TrimSpace(s)) < 1 { + return nil, errors.New("cannot specify empty wait interval") + } + + parts := strings.Split(s, ":") + + var min, max time.Duration + var err error + + if len(parts) == 1 { + min, err = time.ParseDuration(strings.TrimSpace(parts[0])) + if err != nil { + return nil, err + } + + max = 4 * min + } else if len(parts) == 2 { + min, err = time.ParseDuration(strings.TrimSpace(parts[0])) + if err != nil { + return nil, err + } + + max, err = time.ParseDuration(strings.TrimSpace(parts[1])) + if err != nil { + return nil, err + } + } else { + return nil, errors.New("invalid wait interval format") + } + + if min < 0 || max < 0 { + return nil, errors.New("cannot specify a negative wait interval") + } + + if max < min { + return nil, errors.New("wait interval max must be larger than min") + } + + return &Wait{min, max}, nil +} + +// IsActive returns true if this wait is active (non-zero). +func (w *Wait) IsActive() bool { + return w.Min != 0 && w.Max != 0 +} + +// WaitVar implements the Flag.Value interface and allows the user to specify +// a watch interval using Go's flag parsing library. +type WaitVar Wait + +// Set sets the value in the format min[:max] for a wait timer. +func (w *WaitVar) Set(value string) error { + wait, err := ParseWait(value) + if err != nil { + return err + } + + w.Min = wait.Min + w.Max = wait.Max + + return nil +} + +// String returns the string format for this wait variable +func (w *WaitVar) String() string { + return fmt.Sprintf("%s:%s", w.Min, w.Max) +} diff --git a/vendor/github.com/hashicorp/consul-template/watch/watcher.go b/vendor/github.com/hashicorp/consul-template/watch/watcher.go new file mode 100644 index 00000000000..39d98f1365b --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/watch/watcher.go @@ -0,0 +1,211 @@ +package watch + +import ( + "fmt" + "log" + "sync" + "time" + + dep "github.com/hashicorp/consul-template/dependency" +) + +// RetryFunc is a function that defines the retry for a given watcher. The +// function parameter is the current retry (which might be nil), and the +// return value is the new retry. In this way, you can build complex retry +// functions that are based off the previous values. +type RetryFunc func(time.Duration) time.Duration + +// DefaultRetryFunc is the default return function, which just echos whatever +// duration it was given. +var DefaultRetryFunc RetryFunc = func(t time.Duration) time.Duration { + return t +} + +// dataBufferSize is the default number of views to process in a batch. +const dataBufferSize = 2048 + +// Watcher is a top-level manager for views that poll Consul for data. +type Watcher struct { + sync.Mutex + + // DataCh is the chan where Views will be published. + DataCh chan *View + + // ErrCh is the chan where any errors will be published. + ErrCh chan error + + // config is the internal configuration of this watcher. + config *WatcherConfig + + // depViewMap is a map of Templates to Views. Templates are keyed by + // HashCode(). + depViewMap map[string]*View +} + +// WatcherConfig is the configuration for a particular Watcher. +type WatcherConfig struct { + // Client is the mechanism for communicating with the Consul API. + Clients *dep.ClientSet + + // Once is used to determine if the views should poll for data exactly once. + Once bool + + // MaxStale is the maximum staleness of a query. If specified, Consul will + // distribute work among all servers instead of just the leader. 
Specifying + // this option assumes the use of AllowStale. + MaxStale time.Duration + + // RetryFunc is a RetryFunc that represents the way retrys and backoffs + // should occur. + RetryFunc RetryFunc + + // RenewVault determines if the watcher should renew the Vault token as a + // background job. + RenewVault bool +} + +// NewWatcher creates a new watcher using the given API client. +func NewWatcher(config *WatcherConfig) (*Watcher, error) { + watcher := &Watcher{config: config} + if err := watcher.init(); err != nil { + return nil, err + } + + return watcher, nil +} + +// Add adds the given dependency to the list of monitored depedencies +// and start the associated view. If the dependency already exists, no action is +// taken. +// +// If the Dependency already existed, it this function will return false. If the +// view was successfully created, it will return true. If an error occurs while +// creating the view, it will be returned here (but future errors returned by +// the view will happen on the channel). +func (w *Watcher) Add(d dep.Dependency) (bool, error) { + w.Lock() + defer w.Unlock() + + log.Printf("[INFO] (watcher) adding %s", d.Display()) + + if _, ok := w.depViewMap[d.HashCode()]; ok { + log.Printf("[DEBUG] (watcher) %s already exists, skipping", d.Display()) + return false, nil + } + + v, err := NewView(w.config, d) + if err != nil { + return false, err + } + + log.Printf("[DEBUG] (watcher) %s starting", d.Display()) + + w.depViewMap[d.HashCode()] = v + go v.poll(w.DataCh, w.ErrCh) + + return true, nil +} + +// Watching determines if the given dependency is being watched. +func (w *Watcher) Watching(d dep.Dependency) bool { + w.Lock() + defer w.Unlock() + + _, ok := w.depViewMap[d.HashCode()] + return ok +} + +// ForceWatching is used to force setting the internal state of watching +// a depedency. This is only used for unit testing purposes. +func (w *Watcher) ForceWatching(d dep.Dependency, enabled bool) { + w.Lock() + defer w.Unlock() + + if enabled { + w.depViewMap[d.HashCode()] = nil + } else { + delete(w.depViewMap, d.HashCode()) + } +} + +// Remove removes the given dependency from the list and stops the +// associated View. If a View for the given dependency does not exist, this +// function will return false. If the View does exist, this function will return +// true upon successful deletion. +func (w *Watcher) Remove(d dep.Dependency) bool { + w.Lock() + defer w.Unlock() + + log.Printf("[INFO] (watcher) removing %s", d.Display()) + + if view, ok := w.depViewMap[d.HashCode()]; ok { + log.Printf("[DEBUG] (watcher) actually removing %s", d.Display()) + view.stop() + delete(w.depViewMap, d.HashCode()) + return true + } + + log.Printf("[DEBUG] (watcher) %s did not exist, skipping", d.Display()) + return false +} + +// Size returns the number of views this watcher is watching. +func (w *Watcher) Size() int { + w.Lock() + defer w.Unlock() + return len(w.depViewMap) +} + +// Stop halts this watcher and any currently polling views immediately. If a +// view was in the middle of a poll, no data will be returned. 
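NewWatcher only needs a ClientSet plus whatever retry behavior is wanted; the RetryFunc receives the previous retry and returns the next one, so capped exponential backoff is a small closure. A hedged sketch below: the Consul address is a placeholder, errors from ErrCh are not drained, and the program assumes a reachable agent.

package main

import (
	"log"
	"time"

	dep "github.com/hashicorp/consul-template/dependency"
	"github.com/hashicorp/consul-template/watch"
)

func main() {
	clients := dep.NewClientSet()
	if err := clients.CreateConsulClient(&dep.CreateConsulClientInput{
		Address: "127.0.0.1:8500", // placeholder agent address
	}); err != nil {
		log.Fatal(err)
	}

	watcher, err := watch.NewWatcher(&watch.WatcherConfig{
		Clients:  clients,
		MaxStale: 10 * time.Second,
		// Double the previous retry, capped at one minute.
		RetryFunc: func(prev time.Duration) time.Duration {
			if next := prev * 2; next < time.Minute {
				return next
			}
			return time.Minute
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	d, err := dep.ParseCatalogServices()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := watcher.Add(d); err != nil {
		log.Fatal(err)
	}

	view := <-watcher.DataCh // blocks until the first data arrives; failures go to ErrCh
	log.Printf("got %T for %s", view.Data(), view.Dependency.Display())
	watcher.Stop()
}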
+func (w *Watcher) Stop() { + w.Lock() + defer w.Unlock() + + log.Printf("[INFO] (watcher) stopping all views") + + for _, view := range w.depViewMap { + if view == nil { + continue + } + log.Printf("[DEBUG] (watcher) stopping %s", view.Dependency.Display()) + view.stop() + } + + // Reset the map to have no views + w.depViewMap = make(map[string]*View) + + // Close any idle TCP connections + w.config.Clients.Stop() +} + +// init sets up the initial values for the watcher. +func (w *Watcher) init() error { + if w.config == nil { + return fmt.Errorf("watcher: missing config") + } + + if w.config.RetryFunc == nil { + w.config.RetryFunc = DefaultRetryFunc + } + + // Setup the channels + w.DataCh = make(chan *View, dataBufferSize) + w.ErrCh = make(chan error) + + // Setup our map of dependencies to views + w.depViewMap = make(map[string]*View) + + // Start a watcher for the Vault renew if that config was specified + if w.config.RenewVault { + vt, err := dep.ParseVaultToken() + if err != nil { + return fmt.Errorf("watcher: %s", err) + } + if _, err := w.Add(vt); err != nil { + return fmt.Errorf("watcher: %s", err) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/consul/acl/acl.go b/vendor/github.com/hashicorp/consul/acl/acl.go new file mode 100644 index 00000000000..f13dc5b5693 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/acl/acl.go @@ -0,0 +1,476 @@ +package acl + +import ( + "github.com/armon/go-radix" +) + +var ( + // allowAll is a singleton policy which allows all + // non-management actions + allowAll ACL + + // denyAll is a singleton policy which denies all actions + denyAll ACL + + // manageAll is a singleton policy which allows all + // actions, including management + manageAll ACL +) + +func init() { + // Setup the singletons + allowAll = &StaticACL{ + allowManage: false, + defaultAllow: true, + } + denyAll = &StaticACL{ + allowManage: false, + defaultAllow: false, + } + manageAll = &StaticACL{ + allowManage: true, + defaultAllow: true, + } +} + +// ACL is the interface for policy enforcement. +type ACL interface { + // KeyRead checks for permission to read a given key + KeyRead(string) bool + + // KeyWrite checks for permission to write a given key + KeyWrite(string) bool + + // KeyWritePrefix checks for permission to write to an + // entire key prefix. This means there must be no sub-policies + // that deny a write. + KeyWritePrefix(string) bool + + // ServiceWrite checks for permission to read a given service + ServiceWrite(string) bool + + // ServiceRead checks for permission to read a given service + ServiceRead(string) bool + + // EventRead determines if a specific event can be queried. + EventRead(string) bool + + // EventWrite determines if a specific event may be fired. + EventWrite(string) bool + + // PrepardQueryRead determines if a specific prepared query can be read + // to show its contents (this is not used for execution). + PreparedQueryRead(string) bool + + // PreparedQueryWrite determines if a specific prepared query can be + // created, modified, or deleted. + PreparedQueryWrite(string) bool + + // KeyringRead determines if the encryption keyring used in + // the gossip layer can be read. + KeyringRead() bool + + // KeyringWrite determines if the keyring can be manipulated + KeyringWrite() bool + + // OperatorRead determines if the read-only Consul operator functions + // can be used. + OperatorRead() bool + + // OperatorWrite determines if the state-changing Consul operator + // functions can be used. 
+ OperatorWrite() bool + + // ACLList checks for permission to list all the ACLs + ACLList() bool + + // ACLModify checks for permission to manipulate ACLs + ACLModify() bool +} + +// StaticACL is used to implement a base ACL policy. It either +// allows or denies all requests. This can be used as a parent +// ACL to act in a blacklist or whitelist mode. +type StaticACL struct { + allowManage bool + defaultAllow bool +} + +func (s *StaticACL) KeyRead(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) KeyWrite(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) KeyWritePrefix(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) ServiceRead(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) ServiceWrite(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) EventRead(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) EventWrite(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) PreparedQueryRead(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) PreparedQueryWrite(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) KeyringRead() bool { + return s.defaultAllow +} + +func (s *StaticACL) KeyringWrite() bool { + return s.defaultAllow +} + +func (s *StaticACL) OperatorRead() bool { + return s.defaultAllow +} + +func (s *StaticACL) OperatorWrite() bool { + return s.defaultAllow +} + +func (s *StaticACL) ACLList() bool { + return s.allowManage +} + +func (s *StaticACL) ACLModify() bool { + return s.allowManage +} + +// AllowAll returns an ACL rule that allows all operations +func AllowAll() ACL { + return allowAll +} + +// DenyAll returns an ACL rule that denies all operations +func DenyAll() ACL { + return denyAll +} + +// ManageAll returns an ACL rule that can manage all resources +func ManageAll() ACL { + return manageAll +} + +// RootACL returns a possible ACL if the ID matches a root policy +func RootACL(id string) ACL { + switch id { + case "allow": + return allowAll + case "deny": + return denyAll + case "manage": + return manageAll + default: + return nil + } +} + +// PolicyACL is used to wrap a set of ACL policies to provide +// the ACL interface. +type PolicyACL struct { + // parent is used to resolve policy if we have + // no matching rule. + parent ACL + + // keyRules contains the key policies + keyRules *radix.Tree + + // serviceRules contains the service policies + serviceRules *radix.Tree + + // eventRules contains the user event policies + eventRules *radix.Tree + + // preparedQueryRules contains the prepared query policies + preparedQueryRules *radix.Tree + + // keyringRule contains the keyring policies. The keyring has + // a very simple yes/no without prefix matching, so here we + // don't need to use a radix tree. + keyringRule string + + // operatorRule contains the operator policies. + operatorRule string +} + +// New is used to construct a policy based ACL from a set of policies +// and a parent policy to resolve missing cases. 
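// Illustrative sketch (editorial addition, not part of the vendored file):
// how the static singletons and RootACL behave. "allow", "deny" and "manage"
// are the only IDs RootACL recognizes; anything else returns nil and must be
// resolved as a regular token.
func exampleStaticACLs() {
	a := AllowAll()
	d := DenyAll()

	_ = a.KeyRead("foo")  // true: default-allow policy
	_ = d.KeyWrite("foo") // false: default-deny policy
	_ = a.ACLModify()     // false: only ManageAll() grants management

	if root := RootACL("manage"); root != nil {
		_ = root.ACLModify() // true: the management singleton
	}
	_ = RootACL("some-token-id") // nil: not a root policy
}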
+func New(parent ACL, policy *Policy) (*PolicyACL, error) { + p := &PolicyACL{ + parent: parent, + keyRules: radix.New(), + serviceRules: radix.New(), + eventRules: radix.New(), + preparedQueryRules: radix.New(), + } + + // Load the key policy + for _, kp := range policy.Keys { + p.keyRules.Insert(kp.Prefix, kp.Policy) + } + + // Load the service policy + for _, sp := range policy.Services { + p.serviceRules.Insert(sp.Name, sp.Policy) + } + + // Load the event policy + for _, ep := range policy.Events { + p.eventRules.Insert(ep.Event, ep.Policy) + } + + // Load the prepared query policy + for _, pq := range policy.PreparedQueries { + p.preparedQueryRules.Insert(pq.Prefix, pq.Policy) + } + + // Load the keyring policy + p.keyringRule = policy.Keyring + + // Load the operator policy + p.operatorRule = policy.Operator + + return p, nil +} + +// KeyRead returns if a key is allowed to be read +func (p *PolicyACL) KeyRead(key string) bool { + // Look for a matching rule + _, rule, ok := p.keyRules.LongestPrefix(key) + if ok { + switch rule.(string) { + case PolicyRead, PolicyWrite: + return true + default: + return false + } + } + + // No matching rule, use the parent. + return p.parent.KeyRead(key) +} + +// KeyWrite returns if a key is allowed to be written +func (p *PolicyACL) KeyWrite(key string) bool { + // Look for a matching rule + _, rule, ok := p.keyRules.LongestPrefix(key) + if ok { + switch rule.(string) { + case PolicyWrite: + return true + default: + return false + } + } + + // No matching rule, use the parent. + return p.parent.KeyWrite(key) +} + +// KeyWritePrefix returns if a prefix is allowed to be written +func (p *PolicyACL) KeyWritePrefix(prefix string) bool { + // Look for a matching rule that denies + _, rule, ok := p.keyRules.LongestPrefix(prefix) + if ok && rule.(string) != PolicyWrite { + return false + } + + // Look if any of our children have a deny policy + deny := false + p.keyRules.WalkPrefix(prefix, func(path string, rule interface{}) bool { + // We have a rule to prevent a write in a sub-directory! + if rule.(string) != PolicyWrite { + deny = true + return true + } + return false + }) + + // Deny the write if any sub-rules may be violated + if deny { + return false + } + + // If we had a matching rule, done + if ok { + return true + } + + // No matching rule, use the parent. + return p.parent.KeyWritePrefix(prefix) +} + +// ServiceRead checks if reading (discovery) of a service is allowed +func (p *PolicyACL) ServiceRead(name string) bool { + // Check for an exact rule or catch-all + _, rule, ok := p.serviceRules.LongestPrefix(name) + + if ok { + switch rule { + case PolicyRead, PolicyWrite: + return true + default: + return false + } + } + + // No matching rule, use the parent. + return p.parent.ServiceRead(name) +} + +// ServiceWrite checks if writing (registering) a service is allowed +func (p *PolicyACL) ServiceWrite(name string) bool { + // Check for an exact rule or catch-all + _, rule, ok := p.serviceRules.LongestPrefix(name) + + if ok { + switch rule { + case PolicyWrite: + return true + default: + return false + } + } + + // No matching rule, use the parent. + return p.parent.ServiceWrite(name) +} + +// EventRead is used to determine if the policy allows for a +// specific user event to be read. 
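// Illustrative sketch (editorial addition, not part of the vendored file):
// KeyWritePrefix denies a prefix write whenever any rule under that prefix is
// not "write", even if the prefix itself is writable.
func exampleKeyWritePrefix() (bool, error) {
	policy := &Policy{
		Keys: []*KeyPolicy{
			{Prefix: "", Policy: PolicyWrite},            // write everywhere by default
			{Prefix: "foo/private/", Policy: PolicyDeny}, // ...except this subtree
		},
	}

	pacl, err := New(DenyAll(), policy)
	if err != nil {
		return false, err
	}

	_ = pacl.KeyWrite("foo/bar") // true: covered by the "" write rule

	// false: the "foo/private/" deny rule would be violated by a recursive write.
	return pacl.KeyWritePrefix("foo/"), nil
}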
+func (p *PolicyACL) EventRead(name string) bool { + // Longest-prefix match on event names + if _, rule, ok := p.eventRules.LongestPrefix(name); ok { + switch rule { + case PolicyRead, PolicyWrite: + return true + default: + return false + } + } + + // Nothing matched, use parent + return p.parent.EventRead(name) +} + +// EventWrite is used to determine if new events can be created +// (fired) by the policy. +func (p *PolicyACL) EventWrite(name string) bool { + // Longest-prefix match event names + if _, rule, ok := p.eventRules.LongestPrefix(name); ok { + return rule == PolicyWrite + } + + // No match, use parent + return p.parent.EventWrite(name) +} + +// PreparedQueryRead checks if reading (listing) of a prepared query is +// allowed - this isn't execution, just listing its contents. +func (p *PolicyACL) PreparedQueryRead(prefix string) bool { + // Check for an exact rule or catch-all + _, rule, ok := p.preparedQueryRules.LongestPrefix(prefix) + + if ok { + switch rule { + case PolicyRead, PolicyWrite: + return true + default: + return false + } + } + + // No matching rule, use the parent. + return p.parent.PreparedQueryRead(prefix) +} + +// PreparedQueryWrite checks if writing (creating, updating, or deleting) of a +// prepared query is allowed. +func (p *PolicyACL) PreparedQueryWrite(prefix string) bool { + // Check for an exact rule or catch-all + _, rule, ok := p.preparedQueryRules.LongestPrefix(prefix) + + if ok { + switch rule { + case PolicyWrite: + return true + default: + return false + } + } + + // No matching rule, use the parent. + return p.parent.PreparedQueryWrite(prefix) +} + +// KeyringRead is used to determine if the keyring can be +// read by the current ACL token. +func (p *PolicyACL) KeyringRead() bool { + switch p.keyringRule { + case PolicyRead, PolicyWrite: + return true + case PolicyDeny: + return false + default: + return p.parent.KeyringRead() + } +} + +// KeyringWrite determines if the keyring can be manipulated. +func (p *PolicyACL) KeyringWrite() bool { + if p.keyringRule == PolicyWrite { + return true + } + return p.parent.KeyringWrite() +} + +// OperatorRead determines if the read-only operator functions are allowed. +func (p *PolicyACL) OperatorRead() bool { + switch p.operatorRule { + case PolicyRead, PolicyWrite: + return true + case PolicyDeny: + return false + default: + return p.parent.OperatorRead() + } +} + +// OperatorWrite determines if the state-changing operator functions are +// allowed. 
+func (p *PolicyACL) OperatorWrite() bool { + if p.operatorRule == PolicyWrite { + return true + } + return p.parent.OperatorWrite() +} + +// ACLList checks if listing of ACLs is allowed +func (p *PolicyACL) ACLList() bool { + return p.parent.ACLList() +} + +// ACLModify checks if modification of ACLs is allowed +func (p *PolicyACL) ACLModify() bool { + return p.parent.ACLModify() +} diff --git a/vendor/github.com/hashicorp/consul/acl/cache.go b/vendor/github.com/hashicorp/consul/acl/cache.go new file mode 100644 index 00000000000..0387f9fbe9b --- /dev/null +++ b/vendor/github.com/hashicorp/consul/acl/cache.go @@ -0,0 +1,177 @@ +package acl + +import ( + "crypto/md5" + "fmt" + + "github.com/hashicorp/golang-lru" +) + +// FaultFunc is a function used to fault in the parent, +// rules for an ACL given its ID +type FaultFunc func(id string) (string, string, error) + +// aclEntry allows us to store the ACL with it's policy ID +type aclEntry struct { + ACL ACL + Parent string + RuleID string +} + +// Cache is used to implement policy and ACL caching +type Cache struct { + faultfn FaultFunc + aclCache *lru.TwoQueueCache // Cache id -> acl + policyCache *lru.TwoQueueCache // Cache policy -> acl + ruleCache *lru.TwoQueueCache // Cache rules -> policy +} + +// NewCache constructs a new policy and ACL cache of a given size +func NewCache(size int, faultfn FaultFunc) (*Cache, error) { + if size <= 0 { + return nil, fmt.Errorf("Must provide positive cache size") + } + + rc, err := lru.New2Q(size) + if err != nil { + return nil, err + } + + pc, err := lru.New2Q(size) + if err != nil { + return nil, err + } + + ac, err := lru.New2Q(size) + if err != nil { + return nil, err + } + + c := &Cache{ + faultfn: faultfn, + aclCache: ac, + policyCache: pc, + ruleCache: rc, + } + return c, nil +} + +// GetPolicy is used to get a potentially cached policy set. +// If not cached, it will be parsed, and then cached. +func (c *Cache) GetPolicy(rules string) (*Policy, error) { + return c.getPolicy(RuleID(rules), rules) +} + +// getPolicy is an internal method to get a cached policy, +// but it assumes a pre-computed ID +func (c *Cache) getPolicy(id, rules string) (*Policy, error) { + raw, ok := c.ruleCache.Get(id) + if ok { + return raw.(*Policy), nil + } + policy, err := Parse(rules) + if err != nil { + return nil, err + } + policy.ID = id + c.ruleCache.Add(id, policy) + return policy, nil + +} + +// RuleID is used to generate an ID for a rule +func RuleID(rules string) string { + return fmt.Sprintf("%x", md5.Sum([]byte(rules))) +} + +// policyID returns the cache ID for a policy +func (c *Cache) policyID(parent, ruleID string) string { + return parent + ":" + ruleID +} + +// GetACLPolicy is used to get the potentially cached ACL +// policy. If not cached, it will be generated and then cached. +func (c *Cache) GetACLPolicy(id string) (string, *Policy, error) { + // Check for a cached acl + if raw, ok := c.aclCache.Get(id); ok { + cached := raw.(aclEntry) + if raw, ok := c.ruleCache.Get(cached.RuleID); ok { + return cached.Parent, raw.(*Policy), nil + } + } + + // Fault in the rules + parent, rules, err := c.faultfn(id) + if err != nil { + return "", nil, err + } + + // Get cached + policy, err := c.GetPolicy(rules) + return parent, policy, err +} + +// GetACL is used to get a potentially cached ACL policy. +// If not cached, it will be generated and then cached. 
+func (c *Cache) GetACL(id string) (ACL, error) { + // Look for the ACL directly + raw, ok := c.aclCache.Get(id) + if ok { + return raw.(aclEntry).ACL, nil + } + + // Get the rules + parentID, rules, err := c.faultfn(id) + if err != nil { + return nil, err + } + ruleID := RuleID(rules) + + // Check for a compiled ACL + policyID := c.policyID(parentID, ruleID) + var compiled ACL + if raw, ok := c.policyCache.Get(policyID); ok { + compiled = raw.(ACL) + } else { + // Get the policy + policy, err := c.getPolicy(ruleID, rules) + if err != nil { + return nil, err + } + + // Get the parent ACL + parent := RootACL(parentID) + if parent == nil { + parent, err = c.GetACL(parentID) + if err != nil { + return nil, err + } + } + + // Compile the ACL + acl, err := New(parent, policy) + if err != nil { + return nil, err + } + + // Cache the compiled ACL + c.policyCache.Add(policyID, acl) + compiled = acl + } + + // Cache and return the ACL + c.aclCache.Add(id, aclEntry{compiled, parentID, ruleID}) + return compiled, nil +} + +// ClearACL is used to clear the ACL cache if any +func (c *Cache) ClearACL(id string) { + c.aclCache.Remove(id) +} + +// Purge is used to clear all the ACL caches. The +// rule and policy caches are not purged, since they +// are content-hashed anyways. +func (c *Cache) Purge() { + c.aclCache.Purge() +} diff --git a/vendor/github.com/hashicorp/consul/acl/policy.go b/vendor/github.com/hashicorp/consul/acl/policy.go new file mode 100644 index 00000000000..ae69067fea9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/acl/policy.go @@ -0,0 +1,135 @@ +package acl + +import ( + "fmt" + + "github.com/hashicorp/hcl" +) + +const ( + PolicyDeny = "deny" + PolicyRead = "read" + PolicyWrite = "write" +) + +// Policy is used to represent the policy specified by +// an ACL configuration. +type Policy struct { + ID string `hcl:"-"` + Keys []*KeyPolicy `hcl:"key,expand"` + Services []*ServicePolicy `hcl:"service,expand"` + Events []*EventPolicy `hcl:"event,expand"` + PreparedQueries []*PreparedQueryPolicy `hcl:"query,expand"` + Keyring string `hcl:"keyring"` + Operator string `hcl:"operator"` +} + +// KeyPolicy represents a policy for a key +type KeyPolicy struct { + Prefix string `hcl:",key"` + Policy string +} + +func (k *KeyPolicy) GoString() string { + return fmt.Sprintf("%#v", *k) +} + +// ServicePolicy represents a policy for a service +type ServicePolicy struct { + Name string `hcl:",key"` + Policy string +} + +func (k *ServicePolicy) GoString() string { + return fmt.Sprintf("%#v", *k) +} + +// EventPolicy represents a user event policy. +type EventPolicy struct { + Event string `hcl:",key"` + Policy string +} + +func (e *EventPolicy) GoString() string { + return fmt.Sprintf("%#v", *e) +} + +// PreparedQueryPolicy represents a prepared query policy. +type PreparedQueryPolicy struct { + Prefix string `hcl:",key"` + Policy string +} + +func (e *PreparedQueryPolicy) GoString() string { + return fmt.Sprintf("%#v", *e) +} + +// isPolicyValid makes sure the given string matches one of the valid policies. 
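// Illustrative sketch (editorial addition, not part of the vendored file):
// wiring a Cache to a FaultFunc. The fault function is normally backed by the
// ACL datastore; here it returns a fixed parent ("deny") and a fixed HCL rule
// set for any token ID, purely for demonstration.
func exampleACLCache() (ACL, error) {
	fault := func(id string) (string, string, error) {
		rules := `key "config/" { policy = "write" }`
		return "deny", rules, nil
	}

	cache, err := NewCache(128, fault)
	if err != nil {
		return nil, err
	}

	// GetACL faults in the rules, parses them into a Policy, compiles a
	// PolicyACL against the "deny" parent, and caches the result by token ID.
	compiled, err := cache.GetACL("some-token-id")
	if err != nil {
		return nil, err
	}

	_ = compiled.KeyWrite("config/app") // true: matched by the key rule
	_ = compiled.KeyRead("secret/app")  // false: falls through to the deny parent
	return compiled, nil
}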
+func isPolicyValid(policy string) bool { + switch policy { + case PolicyDeny: + return true + case PolicyRead: + return true + case PolicyWrite: + return true + default: + return false + } +} + +// Parse is used to parse the specified ACL rules into an +// intermediary set of policies, before being compiled into +// the ACL +func Parse(rules string) (*Policy, error) { + // Decode the rules + p := &Policy{} + if rules == "" { + // Hot path for empty rules + return p, nil + } + + if err := hcl.Decode(p, rules); err != nil { + return nil, fmt.Errorf("Failed to parse ACL rules: %v", err) + } + + // Validate the key policy + for _, kp := range p.Keys { + if !isPolicyValid(kp.Policy) { + return nil, fmt.Errorf("Invalid key policy: %#v", kp) + } + } + + // Validate the service policy + for _, sp := range p.Services { + if !isPolicyValid(sp.Policy) { + return nil, fmt.Errorf("Invalid service policy: %#v", sp) + } + } + + // Validate the user event policies + for _, ep := range p.Events { + if !isPolicyValid(ep.Policy) { + return nil, fmt.Errorf("Invalid event policy: %#v", ep) + } + } + + // Validate the prepared query policies + for _, pq := range p.PreparedQueries { + if !isPolicyValid(pq.Policy) { + return nil, fmt.Errorf("Invalid query policy: %#v", pq) + } + } + + // Validate the keyring policy - this one is allowed to be empty + if p.Keyring != "" && !isPolicyValid(p.Keyring) { + return nil, fmt.Errorf("Invalid keyring policy: %#v", p.Keyring) + } + + // Validate the operator policy - this one is allowed to be empty + if p.Operator != "" && !isPolicyValid(p.Operator) { + return nil, fmt.Errorf("Invalid operator policy: %#v", p.Operator) + } + + return p, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 67855c67fbc..87a6c10016c 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -62,8 +62,7 @@ type AgentCheckRegistration struct { AgentServiceCheck } -// AgentServiceCheck is used to create an associated -// check for a service +// AgentServiceCheck is used to define a node or service level check type AgentServiceCheck struct { Script string `json:",omitempty"` DockerContainerID string `json:",omitempty"` @@ -74,6 +73,14 @@ type AgentServiceCheck struct { HTTP string `json:",omitempty"` TCP string `json:",omitempty"` Status string `json:",omitempty"` + + // In Consul 0.7 and later, checks that are associated with a service + // may also contain this optional DeregisterCriticalServiceAfter field, + // which is a timeout in the same Go time format as Interval and TTL. If + // a check is in the critical state for more than this configured value, + // then its associated service (and all of its associated checks) will + // automatically be deregistered. + DeregisterCriticalServiceAfter string `json:",omitempty"` } type AgentServiceChecks []*AgentServiceCheck @@ -198,27 +205,42 @@ func (a *Agent) ServiceDeregister(serviceID string) error { return nil } -// PassTTL is used to set a TTL check to the passing state +// PassTTL is used to set a TTL check to the passing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. 
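// Illustrative sketch (editorial addition, not part of the vendored file):
// registering a TTL check that deregisters its service after staying critical
// for too long, then reporting status with UpdateTTL rather than the
// deprecated PassTTL/WarnTTL/FailTTL helpers. CheckRegister and the Name and
// ServiceID fields on AgentCheckRegistration are assumed from the surrounding
// package; the check and service names are placeholders.
func exampleTTLCheck(agent *Agent) error {
	reg := &AgentCheckRegistration{
		Name:      "web-ttl",
		ServiceID: "web",
		AgentServiceCheck: AgentServiceCheck{
			TTL: "10s",
			// Consul 0.7+: deregister the service if critical for 90 minutes.
			DeregisterCriticalServiceAfter: "90m",
		},
	}
	if err := agent.CheckRegister(reg); err != nil {
		return err
	}

	// Preferred over PassTTL: report status and output in one call.
	return agent.UpdateTTL("web-ttl", "all good", HealthPassing)
}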
func (a *Agent) PassTTL(checkID, note string) error { return a.updateTTL(checkID, note, "pass") } -// WarnTTL is used to set a TTL check to the warning state +// WarnTTL is used to set a TTL check to the warning state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. func (a *Agent) WarnTTL(checkID, note string) error { return a.updateTTL(checkID, note, "warn") } -// FailTTL is used to set a TTL check to the failing state +// FailTTL is used to set a TTL check to the failing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. func (a *Agent) FailTTL(checkID, note string) error { return a.updateTTL(checkID, note, "fail") } // updateTTL is used to update the TTL of a check. This is the internal -// method that uses the old API that's present in Consul versions prior -// to 0.6.4. Since Consul didn't have an analogous "update" API before it -// seemed ok to break this (former) UpdateTTL in favor of the new UpdateTTL -// below, but keep the old Pass/Warn/Fail methods using the old API under the -// hood. +// method that uses the old API that's present in Consul versions prior to +// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed +// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, +// but keep the old Pass/Warn/Fail methods using the old API under the hood. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 and the server endpoints will +// be removed in 0.9. func (a *Agent) updateTTL(checkID, note, status string) error { switch status { case "pass": @@ -240,8 +262,9 @@ func (a *Agent) updateTTL(checkID, note, status string) error { // checkUpdate is the payload for a PUT for a check update. type checkUpdate struct { - // Status us one of the structs.Health* states, "passing", "warning", or - // "critical". + // Status is one of the api.Health* states: HealthPassing + // ("passing"), HealthWarning ("warning"), or HealthCritical + // ("critical"). 
Status string // Output is the information to post to the UI for operators as the diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 590b858e1fc..dd811fde4bf 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -80,6 +80,9 @@ type QueryMeta struct { // How long did the request take RequestTime time.Duration + + // Is address translation enabled for HTTP responses on this agent + AddressTranslationEnabled bool } // WriteMeta is used to return meta data about a write @@ -330,6 +333,7 @@ type request struct { url *url.URL params url.Values body io.Reader + header http.Header obj interface{} } @@ -355,7 +359,7 @@ func (r *request) setQueryOptions(q *QueryOptions) { r.params.Set("wait", durToMsec(q.WaitTime)) } if q.Token != "" { - r.params.Set("token", q.Token) + r.header.Set("X-Consul-Token", q.Token) } if q.Near != "" { r.params.Set("near", q.Near) @@ -399,7 +403,7 @@ func (r *request) setWriteOptions(q *WriteOptions) { r.params.Set("dc", q.Datacenter) } if q.Token != "" { - r.params.Set("token", q.Token) + r.header.Set("X-Consul-Token", q.Token) } } @@ -426,6 +430,7 @@ func (r *request) toHTTP() (*http.Request, error) { req.URL.Host = r.url.Host req.URL.Scheme = r.url.Scheme req.Host = r.url.Host + req.Header = r.header // Setup auth if r.config.HttpAuth != nil { @@ -446,6 +451,7 @@ func (c *Client) newRequest(method, path string) *request { Path: path, }, params: make(map[string][]string), + header: make(http.Header), } if c.config.Datacenter != "" { r.params.Set("dc", c.config.Datacenter) @@ -454,7 +460,7 @@ func (c *Client) newRequest(method, path string) *request { r.params.Set("wait", durToMsec(r.config.WaitTime)) } if c.config.Token != "" { - r.params.Set("token", r.config.Token) + r.header.Set("X-Consul-Token", r.config.Token) } return r } @@ -539,6 +545,15 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error { default: q.KnownLeader = false } + + // Parse X-Consul-Translate-Addresses + switch header.Get("X-Consul-Translate-Addresses") { + case "true": + q.AddressTranslationEnabled = true + default: + q.AddressTranslationEnabled = false + } + return nil } diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go index 52a00b3043f..337772ec0bf 100644 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -1,13 +1,15 @@ package api type Node struct { - Node string - Address string + Node string + Address string + TaggedAddresses map[string]string } type CatalogService struct { Node string Address string + TaggedAddresses map[string]string ServiceID string ServiceName string ServiceAddress string @@ -22,11 +24,12 @@ type CatalogNode struct { } type CatalogRegistration struct { - Node string - Address string - Datacenter string - Service *AgentService - Check *AgentCheck + Node string + Address string + TaggedAddresses map[string]string + Datacenter string + Service *AgentService + Check *AgentCheck } type CatalogDeregistration struct { diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 5bb403f554f..74da949c8d1 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -8,7 +8,6 @@ const ( // HealthAny is special, and is 
used as a wild card, // not as a specific state. HealthAny = "any" - HealthUnknown = "unknown" HealthPassing = "passing" HealthWarning = "warning" HealthCritical = "critical" @@ -122,7 +121,6 @@ func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMet case HealthWarning: case HealthCritical: case HealthPassing: - case HealthUnknown: default: return nil, nil, fmt.Errorf("Unsupported state: %v", state) } diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go index 688b3a09d2f..3dac2583c12 100644 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -23,6 +23,43 @@ type KVPair struct { // KVPairs is a list of KVPair objects type KVPairs []*KVPair +// KVOp constants give possible operations available in a KVTxn. +type KVOp string + +const ( + KVSet KVOp = "set" + KVDelete = "delete" + KVDeleteCAS = "delete-cas" + KVDeleteTree = "delete-tree" + KVCAS = "cas" + KVLock = "lock" + KVUnlock = "unlock" + KVGet = "get" + KVGetTree = "get-tree" + KVCheckSession = "check-session" + KVCheckIndex = "check-index" +) + +// KVTxnOp defines a single operation inside a transaction. +type KVTxnOp struct { + Verb string + Key string + Value []byte + Flags uint64 + Index uint64 + Session string +} + +// KVTxnOps defines a set of operations to be performed inside a single +// transaction. +type KVTxnOps []*KVTxnOp + +// KVTxnResponse has the outcome of a transaction. +type KVTxnResponse struct { + Results []*KVPair + Errors TxnErrors +} + // KV is used to manipulate the K/V API type KV struct { c *Client @@ -238,3 +275,122 @@ func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOption res := strings.Contains(string(buf.Bytes()), "true") return res, qm, nil } + +// TxnOp is the internal format we send to Consul. It's not specific to KV, +// though currently only KV operations are supported. +type TxnOp struct { + KV *KVTxnOp +} + +// TxnOps is a list of transaction operations. +type TxnOps []*TxnOp + +// TxnResult is the internal format we receive from Consul. +type TxnResult struct { + KV *KVPair +} + +// TxnResults is a list of TxnResult objects. +type TxnResults []*TxnResult + +// TxnError is used to return information about an operation in a transaction. +type TxnError struct { + OpIndex int + What string +} + +// TxnErrors is a list of TxnError objects. +type TxnErrors []*TxnError + +// TxnResponse is the internal format we receive from Consul. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// Txn is used to apply multiple KV operations in a single, atomic transaction. +// +// Note that Go will perform the required base64 encoding on the values +// automatically because the type is a byte slice. Transactions are defined as a +// list of operations to perform, using the KVOp constants and KVTxnOp structure +// to define operations. If any operation fails, none of the changes are applied +// to the state store. Note that this hides the internal raw transaction interface +// and munges the input and output types into KV-specific ones for ease of use. +// If there are more non-KV operations in the future we may break out a new +// transaction API client, but it will be easy to keep this KV-specific variant +// supported. +// +// Even though this is generally a write operation, we take a QueryOptions input +// and return a QueryMeta output. 
If the transaction contains only read ops, then +// Consul will fast-path it to a different endpoint internally which supports +// consistency controls, but not blocking. If there are write operations then +// the request will always be routed through raft and any consistency settings +// will be ignored. +// +// Here's an example: +// +// ops := KVTxnOps{ +// &KVTxnOp{ +// Verb: KVLock, +// Key: "test/lock", +// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", +// Value: []byte("hello"), +// }, +// &KVTxnOp{ +// Verb: KVGet, +// Key: "another/key", +// }, +// } +// ok, response, _, err := kv.Txn(&ops, nil) +// +// If there is a problem making the transaction request then an error will be +// returned. Otherwise, the ok value will be true if the transaction succeeded +// or false if it was rolled back. The response is a structured return value which +// will have the outcome of the transaction. Its Results member will have entries +// for each operation. Deleted keys will have a nil entry in the, and to save +// space, the Value of each key in the Results will be nil unless the operation +// is a KVGet. If the transaction was rolled back, the Errors member will have +// entries referencing the index of the operation that failed along with an error +// message. +func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { + r := k.c.newRequest("PUT", "/v1/txn") + r.setQueryOptions(q) + + // Convert into the internal format since this is an all-KV txn. + ops := make(TxnOps, 0, len(txn)) + for _, kvOp := range txn { + ops = append(ops, &TxnOp{KV: kvOp}) + } + r.obj = ops + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return false, nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { + var txnResp TxnResponse + if err := decodeBody(resp, &txnResp); err != nil { + return false, nil, nil, err + } + + // Convert from the internal format. + kvResp := KVTxnResponse{ + Errors: txnResp.Errors, + } + for _, result := range txnResp.Results { + kvResp.Results = append(kvResp.Results, result.KV) + } + return resp.StatusCode == http.StatusOK, &kvResp, qm, nil + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) +} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go new file mode 100644 index 00000000000..48d74f3ca6a --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -0,0 +1,81 @@ +package api + +// Operator can be used to perform low-level operator tasks for Consul. +type Operator struct { + c *Client +} + +// Operator returns a handle to the operator endpoints. +func (c *Client) Operator() *Operator { + return &Operator{c} +} + +// RaftServer has information about a server in the Raft configuration. +type RaftServer struct { + // ID is the unique ID for the server. These are currently the same + // as the address, but they will be changed to a real GUID in a future + // release of Consul. + ID string + + // Node is the node name of the server, as known by Consul, or this + // will be set to "(unknown)" otherwise. + Node string + + // Address is the IP:port of the server, used for Raft communications. 
+ Address string + + // Leader is true if this server is the current cluster leader. + Leader bool + + // Voter is true if this server has a vote in the cluster. This might + // be false if the server is staging and still coming online, or if + // it's a non-voting server, which will be added in a future release of + // Consul. + Voter bool +} + +// RaftConfigration is returned when querying for the current Raft configuration. +type RaftConfiguration struct { + // Servers has the list of servers in the Raft configuration. + Servers []*RaftServer + + // Index has the Raft index of this configuration. + Index uint64 +} + +// RaftGetConfiguration is used to query the current Raft peer set. +func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { + r := op.c.newRequest("GET", "/v1/operator/raft/configuration") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out RaftConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return &out, nil +} + +// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft +// quorum but no longer known to Serf or the catalog) by address in the form of +// "IP:port". +func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") + r.setWriteOptions(q) + + // TODO (slackpad) Currently we made address a query parameter. Once + // IDs are in place this will be DELETE /v1/operator/raft/peer/. + r.params.Set("address", string(address)) + + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go index c8141887c46..63e741e050d 100644 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -25,6 +25,11 @@ type ServiceQuery struct { // Service is the service to query. Service string + // Near allows baking in the name of a node to automatically distance- + // sort from. The magic "_agent" value is supported, which sorts near + // the agent which initiated the request by default. + Near string + // Failover controls what we do if there are no healthy nodes in the // local datacenter. Failover QueryDatacenterOptions @@ -40,6 +45,17 @@ type ServiceQuery struct { Tags []string } +// QueryTemplate carries the arguments for creating a templated query. +type QueryTemplate struct { + // Type specifies the type of the query template. Currently only + // "name_prefix_match" is supported. This field is required. + Type string + + // Regexp allows specifying a regex pattern to match against the name + // of the query being executed. + Regexp string +} + // PrepatedQueryDefinition defines a complete prepared query. type PreparedQueryDefinition struct { // ID is this UUID-based ID for the query, always generated by Consul. @@ -67,6 +83,11 @@ type PreparedQueryDefinition struct { // DNS has options that control how the results of this query are // served over DNS. DNS QueryDNSOptions + + // Template is used to pass through the arguments for creating a + // prepared query with an attached template. If a template is given, + // interpolations are possible in other struct fields. 
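// Illustrative sketch (editorial addition, not part of the vendored file):
// reading the Raft configuration through the operator endpoint and kicking a
// stale peer by its "IP:port" address. The peer address used here is only a
// placeholder.
func exampleRaftOperator(client *Client) error {
	op := client.Operator()

	cfg, err := op.RaftGetConfiguration(nil)
	if err != nil {
		return err
	}
	for _, server := range cfg.Servers {
		if server.Leader {
			// server.Address is the Raft IP:port of the current leader.
			_ = server.Address
		}
	}

	// Remove a peer that is still in the Raft quorum but no longer in the
	// catalog (placeholder address).
	return op.RaftRemovePeerByAddress("10.0.0.99:8300", nil)
}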
+ Template QueryTemplate } // PreparedQueryExecuteResponse has the results of executing a query. diff --git a/vendor/github.com/hashicorp/consul/consul/structs/operator.go b/vendor/github.com/hashicorp/consul/consul/structs/operator.go new file mode 100644 index 00000000000..d564400bf97 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/consul/structs/operator.go @@ -0,0 +1,57 @@ +package structs + +import ( + "github.com/hashicorp/raft" +) + +// RaftServer has information about a server in the Raft configuration. +type RaftServer struct { + // ID is the unique ID for the server. These are currently the same + // as the address, but they will be changed to a real GUID in a future + // release of Consul. + ID raft.ServerID + + // Node is the node name of the server, as known by Consul, or this + // will be set to "(unknown)" otherwise. + Node string + + // Address is the IP:port of the server, used for Raft communications. + Address raft.ServerAddress + + // Leader is true if this server is the current cluster leader. + Leader bool + + // Voter is true if this server has a vote in the cluster. This might + // be false if the server is staging and still coming online, or if + // it's a non-voting server, which will be added in a future release of + // Consul. + Voter bool +} + +// RaftConfigrationResponse is returned when querying for the current Raft +// configuration. +type RaftConfigurationResponse struct { + // Servers has the list of servers in the Raft configuration. + Servers []*RaftServer + + // Index has the Raft index of this configuration. + Index uint64 +} + +// RaftPeerByAddressRequest is used by the Operator endpoint to apply a Raft +// operation on a specific Raft peer by address in the form of "IP:port". +type RaftPeerByAddressRequest struct { + // Datacenter is the target this request is intended for. + Datacenter string + + // Address is the peer to remove, in the form "IP:port". + Address raft.ServerAddress + + // WriteRequest holds the ACL token to go along with this request. + WriteRequest +} + +// RequestDatacenter returns the datacenter for a given request. +func (op *RaftPeerByAddressRequest) RequestDatacenter() string { + return op.Datacenter +} diff --git a/vendor/github.com/hashicorp/consul/consul/structs/prepared_query.go b/vendor/github.com/hashicorp/consul/consul/structs/prepared_query.go new file mode 100644 index 00000000000..5e9c31847ba --- /dev/null +++ b/vendor/github.com/hashicorp/consul/consul/structs/prepared_query.go @@ -0,0 +1,252 @@ +package structs + +// QueryDatacenterOptions sets options about how we fail over if there are no +// healthy nodes in the local datacenter. +type QueryDatacenterOptions struct { + // NearestN is set to the number of remote datacenters to try, based on + // network coordinates. + NearestN int + + // Datacenters is a fixed list of datacenters to try after NearestN. We + // never try a datacenter multiple times, so those are subtracted from + // this list before proceeding. + Datacenters []string +} + +// QueryDNSOptions controls settings when query results are served over DNS. +type QueryDNSOptions struct { + // TTL is the time to live for the served DNS results. + TTL string +} + +// ServiceQuery is used to query for a set of healthy nodes offering a specific +// service. +type ServiceQuery struct { + // Service is the service to query. + Service string + + // Failover controls what we do if there are no healthy nodes in the + // local datacenter. 
+ Failover QueryDatacenterOptions + + // If OnlyPassing is true then we will only include nodes with passing + // health checks (critical AND warning checks will cause a node to be + // discarded) + OnlyPassing bool + + // Near allows the query to always prefer the node nearest the given + // node. If the node does not exist, results are returned in their + // normal randomly-shuffled order. Supplying the magic "_agent" value + // is supported to sort near the agent which initiated the request. + Near string + + // Tags are a set of required and/or disallowed tags. If a tag is in + // this list it must be present. If the tag is preceded with "!" then + // it is disallowed. + Tags []string +} + +const ( + // QueryTemplateTypeNamePrefixMatch uses the Name field of the query as + // a prefix to select the template. + QueryTemplateTypeNamePrefixMatch = "name_prefix_match" +) + +// QueryTemplateOptions controls settings if this query is a template. +type QueryTemplateOptions struct { + // Type, if non-empty, means that this query is a template. This is + // set to one of the QueryTemplateType* constants above. + Type string + + // Regexp is an optional regular expression to use to parse the full + // name, once the prefix match has selected a template. This can be + // used to extract parts of the name and choose a service name, set + // tags, etc. + Regexp string +} + +// PreparedQuery defines a complete prepared query, and is the structure we +// maintain in the state store. +type PreparedQuery struct { + // ID is this UUID-based ID for the query, always generated by Consul. + ID string + + // Name is an optional friendly name for the query supplied by the + // user. NOTE - if this feature is used then it will reduce the security + // of any read ACL associated with this query/service since this name + // can be used to locate nodes with supplying any ACL. + Name string + + // Session is an optional session to tie this query's lifetime to. If + // this is omitted then the query will not expire. + Session string + + // Token is the ACL token used when the query was created, and it is + // used when a query is subsequently executed. This token, or a token + // with management privileges, must be used to change the query later. + Token string + + // Template is used to configure this query as a template, which will + // respond to queries based on the Name, and then will be rendered + // before it is executed. + Template QueryTemplateOptions + + // Service defines a service query (leaving things open for other types + // later). + Service ServiceQuery + + // DNS has options that control how the results of this query are + // served over DNS. + DNS QueryDNSOptions + + RaftIndex +} + +// GetACLPrefix returns the prefix to look up the prepared_query ACL policy for +// this query, and whether the prefix applies to this query. You always need to +// check the ok value before using the prefix. +func (pq *PreparedQuery) GetACLPrefix() (string, bool) { + if pq.Name != "" || pq.Template.Type != "" { + return pq.Name, true + } + + return "", false +} + +type PreparedQueries []*PreparedQuery + +type IndexedPreparedQueries struct { + Queries PreparedQueries + QueryMeta +} + +type PreparedQueryOp string + +const ( + PreparedQueryCreate PreparedQueryOp = "create" + PreparedQueryUpdate PreparedQueryOp = "update" + PreparedQueryDelete PreparedQueryOp = "delete" +) + +// QueryRequest is used to create or change prepared queries. 
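// Illustrative sketch (editorial addition, not part of the vendored file):
// GetACLPrefix ties ACL enforcement to the query Name. Named queries and
// templates (even templates with an empty Name, i.e. the catch-all prefix)
// get a prefix; anonymous non-template queries do not.
func exampleGetACLPrefix() {
	named := &PreparedQuery{Name: "geo-db"}
	prefix, ok := named.GetACLPrefix() // "geo-db", true
	_, _ = prefix, ok

	tmpl := &PreparedQuery{Template: QueryTemplateOptions{Type: QueryTemplateTypeNamePrefixMatch}}
	prefix, ok = tmpl.GetACLPrefix() // "", true: matches the empty-prefix rule

	anon := &PreparedQuery{}
	_, ok = anon.GetACLPrefix() // false: no ACL prefix applies
}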
+type PreparedQueryRequest struct { + // Datacenter is the target this request is intended for. + Datacenter string + + // Op is the operation to apply. + Op PreparedQueryOp + + // Query is the query itself. + Query *PreparedQuery + + // WriteRequest holds the ACL token to go along with this request. + WriteRequest +} + +// RequestDatacenter returns the datacenter for a given request. +func (q *PreparedQueryRequest) RequestDatacenter() string { + return q.Datacenter +} + +// PreparedQuerySpecificRequest is used to get information about a prepared +// query. +type PreparedQuerySpecificRequest struct { + // Datacenter is the target this request is intended for. + Datacenter string + + // QueryID is the ID of a query. + QueryID string + + // QueryOptions (unfortunately named here) controls the consistency + // settings for the query lookup itself, as well as the service lookups. + QueryOptions +} + +// RequestDatacenter returns the datacenter for a given request. +func (q *PreparedQuerySpecificRequest) RequestDatacenter() string { + return q.Datacenter +} + +// PreparedQueryExecuteRequest is used to execute a prepared query. +type PreparedQueryExecuteRequest struct { + // Datacenter is the target this request is intended for. + Datacenter string + + // QueryIDOrName is the ID of a query _or_ the name of one, either can + // be provided. + QueryIDOrName string + + // Limit will trim the resulting list down to the given limit. + Limit int + + // Source is used to sort the results relative to a given node using + // network coordinates. + Source QuerySource + + // Agent is used to carry around a reference to the agent which initiated + // the execute request. Used to distance-sort relative to the local node. + Agent QuerySource + + // QueryOptions (unfortunately named here) controls the consistency + // settings for the query lookup itself, as well as the service lookups. + QueryOptions +} + +// RequestDatacenter returns the datacenter for a given request. +func (q *PreparedQueryExecuteRequest) RequestDatacenter() string { + return q.Datacenter +} + +// PreparedQueryExecuteRemoteRequest is used when running a local query in a +// remote datacenter. +type PreparedQueryExecuteRemoteRequest struct { + // Datacenter is the target this request is intended for. + Datacenter string + + // Query is a copy of the query to execute. We have to ship the entire + // query over since it won't be present in the remote state store. + Query PreparedQuery + + // Limit will trim the resulting list down to the given limit. + Limit int + + // QueryOptions (unfortunately named here) controls the consistency + // settings for the the service lookups. + QueryOptions +} + +// RequestDatacenter returns the datacenter for a given request. +func (q *PreparedQueryExecuteRemoteRequest) RequestDatacenter() string { + return q.Datacenter +} + +// PreparedQueryExecuteResponse has the results of executing a query. +type PreparedQueryExecuteResponse struct { + // Service is the service that was queried. + Service string + + // Nodes has the nodes that were output by the query. + Nodes CheckServiceNodes + + // DNS has the options for serving these results over DNS. + DNS QueryDNSOptions + + // Datacenter is the datacenter that these results came from. + Datacenter string + + // Failovers is a count of how many times we had to query a remote + // datacenter. + Failovers int + + // QueryMeta has freshness information about the query. 
+ QueryMeta +} + +// PreparedQueryExplainResponse has the results when explaining a query/ +type PreparedQueryExplainResponse struct { + // Query has the fully-rendered query. + Query PreparedQuery + + // QueryMeta has freshness information about the query. + QueryMeta +} diff --git a/vendor/github.com/hashicorp/consul/consul/structs/structs.go b/vendor/github.com/hashicorp/consul/consul/structs/structs.go new file mode 100644 index 00000000000..837d34a8bd4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/consul/structs/structs.go @@ -0,0 +1,930 @@ +package structs + +import ( + "bytes" + "fmt" + "math/rand" + "reflect" + "time" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/types" + "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/serf/coordinate" +) + +var ( + ErrNoLeader = fmt.Errorf("No cluster leader") + ErrNoDCPath = fmt.Errorf("No path to datacenter") + ErrNoServers = fmt.Errorf("No known Consul servers") +) + +type MessageType uint8 + +// RaftIndex is used to track the index used while creating +// or modifying a given struct type. +type RaftIndex struct { + CreateIndex uint64 + ModifyIndex uint64 +} + +const ( + RegisterRequestType MessageType = iota + DeregisterRequestType + KVSRequestType + SessionRequestType + ACLRequestType + TombstoneRequestType + CoordinateBatchUpdateType + PreparedQueryRequestType + TxnRequestType +) + +const ( + // IgnoreUnknownTypeFlag is set along with a MessageType + // to indicate that the message type can be safely ignored + // if it is not recognized. This is for future proofing, so + // that new commands can be added in a way that won't cause + // old servers to crash when the FSM attempts to process them. + IgnoreUnknownTypeFlag MessageType = 128 +) + +const ( + // HealthAny is special, and is used as a wild card, + // not as a specific state. + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" +) + +func ValidStatus(s string) bool { + return s == HealthPassing || + s == HealthWarning || + s == HealthCritical +} + +const ( + // Client tokens have rules applied + ACLTypeClient = "client" + + // Management tokens have an always allow policy. + // They are used for token management. + ACLTypeManagement = "management" +) + +const ( + // MaxLockDelay provides a maximum LockDelay value for + // a session. Any value above this will not be respected. + MaxLockDelay = 60 * time.Second +) + +// RPCInfo is used to describe common information about query +type RPCInfo interface { + RequestDatacenter() string + IsRead() bool + AllowStaleRead() bool + ACLToken() string +} + +// QueryOptions is used to specify various flags for read queries +type QueryOptions struct { + // Token is the ACL token ID. If not provided, the 'anonymous' + // token is assumed for backwards compatibility. + Token string + + // If set, wait until query exceeds given index. Must be provided + // with MaxQueryTime. + MinQueryIndex uint64 + + // Provided with MinQueryIndex to wait for change. + MaxQueryTime time.Duration + + // If set, any follower can service the request. Results + // may be arbitrarily stale. + AllowStale bool + + // If set, the leader must verify leadership prior to + // servicing the request. Prevents a stale read. 
+ RequireConsistent bool +} + +// QueryOption only applies to reads, so always true +func (q QueryOptions) IsRead() bool { + return true +} + +func (q QueryOptions) AllowStaleRead() bool { + return q.AllowStale +} + +func (q QueryOptions) ACLToken() string { + return q.Token +} + +type WriteRequest struct { + // Token is the ACL token ID. If not provided, the 'anonymous' + // token is assumed for backwards compatibility. + Token string +} + +// WriteRequest only applies to writes, always false +func (w WriteRequest) IsRead() bool { + return false +} + +func (w WriteRequest) AllowStaleRead() bool { + return false +} + +func (w WriteRequest) ACLToken() string { + return w.Token +} + +// QueryMeta allows a query response to include potentially +// useful metadata about a query +type QueryMeta struct { + // This is the index associated with the read + Index uint64 + + // If AllowStale is used, this is time elapsed since + // last contact between the follower and leader. This + // can be used to gauge staleness. + LastContact time.Duration + + // Used to indicate if there is a known leader node + KnownLeader bool +} + +// RegisterRequest is used for the Catalog.Register endpoint +// to register a node as providing a service. If no service +// is provided, the node is registered. +type RegisterRequest struct { + Datacenter string + Node string + Address string + TaggedAddresses map[string]string + Service *NodeService + Check *HealthCheck + Checks HealthChecks + WriteRequest +} + +func (r *RegisterRequest) RequestDatacenter() string { + return r.Datacenter +} + +// DeregisterRequest is used for the Catalog.Deregister endpoint +// to deregister a node as providing a service. If no service is +// provided the entire node is deregistered. +type DeregisterRequest struct { + Datacenter string + Node string + ServiceID string + CheckID types.CheckID + WriteRequest +} + +func (r *DeregisterRequest) RequestDatacenter() string { + return r.Datacenter +} + +// QuerySource is used to pass along information about the source node +// in queries so that we can adjust the response based on its network +// coordinates. +type QuerySource struct { + Datacenter string + Node string +} + +// DCSpecificRequest is used to query about a specific DC +type DCSpecificRequest struct { + Datacenter string + Source QuerySource + QueryOptions +} + +func (r *DCSpecificRequest) RequestDatacenter() string { + return r.Datacenter +} + +// ServiceSpecificRequest is used to query about a specific service +type ServiceSpecificRequest struct { + Datacenter string + ServiceName string + ServiceTag string + TagFilter bool // Controls tag filtering + Source QuerySource + QueryOptions +} + +func (r *ServiceSpecificRequest) RequestDatacenter() string { + return r.Datacenter +} + +// NodeSpecificRequest is used to request the information about a single node +type NodeSpecificRequest struct { + Datacenter string + Node string + QueryOptions +} + +func (r *NodeSpecificRequest) RequestDatacenter() string { + return r.Datacenter +} + +// ChecksInStateRequest is used to query for nodes in a state +type ChecksInStateRequest struct { + Datacenter string + State string + Source QuerySource + QueryOptions +} + +func (r *ChecksInStateRequest) RequestDatacenter() string { + return r.Datacenter +} + +// Used to return information about a node +type Node struct { + Node string + Address string + TaggedAddresses map[string]string + + RaftIndex +} +type Nodes []*Node + +// Used to return information about a provided services. 
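// Illustrative sketch (editorial addition, not part of the vendored file):
// a blocking, stale-allowed read built from the request/query types above.
// MinQueryIndex and MaxQueryTime together form a blocking query; AllowStale
// lets any follower answer it. Datacenter, service, and tag values are
// placeholders.
func exampleBlockingServiceQuery(lastIndex uint64) *ServiceSpecificRequest {
	return &ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "web",
		ServiceTag:  "primary",
		TagFilter:   true,
		QueryOptions: QueryOptions{
			MinQueryIndex: lastIndex,        // wait until the index moves past this...
			MaxQueryTime:  10 * time.Second, // ...or this much time passes
			AllowStale:    true,             // any server may answer
		},
	}
}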
+// Maps service name to available tags +type Services map[string][]string + +// ServiceNode represents a node that is part of a service. Address and +// TaggedAddresses are node-related fields that are always empty in the state +// store and are filled in on the way out by parseServiceNodes(). This is also +// why PartialClone() skips them, because we know they are blank already so it +// would be a waste of time to copy them. +type ServiceNode struct { + Node string + Address string + TaggedAddresses map[string]string + ServiceID string + ServiceName string + ServiceTags []string + ServiceAddress string + ServicePort int + ServiceEnableTagOverride bool + + RaftIndex +} + +// PartialClone() returns a clone of the given service node, minus the node- +// related fields that get filled in later, Address and TaggedAddresses. +func (s *ServiceNode) PartialClone() *ServiceNode { + tags := make([]string, len(s.ServiceTags)) + copy(tags, s.ServiceTags) + + return &ServiceNode{ + Node: s.Node, + // Skip Address, see above. + // Skip TaggedAddresses, see above. + ServiceID: s.ServiceID, + ServiceName: s.ServiceName, + ServiceTags: tags, + ServiceAddress: s.ServiceAddress, + ServicePort: s.ServicePort, + ServiceEnableTagOverride: s.ServiceEnableTagOverride, + RaftIndex: RaftIndex{ + CreateIndex: s.CreateIndex, + ModifyIndex: s.ModifyIndex, + }, + } +} + +// ToNodeService converts the given service node to a node service. +func (s *ServiceNode) ToNodeService() *NodeService { + return &NodeService{ + ID: s.ServiceID, + Service: s.ServiceName, + Tags: s.ServiceTags, + Address: s.ServiceAddress, + Port: s.ServicePort, + EnableTagOverride: s.ServiceEnableTagOverride, + RaftIndex: RaftIndex{ + CreateIndex: s.CreateIndex, + ModifyIndex: s.ModifyIndex, + }, + } +} + +type ServiceNodes []*ServiceNode + +// NodeService is a service provided by a node +type NodeService struct { + ID string + Service string + Tags []string + Address string + Port int + EnableTagOverride bool + + RaftIndex +} + +// IsSame checks if one NodeService is the same as another, without looking +// at the Raft information (that's why we didn't call it IsEqual). This is +// useful for seeing if an update would be idempotent for all the functional +// parts of the structure. +func (s *NodeService) IsSame(other *NodeService) bool { + if s.ID != other.ID || + s.Service != other.Service || + !reflect.DeepEqual(s.Tags, other.Tags) || + s.Address != other.Address || + s.Port != other.Port || + s.EnableTagOverride != other.EnableTagOverride { + return false + } + + return true +} + +// ToServiceNode converts the given node service to a service node. +func (s *NodeService) ToServiceNode(node string) *ServiceNode { + return &ServiceNode{ + Node: node, + // Skip Address, see ServiceNode definition. + // Skip TaggedAddresses, see ServiceNode definition. 
+ ServiceID: s.ID, + ServiceName: s.Service, + ServiceTags: s.Tags, + ServiceAddress: s.Address, + ServicePort: s.Port, + ServiceEnableTagOverride: s.EnableTagOverride, + RaftIndex: RaftIndex{ + CreateIndex: s.CreateIndex, + ModifyIndex: s.ModifyIndex, + }, + } +} + +type NodeServices struct { + Node *Node + Services map[string]*NodeService +} + +// HealthCheck represents a single check on a given node +type HealthCheck struct { + Node string + CheckID types.CheckID // Unique per-node ID + Name string // Check name + Status string // The current check status + Notes string // Additional notes with the status + Output string // Holds output of script runs + ServiceID string // optional associated service + ServiceName string // optional service name + + RaftIndex +} + +// IsSame checks if one HealthCheck is the same as another, without looking +// at the Raft information (that's why we didn't call it IsEqual). This is +// useful for seeing if an update would be idempotent for all the functional +// parts of the structure. +func (c *HealthCheck) IsSame(other *HealthCheck) bool { + if c.Node != other.Node || + c.CheckID != other.CheckID || + c.Name != other.Name || + c.Status != other.Status || + c.Notes != other.Notes || + c.Output != other.Output || + c.ServiceID != other.ServiceID || + c.ServiceName != other.ServiceName { + return false + } + + return true +} + +// Clone returns a distinct clone of the HealthCheck. +func (c *HealthCheck) Clone() *HealthCheck { + clone := new(HealthCheck) + *clone = *c + return clone +} + +type HealthChecks []*HealthCheck + +// CheckServiceNode is used to provide the node, its service +// definition, as well as a HealthCheck that is associated. +type CheckServiceNode struct { + Node *Node + Service *NodeService + Checks HealthChecks +} +type CheckServiceNodes []CheckServiceNode + +// Shuffle does an in-place random shuffle using the Fisher-Yates algorithm. +func (nodes CheckServiceNodes) Shuffle() { + for i := len(nodes) - 1; i > 0; i-- { + j := rand.Int31n(int32(i + 1)) + nodes[i], nodes[j] = nodes[j], nodes[i] + } +} + +// Filter removes nodes that are failing health checks (and any non-passing +// check if that option is selected). Note that this returns the filtered +// results AND modifies the receiver for performance. +func (nodes CheckServiceNodes) Filter(onlyPassing bool) CheckServiceNodes { + n := len(nodes) +OUTER: + for i := 0; i < n; i++ { + node := nodes[i] + for _, check := range node.Checks { + if check.Status == HealthCritical || + (onlyPassing && check.Status != HealthPassing) { + nodes[i], nodes[n-1] = nodes[n-1], CheckServiceNode{} + n-- + i-- + continue OUTER + } + } + } + return nodes[:n] +} + +// NodeInfo is used to dump all associated information about +// a node. This is currently used for the UI only, as it is +// rather expensive to generate. +type NodeInfo struct { + Node string + Address string + TaggedAddresses map[string]string + Services []*NodeService + Checks []*HealthCheck +} + +// NodeDump is used to dump all the nodes with all their +// associated data. This is currently used for the UI only, +// as it is rather expensive to generate. 
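// Illustrative sketch (editorial addition, not part of the vendored file):
// Filter trims nodes whose checks are critical (or merely non-passing when
// onlyPassing is set), and it reuses the receiver's backing array.
func exampleFilter(nodes CheckServiceNodes) CheckServiceNodes {
	// Shuffle first so callers don't always hammer the same healthy node.
	nodes.Shuffle()

	// Keep only nodes whose every check is passing. A node with a warning
	// check survives Filter(false) but is dropped by Filter(true).
	return nodes.Filter(true)
}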
+type NodeDump []*NodeInfo + +type IndexedNodes struct { + Nodes Nodes + QueryMeta +} + +type IndexedServices struct { + Services Services + QueryMeta +} + +type IndexedServiceNodes struct { + ServiceNodes ServiceNodes + QueryMeta +} + +type IndexedNodeServices struct { + NodeServices *NodeServices + QueryMeta +} + +type IndexedHealthChecks struct { + HealthChecks HealthChecks + QueryMeta +} + +type IndexedCheckServiceNodes struct { + Nodes CheckServiceNodes + QueryMeta +} + +type IndexedNodeDump struct { + Dump NodeDump + QueryMeta +} + +// DirEntry is used to represent a directory entry. This is +// used for values in our Key-Value store. +type DirEntry struct { + LockIndex uint64 + Key string + Flags uint64 + Value []byte + Session string `json:",omitempty"` + + RaftIndex +} + +// Returns a clone of the given directory entry. +func (d *DirEntry) Clone() *DirEntry { + return &DirEntry{ + LockIndex: d.LockIndex, + Key: d.Key, + Flags: d.Flags, + Value: d.Value, + Session: d.Session, + RaftIndex: RaftIndex{ + CreateIndex: d.CreateIndex, + ModifyIndex: d.ModifyIndex, + }, + } +} + +type DirEntries []*DirEntry + +type KVSOp string + +const ( + KVSSet KVSOp = "set" + KVSDelete = "delete" + KVSDeleteCAS = "delete-cas" // Delete with check-and-set + KVSDeleteTree = "delete-tree" + KVSCAS = "cas" // Check-and-set + KVSLock = "lock" // Lock a key + KVSUnlock = "unlock" // Unlock a key + + // The following operations are only available inside of atomic + // transactions via the Txn request. + KVSGet = "get" // Read the key during the transaction. + KVSGetTree = "get-tree" // Read all keys with the given prefix during the transaction. + KVSCheckSession = "check-session" // Check the session holds the key. + KVSCheckIndex = "check-index" // Check the modify index of the key. +) + +// IsWrite returns true if the given operation alters the state store. +func (op KVSOp) IsWrite() bool { + switch op { + case KVSGet, KVSGetTree, KVSCheckSession, KVSCheckIndex: + return false + + default: + return true + } +} + +// KVSRequest is used to operate on the Key-Value store +type KVSRequest struct { + Datacenter string + Op KVSOp // Which operation are we performing + DirEnt DirEntry // Which directory entry + WriteRequest +} + +func (r *KVSRequest) RequestDatacenter() string { + return r.Datacenter +} + +// KeyRequest is used to request a key, or key prefix +type KeyRequest struct { + Datacenter string + Key string + QueryOptions +} + +func (r *KeyRequest) RequestDatacenter() string { + return r.Datacenter +} + +// KeyListRequest is used to list keys +type KeyListRequest struct { + Datacenter string + Prefix string + Seperator string + QueryOptions +} + +func (r *KeyListRequest) RequestDatacenter() string { + return r.Datacenter +} + +type IndexedDirEntries struct { + Entries DirEntries + QueryMeta +} + +type IndexedKeyList struct { + Keys []string + QueryMeta +} + +type SessionBehavior string + +const ( + SessionKeysRelease SessionBehavior = "release" + SessionKeysDelete = "delete" +) + +const ( + SessionTTLMax = 24 * time.Hour + SessionTTLMultiplier = 2 +) + +// Session is used to represent an open session in the KV store. +// This issued to associate node checks with acquired locks. 
+type Session struct { + ID string + Name string + Node string + Checks []types.CheckID + LockDelay time.Duration + Behavior SessionBehavior // What to do when session is invalidated + TTL string + + RaftIndex +} +type Sessions []*Session + +type SessionOp string + +const ( + SessionCreate SessionOp = "create" + SessionDestroy = "destroy" +) + +// SessionRequest is used to operate on sessions +type SessionRequest struct { + Datacenter string + Op SessionOp // Which operation are we performing + Session Session // Which session + WriteRequest +} + +func (r *SessionRequest) RequestDatacenter() string { + return r.Datacenter +} + +// SessionSpecificRequest is used to request a session by ID +type SessionSpecificRequest struct { + Datacenter string + Session string + QueryOptions +} + +func (r *SessionSpecificRequest) RequestDatacenter() string { + return r.Datacenter +} + +type IndexedSessions struct { + Sessions Sessions + QueryMeta +} + +// ACL is used to represent a token and its rules +type ACL struct { + ID string + Name string + Type string + Rules string + + RaftIndex +} +type ACLs []*ACL + +type ACLOp string + +const ( + ACLSet ACLOp = "set" + ACLForceSet = "force-set" // Deprecated, left to backwards compatibility + ACLDelete = "delete" +) + +// IsSame checks if one ACL is the same as another, without looking +// at the Raft information (that's why we didn't call it IsEqual). This is +// useful for seeing if an update would be idempotent for all the functional +// parts of the structure. +func (a *ACL) IsSame(other *ACL) bool { + if a.ID != other.ID || + a.Name != other.Name || + a.Type != other.Type || + a.Rules != other.Rules { + return false + } + + return true +} + +// ACLRequest is used to create, update or delete an ACL +type ACLRequest struct { + Datacenter string + Op ACLOp + ACL ACL + WriteRequest +} + +func (r *ACLRequest) RequestDatacenter() string { + return r.Datacenter +} + +// ACLRequests is a list of ACL change requests. +type ACLRequests []*ACLRequest + +// ACLSpecificRequest is used to request an ACL by ID +type ACLSpecificRequest struct { + Datacenter string + ACL string + QueryOptions +} + +func (r *ACLSpecificRequest) RequestDatacenter() string { + return r.Datacenter +} + +// ACLPolicyRequest is used to request an ACL by ID, conditionally +// filtering on an ID +type ACLPolicyRequest struct { + Datacenter string + ACL string + ETag string + QueryOptions +} + +func (r *ACLPolicyRequest) RequestDatacenter() string { + return r.Datacenter +} + +type IndexedACLs struct { + ACLs ACLs + QueryMeta +} + +type ACLPolicy struct { + ETag string + Parent string + Policy *acl.Policy + TTL time.Duration + QueryMeta +} + +// ACLReplicationStatus provides information about the health of the ACL +// replication system. +type ACLReplicationStatus struct { + Enabled bool + Running bool + SourceDatacenter string + ReplicatedIndex uint64 + LastSuccess time.Time + LastError time.Time +} + +// Coordinate stores a node name with its associated network coordinate. +type Coordinate struct { + Node string + Coord *coordinate.Coordinate +} + +type Coordinates []*Coordinate + +// IndexedCoordinate is used to represent a single node's coordinate from the state +// store. +type IndexedCoordinate struct { + Coord *coordinate.Coordinate + QueryMeta +} + +// IndexedCoordinates is used to represent a list of nodes and their +// corresponding raw coordinates. 
+type IndexedCoordinates struct { + Coordinates Coordinates + QueryMeta +} + +// DatacenterMap is used to represent a list of nodes with their raw coordinates, +// associated with a datacenter. +type DatacenterMap struct { + Datacenter string + Coordinates Coordinates +} + +// CoordinateUpdateRequest is used to update the network coordinate of a given +// node. +type CoordinateUpdateRequest struct { + Datacenter string + Node string + Coord *coordinate.Coordinate + WriteRequest +} + +// RequestDatacenter returns the datacenter for a given update request. +func (c *CoordinateUpdateRequest) RequestDatacenter() string { + return c.Datacenter +} + +// EventFireRequest is used to ask a server to fire +// a Serf event. It is a bit odd, since it doesn't depend on +// the catalog or leader. Any node can respond, so it's not quite +// like a standard write request. This is used only internally. +type EventFireRequest struct { + Datacenter string + Name string + Payload []byte + + // Not using WriteRequest so that any server can process + // the request. It is a bit unusual... + QueryOptions +} + +func (r *EventFireRequest) RequestDatacenter() string { + return r.Datacenter +} + +// EventFireResponse is used to respond to a fire request. +type EventFireResponse struct { + QueryMeta +} + +type TombstoneOp string + +const ( + TombstoneReap TombstoneOp = "reap" +) + +// TombstoneRequest is used to trigger a reaping of the tombstones +type TombstoneRequest struct { + Datacenter string + Op TombstoneOp + ReapIndex uint64 + WriteRequest +} + +func (r *TombstoneRequest) RequestDatacenter() string { + return r.Datacenter +} + +// msgpackHandle is a shared handle for encoding/decoding of structs +var msgpackHandle = &codec.MsgpackHandle{} + +// Decode is used to decode a MsgPack encoded object +func Decode(buf []byte, out interface{}) error { + return codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out) +} + +// Encode is used to encode a MsgPack object with type prefix +func Encode(t MessageType, msg interface{}) ([]byte, error) { + var buf bytes.Buffer + buf.WriteByte(uint8(t)) + err := codec.NewEncoder(&buf, msgpackHandle).Encode(msg) + return buf.Bytes(), err +} + +// CompoundResponse is an interface for gathering multiple responses. It is +// used in cross-datacenter RPC calls where more than 1 datacenter is +// expected to reply. +type CompoundResponse interface { + // Add adds a new response to the compound response + Add(interface{}) + + // New returns an empty response object which can be passed around by + // reference, and then passed to Add() later on. + New() interface{} +} + +type KeyringOp string + +const ( + KeyringList KeyringOp = "list" + KeyringInstall = "install" + KeyringUse = "use" + KeyringRemove = "remove" +) + +// KeyringRequest encapsulates a request to modify an encryption keyring. +// It can be used for install, remove, or use key type operations. +type KeyringRequest struct { + Operation KeyringOp + Key string + Datacenter string + Forwarded bool + QueryOptions +} + +func (r *KeyringRequest) RequestDatacenter() string { + return r.Datacenter +} + +// KeyringResponse is a unified key response and can be used for install, +// remove, use, as well as listing key queries. +type KeyringResponse struct { + WAN bool + Datacenter string + Messages map[string]string + Keys map[string]int + NumNodes int + Error string +} + +// KeyringResponses holds multiple responses to keyring queries. 
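As a rough illustration of the Encode/Decode helpers, the sketch below round-trips a KVSRequest. It assumes the MessageType constant `structs.KVSRequestType`, which is defined elsewhere in the structs package and does not appear in this hunk; the leading type byte written by Encode is stripped before decoding:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/consul/structs"
)

func main() {
	req := &structs.KVSRequest{
		Datacenter: "dc1",
		Op:         structs.KVSSet,
		DirEnt:     structs.DirEntry{Key: "foo", Value: []byte("bar")},
	}

	// Encode prefixes the MsgPack payload with a one-byte message type
	// (structs.KVSRequestType is assumed to exist outside this hunk).
	buf, err := structs.Encode(structs.KVSRequestType, req)
	if err != nil {
		panic(err)
	}

	// Decode expects the raw MsgPack body, so skip the type byte.
	var out structs.KVSRequest
	if err := structs.Decode(buf[1:], &out); err != nil {
		panic(err)
	}
	fmt.Println(out.DirEnt.Key) // "foo"
}
```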
Each +// datacenter replies independently, and KeyringResponses is used as a +// container for the set of all responses. +type KeyringResponses struct { + Responses []*KeyringResponse + QueryMeta +} + +func (r *KeyringResponses) Add(v interface{}) { + val := v.(*KeyringResponses) + r.Responses = append(r.Responses, val.Responses...) +} + +func (r *KeyringResponses) New() interface{} { + return new(KeyringResponses) +} diff --git a/vendor/github.com/hashicorp/consul/consul/structs/txn.go b/vendor/github.com/hashicorp/consul/consul/structs/txn.go new file mode 100644 index 00000000000..3f8035b97ef --- /dev/null +++ b/vendor/github.com/hashicorp/consul/consul/structs/txn.go @@ -0,0 +1,85 @@ +package structs + +import ( + "fmt" +) + +// TxnKVOp is used to define a single operation on the KVS inside a +// transaction +type TxnKVOp struct { + Verb KVSOp + DirEnt DirEntry +} + +// TxnKVResult is used to define the result of a single operation on the KVS +// inside a transaction. +type TxnKVResult *DirEntry + +// TxnOp is used to define a single operation inside a transaction. Only one +// of the types should be filled out per entry. +type TxnOp struct { + KV *TxnKVOp +} + +// TxnOps is a list of operations within a transaction. +type TxnOps []*TxnOp + +// TxnRequest is used to apply multiple operations to the state store in a +// single transaction +type TxnRequest struct { + Datacenter string + Ops TxnOps + WriteRequest +} + +func (r *TxnRequest) RequestDatacenter() string { + return r.Datacenter +} + +// TxnReadRequest is used as a fast path for read-only transactions that don't +// modify the state store. +type TxnReadRequest struct { + Datacenter string + Ops TxnOps + QueryOptions +} + +func (r *TxnReadRequest) RequestDatacenter() string { + return r.Datacenter +} + +// TxnError is used to return information about an error for a specific +// operation. +type TxnError struct { + OpIndex int + What string +} + +// Error returns the string representation of an atomic error. +func (e TxnError) Error() string { + return fmt.Sprintf("op %d: %s", e.OpIndex, e.What) +} + +// TxnErrors is a list of TxnError entries. +type TxnErrors []*TxnError + +// TxnResult is used to define the result of a given operation inside a +// transaction. Only one of the types should be filled out per entry. +type TxnResult struct { + KV TxnKVResult +} + +// TxnResults is a list of TxnResult entries. +type TxnResults []*TxnResult + +// TxnResponse is the structure returned by a TxnRequest. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// TxnReadResponse is the structure returned by a TxnReadRequest. +type TxnReadResponse struct { + TxnResponse + QueryMeta +} diff --git a/vendor/github.com/hashicorp/consul/testutil/README.md b/vendor/github.com/hashicorp/consul/testutil/README.md new file mode 100644 index 00000000000..21eb01d2a7f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/testutil/README.md @@ -0,0 +1,65 @@ +Consul Testing Utilities +======================== + +This package provides some generic helpers to facilitate testing in Consul. + +TestServer +========== + +TestServer is a harness for managing Consul agents and initializing them with +test data. Using it, you can form test clusters, create services, add health +checks, manipulate the K/V store, etc. This test harness is completely decoupled +from Consul's core and API client, meaning it can be easily imported and used in +external unit tests for various applications. 
It works by invoking the Consul +CLI, which means it is a requirement to have Consul installed in the `$PATH`. + +Following is an example usage: + +```go +package my_program + +import ( + "testing" + + "github.com/hashicorp/consul/consul/structs" + "github.com/hashicorp/consul/testutil" +) + +func TestMain(t *testing.T) { + // Create a test Consul server + srv1 := testutil.NewTestServer(t) + defer srv1.Stop() + + // Create a secondary server, passing in configuration + // to avoid bootstrapping as we are forming a cluster. + srv2 := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) { + c.Bootstrap = false + }) + defer srv2.Stop() + + // Join the servers together + srv1.JoinLAN(srv2.LANAddr) + + // Create a test key/value pair + srv1.SetKV("foo", []byte("bar")) + + // Create lots of test key/value pairs + srv1.PopulateKV(map[string][]byte{ + "bar": []byte("123"), + "baz": []byte("456"), + }) + + // Create a service + srv1.AddService("redis", structs.HealthPassing, []string{"master"}) + + // Create a service check + srv1.AddCheck("service:redis", "redis", structs.HealthPassing) + + // Create a node check + srv1.AddCheck("mem", "", structs.HealthCritical) + + // The HTTPAddr field contains the address of the Consul + // API on the new test server instance. + println(srv1.HTTPAddr) +} +``` diff --git a/vendor/github.com/hashicorp/consul/testutil/server.go b/vendor/github.com/hashicorp/consul/testutil/server.go new file mode 100644 index 00000000000..aad60e3866b --- /dev/null +++ b/vendor/github.com/hashicorp/consul/testutil/server.go @@ -0,0 +1,494 @@ +package testutil + +// TestServer is a test helper. It uses a fork/exec model to create +// a test Consul server instance in the background and initialize it +// with some data and/or services. The test server can then be used +// to run a unit test, and offers an easy API to tear itself down +// when the test has completed. The only prerequisite is to have a consul +// binary available on the $PATH. +// +// This package does not use Consul's official API client. This is +// because we use TestServer to test the API client, which would +// otherwise cause an import cycle. + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "os" + "os/exec" + "strings" + "sync/atomic" + + "github.com/hashicorp/consul/consul/structs" + "github.com/hashicorp/go-cleanhttp" +) + +// offset is used to atomically increment the port numbers. +var offset uint64 + +// TestPerformanceConfig configures the performance parameters. +type TestPerformanceConfig struct { + RaftMultiplier uint `json:"raft_multiplier,omitempty"` +} + +// TestPortConfig configures the various ports used for services +// provided by the Consul server. +type TestPortConfig struct { + DNS int `json:"dns,omitempty"` + HTTP int `json:"http,omitempty"` + RPC int `json:"rpc,omitempty"` + SerfLan int `json:"serf_lan,omitempty"` + SerfWan int `json:"serf_wan,omitempty"` + Server int `json:"server,omitempty"` +} + +// TestAddressConfig contains the bind addresses for various +// components of the Consul server. +type TestAddressConfig struct { + HTTP string `json:"http,omitempty"` +} + +// TestServerConfig is the main server configuration struct. 
+type TestServerConfig struct { + NodeName string `json:"node_name"` + Performance *TestPerformanceConfig `json:"performance,omitempty"` + Bootstrap bool `json:"bootstrap,omitempty"` + Server bool `json:"server,omitempty"` + DataDir string `json:"data_dir,omitempty"` + Datacenter string `json:"datacenter,omitempty"` + DisableCheckpoint bool `json:"disable_update_check"` + LogLevel string `json:"log_level,omitempty"` + Bind string `json:"bind_addr,omitempty"` + Addresses *TestAddressConfig `json:"addresses,omitempty"` + Ports *TestPortConfig `json:"ports,omitempty"` + ACLMasterToken string `json:"acl_master_token,omitempty"` + ACLDatacenter string `json:"acl_datacenter,omitempty"` + ACLDefaultPolicy string `json:"acl_default_policy,omitempty"` + Stdout, Stderr io.Writer `json:"-"` +} + +// ServerConfigCallback is a function interface which can be +// passed to NewTestServerConfig to modify the server config. +type ServerConfigCallback func(c *TestServerConfig) + +// defaultServerConfig returns a new TestServerConfig struct +// with all of the listen ports incremented by one. +func defaultServerConfig() *TestServerConfig { + idx := int(atomic.AddUint64(&offset, 1)) + + return &TestServerConfig{ + NodeName: fmt.Sprintf("node%d", idx), + DisableCheckpoint: true, + Performance: &TestPerformanceConfig{ + RaftMultiplier: 1, + }, + Bootstrap: true, + Server: true, + LogLevel: "debug", + Bind: "127.0.0.1", + Addresses: &TestAddressConfig{}, + Ports: &TestPortConfig{ + DNS: 20000 + idx, + HTTP: 21000 + idx, + RPC: 22000 + idx, + SerfLan: 23000 + idx, + SerfWan: 24000 + idx, + Server: 25000 + idx, + }, + } +} + +// TestService is used to serialize a service definition. +type TestService struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Address string `json:",omitempty"` + Port int `json:",omitempty"` +} + +// TestCheck is used to serialize a check definition. +type TestCheck struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + ServiceID string `json:",omitempty"` + TTL string `json:",omitempty"` +} + +// TestingT is an interface wrapper around TestingT +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatal(args ...interface{}) + Skip(args ...interface{}) +} + +// TestKVResponse is what we use to decode KV data. +type TestKVResponse struct { + Value string +} + +// TestServer is the main server wrapper struct. +type TestServer struct { + cmd *exec.Cmd + Config *TestServerConfig + t TestingT + + HTTPAddr string + LANAddr string + WANAddr string + + HttpClient *http.Client +} + +// NewTestServer is an easy helper method to create a new Consul +// test server with the most basic configuration. +func NewTestServer(t TestingT) *TestServer { + return NewTestServerConfig(t, nil) +} + +// NewTestServerConfig creates a new TestServer, and makes a call to +// an optional callback function to modify the configuration. 
+func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer { + if path, err := exec.LookPath("consul"); err != nil || path == "" { + t.Skip("consul not found on $PATH, skipping") + } + + dataDir, err := ioutil.TempDir("", "consul") + if err != nil { + t.Fatalf("err: %s", err) + } + + configFile, err := ioutil.TempFile(dataDir, "config") + if err != nil { + defer os.RemoveAll(dataDir) + t.Fatalf("err: %s", err) + } + + consulConfig := defaultServerConfig() + consulConfig.DataDir = dataDir + + if cb != nil { + cb(consulConfig) + } + + configContent, err := json.Marshal(consulConfig) + if err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := configFile.Write(configContent); err != nil { + t.Fatalf("err: %s", err) + } + configFile.Close() + + stdout := io.Writer(os.Stdout) + if consulConfig.Stdout != nil { + stdout = consulConfig.Stdout + } + + stderr := io.Writer(os.Stderr) + if consulConfig.Stderr != nil { + stderr = consulConfig.Stderr + } + + // Start the server + cmd := exec.Command("consul", "agent", "-config-file", configFile.Name()) + cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Start(); err != nil { + t.Fatalf("err: %s", err) + } + + var httpAddr string + var client *http.Client + if strings.HasPrefix(consulConfig.Addresses.HTTP, "unix://") { + httpAddr = consulConfig.Addresses.HTTP + trans := cleanhttp.DefaultTransport() + trans.Dial = func(_, _ string) (net.Conn, error) { + return net.Dial("unix", httpAddr[7:]) + } + client = &http.Client{ + Transport: trans, + } + } else { + httpAddr = fmt.Sprintf("127.0.0.1:%d", consulConfig.Ports.HTTP) + client = cleanhttp.DefaultClient() + } + + server := &TestServer{ + Config: consulConfig, + cmd: cmd, + t: t, + + HTTPAddr: httpAddr, + LANAddr: fmt.Sprintf("127.0.0.1:%d", consulConfig.Ports.SerfLan), + WANAddr: fmt.Sprintf("127.0.0.1:%d", consulConfig.Ports.SerfWan), + + HttpClient: client, + } + + // Wait for the server to be ready + if consulConfig.Bootstrap { + server.waitForLeader() + } else { + server.waitForAPI() + } + + return server +} + +// Stop stops the test Consul server, and removes the Consul data +// directory once we are done. +func (s *TestServer) Stop() { + defer os.RemoveAll(s.Config.DataDir) + + if err := s.cmd.Process.Kill(); err != nil { + s.t.Errorf("err: %s", err) + } + + // wait for the process to exit to be sure that the data dir can be + // deleted on all platforms. + s.cmd.Wait() +} + +// waitForAPI waits for only the agent HTTP endpoint to start +// responding. This is an indication that the agent has started, +// but will likely return before a leader is elected. +func (s *TestServer) waitForAPI() { + WaitForResult(func() (bool, error) { + resp, err := s.HttpClient.Get(s.url("/v1/agent/self")) + if err != nil { + return false, err + } + defer resp.Body.Close() + if err := s.requireOK(resp); err != nil { + return false, err + } + return true, nil + }, func(err error) { + defer s.Stop() + s.t.Fatalf("err: %s", err) + }) +} + +// waitForLeader waits for the Consul server's HTTP API to become +// available, and then waits for a known leader and an index of +// 1 or more to be observed to confirm leader election is done. 
+func (s *TestServer) waitForLeader() { + WaitForResult(func() (bool, error) { + // Query the API and check the status code + resp, err := s.HttpClient.Get(s.url("/v1/catalog/nodes")) + if err != nil { + return false, err + } + defer resp.Body.Close() + if err := s.requireOK(resp); err != nil { + return false, err + } + + // Ensure we have a leader and a node registration + if leader := resp.Header.Get("X-Consul-KnownLeader"); leader != "true" { + fmt.Println(leader) + return false, fmt.Errorf("Consul leader status: %#v", leader) + } + if resp.Header.Get("X-Consul-Index") == "0" { + return false, fmt.Errorf("Consul index is 0") + } + return true, nil + }, func(err error) { + defer s.Stop() + s.t.Fatalf("err: %s", err) + }) +} + +// url is a helper function which takes a relative URL and +// makes it into a proper URL against the local Consul server. +func (s *TestServer) url(path string) string { + return fmt.Sprintf("http://127.0.0.1:%d%s", s.Config.Ports.HTTP, path) +} + +// requireOK checks the HTTP response code and ensures it is acceptable. +func (s *TestServer) requireOK(resp *http.Response) error { + if resp.StatusCode != 200 { + return fmt.Errorf("Bad status code: %d", resp.StatusCode) + } + return nil +} + +// put performs a new HTTP PUT request. +func (s *TestServer) put(path string, body io.Reader) *http.Response { + req, err := http.NewRequest("PUT", s.url(path), body) + if err != nil { + s.t.Fatalf("err: %s", err) + } + resp, err := s.HttpClient.Do(req) + if err != nil { + s.t.Fatalf("err: %s", err) + } + if err := s.requireOK(resp); err != nil { + defer resp.Body.Close() + s.t.Fatal(err) + } + return resp +} + +// get performs a new HTTP GET request. +func (s *TestServer) get(path string) *http.Response { + resp, err := s.HttpClient.Get(s.url(path)) + if err != nil { + s.t.Fatalf("err: %s", err) + } + if err := s.requireOK(resp); err != nil { + defer resp.Body.Close() + s.t.Fatal(err) + } + return resp +} + +// encodePayload returns a new io.Reader wrapping the encoded contents +// of the payload, suitable for passing directly to a new request. +func (s *TestServer) encodePayload(payload interface{}) io.Reader { + var encoded bytes.Buffer + enc := json.NewEncoder(&encoded) + if err := enc.Encode(payload); err != nil { + s.t.Fatalf("err: %s", err) + } + return &encoded +} + +// JoinLAN is used to join nodes within the same datacenter. +func (s *TestServer) JoinLAN(addr string) { + resp := s.get("/v1/agent/join/" + addr) + resp.Body.Close() +} + +// JoinWAN is used to join remote datacenters together. +func (s *TestServer) JoinWAN(addr string) { + resp := s.get("/v1/agent/join/" + addr + "?wan=1") + resp.Body.Close() +} + +// SetKV sets an individual key in the K/V store. +func (s *TestServer) SetKV(key string, val []byte) { + resp := s.put("/v1/kv/"+key, bytes.NewBuffer(val)) + resp.Body.Close() +} + +// GetKV retrieves a single key and returns its value +func (s *TestServer) GetKV(key string) []byte { + resp := s.get("/v1/kv/" + key) + defer resp.Body.Close() + + raw, err := ioutil.ReadAll(resp.Body) + if err != nil { + s.t.Fatalf("err: %s", err) + } + + var result []*TestKVResponse + if err := json.Unmarshal(raw, &result); err != nil { + s.t.Fatalf("err: %s", err) + } + if len(result) < 1 { + s.t.Fatalf("key does not exist: %s", key) + } + + v, err := base64.StdEncoding.DecodeString(result[0].Value) + if err != nil { + s.t.Fatalf("err: %s", err) + } + + return v +} + +// PopulateKV fills the Consul KV with data from a generic map. 
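The KV helpers above are typically used straight from a test. Below is a small, hypothetical test sketch using only the methods shown in this file; the package and test names are illustrative:

```go
package myapp

import (
	"testing"

	"github.com/hashicorp/consul/testutil"
)

func TestKVHelpers(t *testing.T) {
	srv := testutil.NewTestServer(t)
	defer srv.Stop()

	// SetKV writes through the HTTP API; GetKV reads the key back and
	// fails the test if the key does not exist.
	srv.SetKV("app/config", []byte("v1"))
	if got := string(srv.GetKV("app/config")); got != "v1" {
		t.Fatalf("unexpected value: %q", got)
	}
}
```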
+func (s *TestServer) PopulateKV(data map[string][]byte) { + for k, v := range data { + s.SetKV(k, v) + } +} + +// ListKV returns a list of keys present in the KV store. This will list all +// keys under the given prefix recursively and return them as a slice. +func (s *TestServer) ListKV(prefix string) []string { + resp := s.get("/v1/kv/" + prefix + "?keys") + defer resp.Body.Close() + + raw, err := ioutil.ReadAll(resp.Body) + if err != nil { + s.t.Fatalf("err: %s", err) + } + + var result []string + if err := json.Unmarshal(raw, &result); err != nil { + s.t.Fatalf("err: %s", err) + } + return result +} + +// AddService adds a new service to the Consul instance. It also +// automatically adds a health check with the given status, which +// can be one of "passing", "warning", or "critical". +func (s *TestServer) AddService(name, status string, tags []string) { + svc := &TestService{ + Name: name, + Tags: tags, + } + payload := s.encodePayload(svc) + s.put("/v1/agent/service/register", payload) + + chkName := "service:" + name + chk := &TestCheck{ + Name: chkName, + ServiceID: name, + TTL: "10m", + } + payload = s.encodePayload(chk) + s.put("/v1/agent/check/register", payload) + + switch status { + case structs.HealthPassing: + s.put("/v1/agent/check/pass/"+chkName, nil) + case structs.HealthWarning: + s.put("/v1/agent/check/warn/"+chkName, nil) + case structs.HealthCritical: + s.put("/v1/agent/check/fail/"+chkName, nil) + default: + s.t.Fatalf("Unrecognized status: %s", status) + } +} + +// AddCheck adds a check to the Consul instance. If the serviceID is +// left empty (""), then the check will be associated with the node. +// The check status may be "passing", "warning", or "critical". +func (s *TestServer) AddCheck(name, serviceID, status string) { + chk := &TestCheck{ + ID: name, + Name: name, + TTL: "10m", + } + if serviceID != "" { + chk.ServiceID = serviceID + } + + payload := s.encodePayload(chk) + s.put("/v1/agent/check/register", payload) + + switch status { + case structs.HealthPassing: + s.put("/v1/agent/check/pass/"+name, nil) + case structs.HealthWarning: + s.put("/v1/agent/check/warn/"+name, nil) + case structs.HealthCritical: + s.put("/v1/agent/check/fail/"+name, nil) + default: + s.t.Fatalf("Unrecognized status: %s", status) + } +} diff --git a/vendor/github.com/hashicorp/consul/testutil/wait.go b/vendor/github.com/hashicorp/consul/testutil/wait.go new file mode 100644 index 00000000000..ae2439437ba --- /dev/null +++ b/vendor/github.com/hashicorp/consul/testutil/wait.go @@ -0,0 +1,44 @@ +package testutil + +import ( + "github.com/hashicorp/consul/consul/structs" + "testing" + "time" +) + +type testFn func() (bool, error) +type errorFn func(error) + +func WaitForResult(test testFn, error errorFn) { + retries := 1000 + + for retries > 0 { + time.Sleep(10 * time.Millisecond) + retries-- + + success, err := test() + if success { + return + } + + if retries == 0 { + error(err) + } + } +} + +type rpcFn func(string, interface{}, interface{}) error + +func WaitForLeader(t *testing.T, rpc rpcFn, dc string) structs.IndexedNodes { + var out structs.IndexedNodes + WaitForResult(func() (bool, error) { + args := &structs.DCSpecificRequest{ + Datacenter: dc, + } + err := rpc("Catalog.ListNodes", args, &out) + return out.QueryMeta.KnownLeader && out.Index > 0, err + }, func(err error) { + t.Fatalf("failed to find leader: %v", err) + }) + return out +} diff --git a/vendor/github.com/hashicorp/consul/types/README.md 
b/vendor/github.com/hashicorp/consul/types/README.md new file mode 100644 index 00000000000..da662f4a1c3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/types/README.md @@ -0,0 +1,39 @@ +# Consul `types` Package + +The Go language has a strong type system built into the language. The +`types` package corrals named types into a single package that is terminal in +`go`'s import graph. The `types` package should not have any downstream +dependencies. Each subsystem that defines its own set of types exists in its +own file, but all types are defined in the same package. + +# Why + +> Everything should be made as simple as possible, but not simpler. + +`string` is a useful container and underlying type for identifiers, however +the `string` type is effectively opaque to the compiler in terms of how a +given string is intended to be used. For instance, there is nothing +preventing the following from happening: + +```go +// `map` of Widgets, looked up by ID +var widgetLookup map[string]*Widget +// ... +var widgetID string = "widgetID" +w, found := widgetLookup[widgetID] + +// Bad! +var widgetName string = "name of widget" +w, found := widgetLookup[widgetName] +``` + +but this class of problem is entirely preventable: + +```go +type WidgetID string +var widgetLookup map[WidgetID]*Widget +var widgetName +``` + +TL;DR: intentions and idioms aren't statically checked by compilers. The +`types` package uses Go's strong type system to prevent this class of bug. diff --git a/vendor/github.com/hashicorp/consul/types/checks.go b/vendor/github.com/hashicorp/consul/types/checks.go new file mode 100644 index 00000000000..25a136b4f4b --- /dev/null +++ b/vendor/github.com/hashicorp/consul/types/checks.go @@ -0,0 +1,5 @@ +package types + +// CheckID is a strongly typed string used to uniquely represent a Consul +// Check on an Agent (a CheckID is not globally unique). +type CheckID string diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/LICENSE b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/Makefile b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/Makefile new file mode 100644 index 00000000000..61499c50760 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/Makefile @@ -0,0 +1,17 @@ +DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...) + +test: + go test -timeout=45s ./... + +integ: test + INTEG_TESTS=yes go test -timeout=3s -run=Integ ./... + +deps: + go get -d -v ./... + echo $(DEPS) | xargs -n1 go get -d + +cov: + INTEG_TESTS=yes gocov test github.com/hashicorp/raft | gocov-html > /tmp/coverage.html + open /tmp/coverage.html + +.PHONY: test cov integ deps diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/README.md b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/README.md new file mode 100644 index 00000000000..8778b13dc5c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/README.md @@ -0,0 +1,89 @@ +raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft) +==== + +raft is a [Go](http://www.golang.org) library that manages a replicated +log and can be used with an FSM to manage replicated state machines. It +is library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)). + +The use cases for such a library are far-reaching as replicated state +machines are a key component of many distributed systems. They enable +building Consistent, Partition Tolerant (CP) systems, with limited +fault tolerance as well. + +## Building + +If you wish to build raft you'll need Go version 1.2+ installed. + +Please check your installation with: + +``` +go version +``` + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/raft). + +To prevent complications with cgo, the primary backend `MDBStore` is in a separate repository, +called [raft-mdb](http://github.com/hashicorp/raft-mdb). That is the recommended implementation +for the `LogStore` and `StableStore`. + +A pure Go backend using [BoltDB](https://github.com/boltdb/bolt) is also available called +[raft-boltdb](https://github.com/hashicorp/raft-boltdb). It can also be used as a `LogStore` +and `StableStore`. + +## Protocol + +raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) + +A high level overview of the Raft protocol is described below, but for details please read the full +[Raft paper](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) +followed by the raft source. Any questions about the raft protocol should be sent to the +[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev). + +### Protocol Description + +Raft nodes are always in one of three states: follower, candidate or leader. All +nodes initially start out as a follower. In this state, nodes can accept log entries +from a leader and cast votes. 
If no entries are received for some time, nodes +self-promote to the candidate state. In the candidate state nodes request votes from +their peers. If a candidate receives a quorum of votes, then it is promoted to a leader. +The leader must accept new log entries and replicate to all the other followers. +In addition, if stale reads are not acceptable, all queries must also be performed on +the leader. + +Once a cluster has a leader, it is able to accept new log entries. A client can +request that a leader append a new log entry, which is an opaque binary blob to +Raft. The leader then writes the entry to durable storage and attempts to replicate +to a quorum of followers. Once the log entry is considered *committed*, it can be +*applied* to a finite state machine. The finite state machine is application specific, +and is implemented using an interface. + +An obvious question relates to the unbounded nature of a replicated log. Raft provides +a mechanism by which the current state is snapshotted, and the log is compacted. Because +of the FSM abstraction, restoring the state of the FSM must result in the same state +as a replay of old logs. This allows Raft to capture the FSM state at a point in time, +and then remove all the logs that were used to reach that state. This is performed automatically +without user intervention, and prevents unbounded disk usage as well as minimizing +time spent replaying logs. + +Lastly, there is the issue of updating the peer set when new servers are joining +or existing servers are leaving. As long as a quorum of nodes is available, this +is not an issue as Raft provides mechanisms to dynamically update the peer set. +If a quorum of nodes is unavailable, then this becomes a very challenging issue. +For example, suppose there are only 2 peers, A and B. The quorum size is also +2, meaning both nodes must agree to commit a log entry. If either A or B fails, +it is now impossible to reach quorum. This means the cluster is unable to add, +or remove a node, or commit any additional log entries. This results in *unavailability*. +At this point, manual intervention would be required to remove either A or B, +and to restart the remaining node in bootstrap mode. + +A Raft cluster of 3 nodes can tolerate a single node failure, while a cluster +of 5 can tolerate 2 node failures. The recommended configuration is to either +run 3 or 5 raft servers. This maximizes availability without +greatly sacrificing performance. + +In terms of performance, Raft is comparable to Paxos. Assuming stable leadership, +committing a log entry requires a single round trip to half of the cluster. +Thus performance is bound by disk I/O and network latency. + diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/api.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/api.go new file mode 100644 index 00000000000..ff14131c4aa --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/api.go @@ -0,0 +1,935 @@ +package raft + +import ( + "errors" + "fmt" + "log" + "os" + "strconv" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +var ( + // ErrLeader is returned when an operation can't be completed on a + // leader node. + ErrLeader = errors.New("node is the leader") + + // ErrNotLeader is returned when an operation can't be completed on a + // follower or candidate node. 
+ ErrNotLeader = errors.New("node is not the leader") + + // ErrLeadershipLost is returned when a leader fails to commit a log entry + // because it's been deposed in the process. + ErrLeadershipLost = errors.New("leadership lost while committing log") + + // ErrRaftShutdown is returned when operations are requested against an + // inactive Raft. + ErrRaftShutdown = errors.New("raft is already shutdown") + + // ErrEnqueueTimeout is returned when a command fails due to a timeout. + ErrEnqueueTimeout = errors.New("timed out enqueuing operation") + + // ErrNothingNewToSnapshot is returned when trying to create a snapshot + // but there's nothing new commited to the FSM since we started. + ErrNothingNewToSnapshot = errors.New("nothing new to snapshot") + + // ErrUnsupportedProtocol is returned when an operation is attempted + // that's not supported by the current protocol version. + ErrUnsupportedProtocol = errors.New("operation not supported with current protocol version") + + // ErrCantBootstrap is returned when attempt is made to bootstrap a + // cluster that already has state present. + ErrCantBootstrap = errors.New("bootstrap only works on new clusters") +) + +// Raft implements a Raft node. +type Raft struct { + raftState + + // protocolVersion is used to inter-operate with Raft servers running + // different versions of the library. See comments in config.go for more + // details. + protocolVersion ProtocolVersion + + // applyCh is used to async send logs to the main thread to + // be committed and applied to the FSM. + applyCh chan *logFuture + + // Configuration provided at Raft initialization + conf Config + + // FSM is the client state machine to apply commands to + fsm FSM + + // fsmCommitCh is used to trigger async application of logs to the fsm + fsmCommitCh chan commitTuple + + // fsmRestoreCh is used to trigger a restore from snapshot + fsmRestoreCh chan *restoreFuture + + // fsmSnapshotCh is used to trigger a new snapshot being taken + fsmSnapshotCh chan *reqSnapshotFuture + + // lastContact is the last time we had contact from the + // leader node. This can be used to gauge staleness. + lastContact time.Time + lastContactLock sync.RWMutex + + // Leader is the current cluster leader + leader ServerAddress + leaderLock sync.RWMutex + + // leaderCh is used to notify of leadership changes + leaderCh chan bool + + // leaderState used only while state is leader + leaderState leaderState + + // Stores our local server ID, used to avoid sending RPCs to ourself + localID ServerID + + // Stores our local addr + localAddr ServerAddress + + // Used for our logging + logger *log.Logger + + // LogStore provides durable storage for logs + logs LogStore + + // Used to request the leader to make configuration changes. + configurationChangeCh chan *configurationChangeFuture + + // Tracks the latest configuration and latest committed configuration from + // the log/snapshot. 
+ configurations configurations + + // RPC chan comes from the transport layer + rpcCh <-chan RPC + + // Shutdown channel to exit, protected to prevent concurrent exits + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + // snapshots is used to store and retrieve snapshots + snapshots SnapshotStore + + // snapshotCh is used for user triggered snapshots + snapshotCh chan *snapshotFuture + + // stable is a StableStore implementation for durable state + // It provides stable storage for many fields in raftState + stable StableStore + + // The transport layer we use + trans Transport + + // verifyCh is used to async send verify futures to the main thread + // to verify we are still the leader + verifyCh chan *verifyFuture + + // configurationsCh is used to get the configuration data safely from + // outside of the main thread. + configurationsCh chan *configurationsFuture + + // bootstrapCh is used to attempt an initial bootstrap from outside of + // the main thread. + bootstrapCh chan *bootstrapFuture + + // List of observers and the mutex that protects them. The observers list + // is indexed by an artificial ID which is used for deregistration. + observersLock sync.RWMutex + observers map[uint64]*Observer +} + +// BootstrapCluster initializes a server's storage with the given cluster +// configuration. This should only be called at the beginning of time for the +// cluster, and you absolutely must make sure that you call it with the same +// configuration on all the Voter servers. There is no need to bootstrap +// Nonvoter and Staging servers. +// +// One sane approach is to boostrap a single server with a configuration +// listing just itself as a Voter, then invoke AddVoter() on it to add other +// servers to the cluster. +func BootstrapCluster(conf *Config, logs LogStore, stable StableStore, + snaps SnapshotStore, trans Transport, configuration Configuration) error { + // Validate the Raft server config. + if err := ValidateConfig(conf); err != nil { + return err + } + + // Sanity check the Raft peer configuration. + if err := checkConfiguration(configuration); err != nil { + return err + } + + // Make sure the cluster is in a clean state. + hasState, err := HasExistingState(logs, stable, snaps) + if err != nil { + return fmt.Errorf("failed to check for existing state: %v", err) + } + if hasState { + return ErrCantBootstrap + } + + // Set current term to 1. + if err := stable.SetUint64(keyCurrentTerm, 1); err != nil { + return fmt.Errorf("failed to save current term: %v", err) + } + + // Append configuration entry to log. + entry := &Log{ + Index: 1, + Term: 1, + } + if conf.ProtocolVersion < 3 { + entry.Type = LogRemovePeerDeprecated + entry.Data = encodePeers(configuration, trans) + } else { + entry.Type = LogConfiguration + entry.Data = encodeConfiguration(configuration) + } + if err := logs.StoreLog(entry); err != nil { + return fmt.Errorf("failed to append configuration entry to log: %v", err) + } + + return nil +} + +// RecoverCluster is used to manually force a new configuration in order to +// recover from a loss of quorum where the current configuration cannot be +// restored, such as when several servers die at the same time. This works by +// reading all the current state for this server, creating a snapshot with the +// supplied configuration, and then truncating the Raft log. 
This is the only +// safe way to force a given configuration without actually altering the log to +// insert any new entries, which could cause conflicts with other servers with +// different state. +// +// WARNING! This operation implicitly commits all entries in the Raft log, so +// in general this is an extremely unsafe operation. If you've lost your other +// servers and are performing a manual recovery, then you've also lost the +// commit information, so this is likely the best you can do, but you should be +// aware that calling this can cause Raft log entries that were in the process +// of being replicated but not yet be committed to be committed. +// +// Note the FSM passed here is used for the snapshot operations and will be +// left in a state that should not be used by the application. Be sure to +// discard this FSM and any associated state and provide a fresh one when +// calling NewRaft later. +// +// A typical way to recover the cluster is to shut down all servers and then +// run RecoverCluster on every server using an identical configuration. When +// the cluster is then restarted, and election should occur and then Raft will +// resume normal operation. If it's desired to make a particular server the +// leader, this can be used to inject a new configuration with that server as +// the sole voter, and then join up other new clean-state peer servers using +// the usual APIs in order to bring the cluster back into a known state. +func RecoverCluster(conf *Config, fsm FSM, logs LogStore, stable StableStore, + snaps SnapshotStore, trans Transport, configuration Configuration) error { + // Validate the Raft server config. + if err := ValidateConfig(conf); err != nil { + return err + } + + // Sanity check the Raft peer configuration. + if err := checkConfiguration(configuration); err != nil { + return err + } + + // Refuse to recover if there's no existing state. This would be safe to + // do, but it is likely an indication of an operator error where they + // expect data to be there and it's not. By refusing, we force them + // to show intent to start a cluster fresh by explicitly doing a + // bootstrap, rather than quietly fire up a fresh cluster here. + hasState, err := HasExistingState(logs, stable, snaps) + if err != nil { + return fmt.Errorf("failed to check for existing state: %v", err) + } + if !hasState { + return fmt.Errorf("refused to recover cluster with no initial state, this is probably an operator error") + } + + // Attempt to restore any snapshots we find, newest to oldest. + var snapshotIndex uint64 + var snapshotTerm uint64 + snapshots, err := snaps.List() + if err != nil { + return fmt.Errorf("failed to list snapshots: %v", err) + } + for _, snapshot := range snapshots { + _, source, err := snaps.Open(snapshot.ID) + if err != nil { + // Skip this one and try the next. We will detect if we + // couldn't open any snapshots. + continue + } + defer source.Close() + + if err := fsm.Restore(source); err != nil { + // Same here, skip and try the next one. + continue + } + + snapshotIndex = snapshot.Index + snapshotTerm = snapshot.Term + break + } + if len(snapshots) > 0 && (snapshotIndex == 0 || snapshotTerm == 0) { + return fmt.Errorf("failed to restore any of the available snapshots") + } + + // The snapshot information is the best known end point for the data + // until we play back the Raft log entries. + lastIndex := snapshotIndex + lastTerm := snapshotTerm + + // Apply any Raft log entries past the snapshot. 
+ lastLogIndex, err := logs.LastIndex() + if err != nil { + return fmt.Errorf("failed to find last log: %v", err) + } + for index := snapshotIndex + 1; index <= lastLogIndex; index++ { + var entry Log + if err := logs.GetLog(index, &entry); err != nil { + return fmt.Errorf("failed to get log at index %d: %v", index, err) + } + if entry.Type == LogCommand { + _ = fsm.Apply(&entry) + } + lastIndex = entry.Index + lastTerm = entry.Term + } + + // Create a new snapshot, placing the configuration in as if it was + // committed at index 1. + snapshot, err := fsm.Snapshot() + if err != nil { + return fmt.Errorf("failed to snapshot FSM: %v", err) + } + version := getSnapshotVersion(conf.ProtocolVersion) + sink, err := snaps.Create(version, lastIndex, lastTerm, configuration, 1, trans) + if err != nil { + return fmt.Errorf("failed to create snapshot: %v", err) + } + if err := snapshot.Persist(sink); err != nil { + return fmt.Errorf("failed to persist snapshot: %v", err) + } + if err := sink.Close(); err != nil { + return fmt.Errorf("failed to finalize snapshot: %v", err) + } + + // Compact the log so that we don't get bad interference from any + // configuration change log entries that might be there. + firstLogIndex, err := logs.FirstIndex() + if err != nil { + return fmt.Errorf("failed to get first log index: %v", err) + } + if err := logs.DeleteRange(firstLogIndex, lastLogIndex); err != nil { + return fmt.Errorf("log compaction failed: %v", err) + } + + return nil +} + +// HasExistingState returns true if the server has any existing state (logs, +// knowledge of a current term, or any snapshots). +func HasExistingState(logs LogStore, stable StableStore, snaps SnapshotStore) (bool, error) { + // Make sure we don't have a current term. + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err == nil { + if currentTerm > 0 { + return true, nil + } + } else { + if err.Error() != "not found" { + return false, fmt.Errorf("failed to read current term: %v", err) + } + } + + // Make sure we have an empty log. + lastIndex, err := logs.LastIndex() + if err != nil { + return false, fmt.Errorf("failed to get last log index: %v", err) + } + if lastIndex > 0 { + return true, nil + } + + // Make sure we have no snapshots + snapshots, err := snaps.List() + if err != nil { + return false, fmt.Errorf("failed to list snapshots: %v", err) + } + if len(snapshots) > 0 { + return true, nil + } + + return false, nil +} + +// NewRaft is used to construct a new Raft node. It takes a configuration, as well +// as implementations of various interfaces that are required. If we have any +// old state, such as snapshots, logs, peers, etc, all those will be restored +// when creating the Raft node. +func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps SnapshotStore, trans Transport) (*Raft, error) { + // Validate the configuration. + if err := ValidateConfig(conf); err != nil { + return nil, err + } + + // Ensure we have a LogOutput. + var logger *log.Logger + if conf.Logger != nil { + logger = conf.Logger + } else { + if conf.LogOutput == nil { + conf.LogOutput = os.Stderr + } + logger = log.New(conf.LogOutput, "", log.LstdFlags) + } + + // Try to restore the current term. + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err != nil && err.Error() != "not found" { + return nil, fmt.Errorf("failed to load current term: %v", err) + } + + // Read the index of the last log entry. 
+ lastIndex, err := logs.LastIndex() + if err != nil { + return nil, fmt.Errorf("failed to find last log: %v", err) + } + + // Get the last log entry. + var lastLog Log + if lastIndex > 0 { + if err = logs.GetLog(lastIndex, &lastLog); err != nil { + return nil, fmt.Errorf("failed to get last log at index %d: %v", lastIndex, err) + } + } + + // Make sure we have a valid server address and ID. + protocolVersion := conf.ProtocolVersion + localAddr := ServerAddress(trans.LocalAddr()) + localID := conf.LocalID + + // TODO (slackpad) - When we deprecate protocol version 2, remove this + // along with the AddPeer() and RemovePeer() APIs. + if protocolVersion < 3 && string(localID) != string(localAddr) { + return nil, fmt.Errorf("when running with ProtocolVersion < 3, LocalID must be set to the network address") + } + + // Create Raft struct. + r := &Raft{ + protocolVersion: protocolVersion, + applyCh: make(chan *logFuture), + conf: *conf, + fsm: fsm, + fsmCommitCh: make(chan commitTuple, 128), + fsmRestoreCh: make(chan *restoreFuture), + fsmSnapshotCh: make(chan *reqSnapshotFuture), + leaderCh: make(chan bool), + localID: localID, + localAddr: localAddr, + logger: logger, + logs: logs, + configurationChangeCh: make(chan *configurationChangeFuture), + configurations: configurations{}, + rpcCh: trans.Consumer(), + snapshots: snaps, + snapshotCh: make(chan *snapshotFuture), + shutdownCh: make(chan struct{}), + stable: stable, + trans: trans, + verifyCh: make(chan *verifyFuture, 64), + configurationsCh: make(chan *configurationsFuture, 8), + bootstrapCh: make(chan *bootstrapFuture), + observers: make(map[uint64]*Observer), + } + + // Initialize as a follower. + r.setState(Follower) + + // Start as leader if specified. This should only be used + // for testing purposes. + if conf.StartAsLeader { + r.setState(Leader) + r.setLeader(r.localAddr) + } + + // Restore the current term and the last log. + r.setCurrentTerm(currentTerm) + r.setLastLog(lastLog.Index, lastLog.Term) + + // Attempt to restore a snapshot if there are any. + if err := r.restoreSnapshot(); err != nil { + return nil, err + } + + // Scan through the log for any configuration change entries. + snapshotIndex, _ := r.getLastSnapshot() + for index := snapshotIndex + 1; index <= lastLog.Index; index++ { + var entry Log + if err := r.logs.GetLog(index, &entry); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", index, err) + panic(err) + } + r.processConfigurationLogEntry(&entry) + } + r.logger.Printf("[INFO] raft: Initial configuration (index=%d): %+v", + r.configurations.latestIndex, r.configurations.latest.Servers) + + // Setup a heartbeat fast-path to avoid head-of-line + // blocking where possible. It MUST be safe for this + // to be called concurrently with a blocking RPC. + trans.SetHeartbeatHandler(r.processHeartbeat) + + // Start the background work. + r.goFunc(r.run) + r.goFunc(r.runFSM) + r.goFunc(r.runSnapshots) + return r, nil +} + +// restoreSnapshot attempts to restore the latest snapshots, and fails if none +// of them can be restored. This is called at initialization time, and is +// completely unsafe to call at any other time. 
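NewRaft above takes the config, FSM, stores, snapshot store, and transport as separate interfaces. A rough sketch of wiring them together for a single throwaway node might look like the following (InmemStore and InmemTransport are assumed to be the in-memory helpers shipped with this library, and counterFSM is the toy FSM sketched earlier; a real deployment would use durable stores and a network transport):

func newTestNode() (*raft.Raft, error) {
	conf := raft.DefaultConfig()
	conf.LocalID = raft.ServerID("node-1")

	store := raft.NewInmemStore()           // implements both LogStore and StableStore
	snaps := raft.NewDiscardSnapshotStore() // fine for experiments, never for production
	addr, trans := raft.NewInmemTransport(raft.ServerAddress("node-1"))

	// Bootstrap only if this node has no prior state; re-bootstrapping an
	// existing node returns ErrCantBootstrap.
	hasState, err := raft.HasExistingState(store, store, snaps)
	if err != nil {
		return nil, err
	}
	if !hasState {
		cfg := raft.Configuration{Servers: []raft.Server{
			{Suffrage: raft.Voter, ID: conf.LocalID, Address: addr},
		}}
		if err := raft.BootstrapCluster(conf, store, store, snaps, trans, cfg); err != nil {
			return nil, err
		}
	}

	return raft.NewRaft(conf, &counterFSM{}, store, store, snaps, trans)
}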
+func (r *Raft) restoreSnapshot() error { + snapshots, err := r.snapshots.List() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) + return err + } + + // Try to load in order of newest to oldest + for _, snapshot := range snapshots { + _, source, err := r.snapshots.Open(snapshot.ID) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapshot.ID, err) + continue + } + defer source.Close() + + if err := r.fsm.Restore(source); err != nil { + r.logger.Printf("[ERR] raft: Failed to restore snapshot %v: %v", snapshot.ID, err) + continue + } + + // Log success + r.logger.Printf("[INFO] raft: Restored from snapshot %v", snapshot.ID) + + // Update the lastApplied so we don't replay old logs + r.setLastApplied(snapshot.Index) + + // Update the last stable snapshot info + r.setLastSnapshot(snapshot.Index, snapshot.Term) + + // Update the configuration + if snapshot.Version > 0 { + r.configurations.committed = snapshot.Configuration + r.configurations.committedIndex = snapshot.ConfigurationIndex + r.configurations.latest = snapshot.Configuration + r.configurations.latestIndex = snapshot.ConfigurationIndex + } else { + configuration := decodePeers(snapshot.Peers, r.trans) + r.configurations.committed = configuration + r.configurations.committedIndex = snapshot.Index + r.configurations.latest = configuration + r.configurations.latestIndex = snapshot.Index + } + + // Success! + return nil + } + + // If we had snapshots and failed to load them, its an error + if len(snapshots) > 0 { + return fmt.Errorf("failed to load any existing snapshots") + } + return nil +} + +// BootstrapCluster is equivalent to non-member BootstrapCluster but can be +// called on an un-bootstrapped Raft instance after it has been created. This +// should only be called at the beginning of time for the cluster, and you +// absolutely must make sure that you call it with the same configuration on all +// the Voter servers. There is no need to bootstrap Nonvoter and Staging +// servers. +func (r *Raft) BootstrapCluster(configuration Configuration) Future { + bootstrapReq := &bootstrapFuture{} + bootstrapReq.init() + bootstrapReq.configuration = configuration + select { + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.bootstrapCh <- bootstrapReq: + return bootstrapReq + } +} + +// Leader is used to return the current leader of the cluster. +// It may return empty string if there is no current leader +// or the leader is unknown. +func (r *Raft) Leader() ServerAddress { + r.leaderLock.RLock() + leader := r.leader + r.leaderLock.RUnlock() + return leader +} + +// Apply is used to apply a command to the FSM in a highly consistent +// manner. This returns a future that can be used to wait on the application. +// An optional timeout can be provided to limit the amount of time we wait +// for the command to be started. This must be run on the leader or it +// will fail. 
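Putting the Apply contract above into code, a caller on the leader might submit a command and wait on the returned future roughly like this (the submit helper and the 5-second timeout are illustrative, not part of the library):

func submit(r *raft.Raft, cmd []byte) (interface{}, error) {
	if r.State() != raft.Leader {
		return nil, fmt.Errorf("not the leader; current leader is %q", r.Leader())
	}
	f := r.Apply(cmd, 5*time.Second)
	if err := f.Error(); err != nil { // blocks until committed and applied, or failed
		return nil, err
	}
	return f.Response(), nil // whatever the FSM's Apply returned
}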
+func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture { + metrics.IncrCounter([]string{"raft", "apply"}, 1) + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + + // Create a log future, no index or term yet + logFuture := &logFuture{ + log: Log{ + Type: LogCommand, + Data: cmd, + }, + } + logFuture.init() + + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.applyCh <- logFuture: + return logFuture + } +} + +// Barrier is used to issue a command that blocks until all preceeding +// operations have been applied to the FSM. It can be used to ensure the +// FSM reflects all queued writes. An optional timeout can be provided to +// limit the amount of time we wait for the command to be started. This +// must be run on the leader or it will fail. +func (r *Raft) Barrier(timeout time.Duration) Future { + metrics.IncrCounter([]string{"raft", "barrier"}, 1) + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + + // Create a log future, no index or term yet + logFuture := &logFuture{ + log: Log{ + Type: LogBarrier, + }, + } + logFuture.init() + + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.applyCh <- logFuture: + return logFuture + } +} + +// VerifyLeader is used to ensure the current node is still +// the leader. This can be done to prevent stale reads when a +// new leader has potentially been elected. +func (r *Raft) VerifyLeader() Future { + metrics.IncrCounter([]string{"raft", "verify_leader"}, 1) + verifyFuture := &verifyFuture{} + verifyFuture.init() + select { + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.verifyCh <- verifyFuture: + return verifyFuture + } +} + +// GetConfiguration returns the latest configuration and its associated index +// currently in use. This may not yet be committed. This must not be called on +// the main thread (which can access the information directly). +func (r *Raft) GetConfiguration() ConfigurationFuture { + configReq := &configurationsFuture{} + configReq.init() + select { + case <-r.shutdownCh: + configReq.respond(ErrRaftShutdown) + return configReq + case r.configurationsCh <- configReq: + return configReq + } +} + +// AddPeer (deprecated) is used to add a new peer into the cluster. This must be +// run on the leader or it will fail. Use AddVoter/AddNonvoter instead. +func (r *Raft) AddPeer(peer ServerAddress) Future { + if r.protocolVersion > 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: AddStaging, + serverID: ServerID(peer), + serverAddress: peer, + prevIndex: 0, + }, 0) +} + +// RemovePeer (deprecated) is used to remove a peer from the cluster. If the +// current leader is being removed, it will cause a new election +// to occur. This must be run on the leader or it will fail. +// Use RemoveServer instead. +func (r *Raft) RemovePeer(peer ServerAddress) Future { + if r.protocolVersion > 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: RemoveServer, + serverID: ServerID(peer), + prevIndex: 0, + }, 0) +} + +// AddVoter will add the given server to the cluster as a staging server. If the +// server is already in the cluster as a voter, this does nothing. This must be +// run on the leader or it will fail. 
The leader will promote the staging server +// to a voter once that server is ready. If nonzero, prevIndex is the index of +// the only configuration upon which this change may be applied; if another +// configuration entry has been added in the meantime, this request will fail. +// If nonzero, timeout is how long this server should wait before the +// configuration change log entry is appended. +func (r *Raft) AddVoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: AddStaging, + serverID: id, + serverAddress: address, + prevIndex: prevIndex, + }, timeout) +} + +// AddNonvoter will add the given server to the cluster but won't assign it a +// vote. The server will receive log entries, but it won't participate in +// elections or log entry commitment. If the server is already in the cluster as +// a staging server or voter, this does nothing. This must be run on the leader +// or it will fail. For prevIndex and timeout, see AddVoter. +func (r *Raft) AddNonvoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 3 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: AddNonvoter, + serverID: id, + serverAddress: address, + prevIndex: prevIndex, + }, timeout) +} + +// RemoveServer will remove the given server from the cluster. If the current +// leader is being removed, it will cause a new election to occur. This must be +// run on the leader or it will fail. For prevIndex and timeout, see AddVoter. +func (r *Raft) RemoveServer(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: RemoveServer, + serverID: id, + prevIndex: prevIndex, + }, timeout) +} + +// DemoteVoter will take away a server's vote, if it has one. If present, the +// server will continue to receive log entries, but it won't participate in +// elections or log entry commitment. If the server is not in the cluster, this +// does nothing. This must be run on the leader or it will fail. For prevIndex +// and timeout, see AddVoter. +func (r *Raft) DemoteVoter(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 3 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: DemoteVoter, + serverID: id, + prevIndex: prevIndex, + }, timeout) +} + +// Shutdown is used to stop the Raft background routines. +// This is not a graceful operation. Provides a future that +// can be used to block until all background routines have exited. +func (r *Raft) Shutdown() Future { + r.shutdownLock.Lock() + defer r.shutdownLock.Unlock() + + if !r.shutdown { + close(r.shutdownCh) + r.shutdown = true + r.setState(Shutdown) + return &shutdownFuture{r} + } + + // avoid closing transport twice + return &shutdownFuture{nil} +} + +// Snapshot is used to manually force Raft to take a snapshot. +// Returns a future that can be used to block until complete. 
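As a usage sketch for the membership APIs documented above (the server IDs, addresses, and 30-second timeout are made up for the example), adding and removing a voter from the leader might look like:

// AddVoter returns an IndexFuture; Error() blocks until the configuration
// change commits or is rejected. prevIndex 0 means "apply against whatever
// the latest configuration is".
func addServer(r *raft.Raft, id raft.ServerID, addr raft.ServerAddress) error {
	return r.AddVoter(id, addr, 0, 30*time.Second).Error()
}

func removeServer(r *raft.Raft, id raft.ServerID) error {
	// Removing the current leader triggers a new election once committed.
	return r.RemoveServer(id, 0, 30*time.Second).Error()
}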
+func (r *Raft) Snapshot() Future { + snapFuture := &snapshotFuture{} + snapFuture.init() + select { + case r.snapshotCh <- snapFuture: + return snapFuture + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } + +} + +// State is used to return the current raft state. +func (r *Raft) State() RaftState { + return r.getState() +} + +// LeaderCh is used to get a channel which delivers signals on +// acquiring or losing leadership. It sends true if we become +// the leader, and false if we lose it. The channel is not buffered, +// and does not block on writes. +func (r *Raft) LeaderCh() <-chan bool { + return r.leaderCh +} + +// String returns a string representation of this Raft node. +func (r *Raft) String() string { + return fmt.Sprintf("Node at %s [%v]", r.localAddr, r.getState()) +} + +// LastContact returns the time of last contact by a leader. +// This only makes sense if we are currently a follower. +func (r *Raft) LastContact() time.Time { + r.lastContactLock.RLock() + last := r.lastContact + r.lastContactLock.RUnlock() + return last +} + +// Stats is used to return a map of various internal stats. This +// should only be used for informative purposes or debugging. +// +// Keys are: "state", "term", "last_log_index", "last_log_term", +// "commit_index", "applied_index", "fsm_pending", +// "last_snapshot_index", "last_snapshot_term", +// "latest_configuration", "last_contact", and "num_peers". +// +// The value of "state" is a numerical value representing a +// RaftState const. +// +// The value of "latest_configuration" is a string which contains +// the id of each server, its suffrage status, and its address. +// +// The value of "last_contact" is either "never" if there +// has been no contact with a leader, "0" if the node is in the +// leader state, or the time since last contact with a leader +// formatted as a string. +// +// The value of "num_peers" is the number of other voting servers in the +// cluster, not including this node. If this node isn't part of the +// configuration then this will be "0". +// +// All other values are uint64s, formatted as strings. +func (r *Raft) Stats() map[string]string { + toString := func(v uint64) string { + return strconv.FormatUint(v, 10) + } + lastLogIndex, lastLogTerm := r.getLastLog() + lastSnapIndex, lastSnapTerm := r.getLastSnapshot() + s := map[string]string{ + "state": r.getState().String(), + "term": toString(r.getCurrentTerm()), + "last_log_index": toString(lastLogIndex), + "last_log_term": toString(lastLogTerm), + "commit_index": toString(r.getCommitIndex()), + "applied_index": toString(r.getLastApplied()), + "fsm_pending": toString(uint64(len(r.fsmCommitCh))), + "last_snapshot_index": toString(lastSnapIndex), + "last_snapshot_term": toString(lastSnapTerm), + "protocol_version": toString(uint64(r.protocolVersion)), + "protocol_version_min": toString(uint64(ProtocolVersionMin)), + "protocol_version_max": toString(uint64(ProtocolVersionMax)), + "snapshot_version_min": toString(uint64(SnapshotVersionMin)), + "snapshot_version_max": toString(uint64(SnapshotVersionMax)), + } + + future := r.GetConfiguration() + if err := future.Error(); err != nil { + r.logger.Printf("[WARN] raft: could not get configuration for Stats: %v", err) + } else { + configuration := future.Configuration() + s["latest_configuration_index"] = toString(future.Index()) + s["latest_configuration"] = fmt.Sprintf("%+v", configuration.Servers) + + // This is a legacy metric that we've seen people use in the wild. 
+ hasUs := false + numPeers := 0 + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + if server.ID == r.localID { + hasUs = true + } else { + numPeers++ + } + } + } + if !hasUs { + numPeers = 0 + } + s["num_peers"] = toString(uint64(numPeers)) + } + + last := r.LastContact() + if last.IsZero() { + s["last_contact"] = "never" + } else if r.getState() == Leader { + s["last_contact"] = "0" + } else { + s["last_contact"] = fmt.Sprintf("%v", time.Now().Sub(last)) + } + return s +} + +// LastIndex returns the last index in stable storage, +// either from the last log or from the last snapshot. +func (r *Raft) LastIndex() uint64 { + return r.getLastIndex() +} + +// AppliedIndex returns the last index applied to the FSM. This is generally +// lagging behind the last index, especially for indexes that are persisted but +// have not yet been considered committed by the leader. NOTE - this reflects +// the last index that was sent to the application's FSM over the apply channel +// but DOES NOT mean that the application's FSM has yet consumed it and applied +// it to its internal state. Thus, the application's state may lag behind this +// index. +func (r *Raft) AppliedIndex() uint64 { + return r.getLastApplied() +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/commands.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/commands.go new file mode 100644 index 00000000000..5d89e7bcdb1 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/commands.go @@ -0,0 +1,151 @@ +package raft + +// RPCHeader is a common sub-structure used to pass along protocol version and +// other information about the cluster. For older Raft implementations before +// versioning was added this will default to a zero-valued structure when read +// by newer Raft versions. +type RPCHeader struct { + // ProtocolVersion is the version of the protocol the sender is + // speaking. + ProtocolVersion ProtocolVersion +} + +// WithRPCHeader is an interface that exposes the RPC header. +type WithRPCHeader interface { + GetRPCHeader() RPCHeader +} + +// AppendEntriesRequest is the command used to append entries to the +// replicated log. +type AppendEntriesRequest struct { + RPCHeader + + // Provide the current term and leader + Term uint64 + Leader []byte + + // Provide the previous entries for integrity checking + PrevLogEntry uint64 + PrevLogTerm uint64 + + // New entries to commit + Entries []*Log + + // Commit index on the leader + LeaderCommitIndex uint64 +} + +// See WithRPCHeader. +func (r *AppendEntriesRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// AppendEntriesResponse is the response returned from an +// AppendEntriesRequest. +type AppendEntriesResponse struct { + RPCHeader + + // Newer term if leader is out of date + Term uint64 + + // Last Log is a hint to help accelerate rebuilding slow nodes + LastLog uint64 + + // We may not succeed if we have a conflicting entry + Success bool + + // There are scenarios where this request didn't succeed + // but there's no need to wait/back-off the next attempt. + NoRetryBackoff bool +} + +// See WithRPCHeader. +func (r *AppendEntriesResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// RequestVoteRequest is the command used by a candidate to ask a Raft peer +// for a vote in an election. 
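Since Stats above returns everything as strings keyed by fixed names, surfacing a few of them in an application's own diagnostics is straightforward; a minimal sketch (the log line format is arbitrary):

stats := r.Stats()
log.Printf("raft: state=%s term=%s commit_index=%s num_peers=%s last_contact=%s",
	stats["state"], stats["term"], stats["commit_index"],
	stats["num_peers"], stats["last_contact"])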
+type RequestVoteRequest struct { + RPCHeader + + // Provide the term and our id + Term uint64 + Candidate []byte + + // Used to ensure safety + LastLogIndex uint64 + LastLogTerm uint64 +} + +// See WithRPCHeader. +func (r *RequestVoteRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// RequestVoteResponse is the response returned from a RequestVoteRequest. +type RequestVoteResponse struct { + RPCHeader + + // Newer term if leader is out of date. + Term uint64 + + // Peers is deprecated, but required by servers that only understand + // protocol version 0. This is not populated in protocol version 2 + // and later. + Peers []byte + + // Is the vote granted. + Granted bool +} + +// See WithRPCHeader. +func (r *RequestVoteResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its +// log (and state machine) from a snapshot on another peer. +type InstallSnapshotRequest struct { + RPCHeader + SnapshotVersion SnapshotVersion + + Term uint64 + Leader []byte + + // These are the last index/term included in the snapshot + LastLogIndex uint64 + LastLogTerm uint64 + + // Peer Set in the snapshot. This is deprecated in favor of Configuration + // but remains here in case we receive an InstallSnapshot from a leader + // that's running old code. + Peers []byte + + // Cluster membership. + Configuration []byte + // Log index where 'Configuration' entry was originally written. + ConfigurationIndex uint64 + + // Size of the snapshot + Size int64 +} + +// See WithRPCHeader. +func (r *InstallSnapshotRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// InstallSnapshotResponse is the response returned from an +// InstallSnapshotRequest. +type InstallSnapshotResponse struct { + RPCHeader + + Term uint64 + Success bool +} + +// See WithRPCHeader. +func (r *InstallSnapshotResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/commitment.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/commitment.go new file mode 100644 index 00000000000..b5ba2634ef2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/commitment.go @@ -0,0 +1,101 @@ +package raft + +import ( + "sort" + "sync" +) + +// Commitment is used to advance the leader's commit index. The leader and +// replication goroutines report in newly written entries with Match(), and +// this notifies on commitCh when the commit index has advanced. +type commitment struct { + // protects matchIndexes and commitIndex + sync.Mutex + // notified when commitIndex increases + commitCh chan struct{} + // voter ID to log index: the server stores up through this log entry + matchIndexes map[ServerID]uint64 + // a quorum stores up through this log entry. monotonically increases. + commitIndex uint64 + // the first index of this leader's term: this needs to be replicated to a + // majority of the cluster before this leader may mark anything committed + // (per Raft's commitment rule) + startIndex uint64 +} + +// newCommitment returns a commitment struct that notifies the provided +// channel when log entries have been committed. A new commitment struct is +// created each time this server becomes leader for a particular term. +// 'configuration' is the servers in the cluster. +// 'startIndex' is the first index created in this term (see +// its description above).
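The commitment rule described above advances the commit index to the highest index stored on a quorum of voters, which the recalculate helper below computes as the median of the sorted match indexes. A standalone illustration of the same arithmetic, with made-up match indexes:

// Three voters report match indexes 8, 3 and 5. Sorted ascending this is
// [3 5 8]; the element at position (3-1)/2 = 1 is 5, and at least two of the
// three voters (a quorum) have stored everything up through index 5.
matched := []uint64{8, 3, 5}
sort.Slice(matched, func(i, j int) bool { return matched[i] < matched[j] })
quorumMatchIndex := matched[(len(matched)-1)/2] // == 5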
+func newCommitment(commitCh chan struct{}, configuration Configuration, startIndex uint64) *commitment { + matchIndexes := make(map[ServerID]uint64) + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + matchIndexes[server.ID] = 0 + } + } + return &commitment{ + commitCh: commitCh, + matchIndexes: matchIndexes, + commitIndex: 0, + startIndex: startIndex, + } +} + +// Called when a new cluster membership configuration is created: it will be +// used to determine commitment from now on. 'configuration' is the servers in +// the cluster. +func (c *commitment) setConfiguration(configuration Configuration) { + c.Lock() + defer c.Unlock() + oldMatchIndexes := c.matchIndexes + c.matchIndexes = make(map[ServerID]uint64) + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + c.matchIndexes[server.ID] = oldMatchIndexes[server.ID] // defaults to 0 + } + } + c.recalculate() +} + +// Called by leader after commitCh is notified +func (c *commitment) getCommitIndex() uint64 { + c.Lock() + defer c.Unlock() + return c.commitIndex +} + +// Match is called once a server completes writing entries to disk: either the +// leader has written the new entry or a follower has replied to an +// AppendEntries RPC. The given server's disk agrees with this server's log up +// through the given index. +func (c *commitment) match(server ServerID, matchIndex uint64) { + c.Lock() + defer c.Unlock() + if prev, hasVote := c.matchIndexes[server]; hasVote && matchIndex > prev { + c.matchIndexes[server] = matchIndex + c.recalculate() + } +} + +// Internal helper to calculate new commitIndex from matchIndexes. +// Must be called with lock held. +func (c *commitment) recalculate() { + if len(c.matchIndexes) == 0 { + return + } + + matched := make([]uint64, 0, len(c.matchIndexes)) + for _, idx := range c.matchIndexes { + matched = append(matched, idx) + } + sort.Sort(uint64Slice(matched)) + quorumMatchIndex := matched[(len(matched)-1)/2] + + if quorumMatchIndex > c.commitIndex && quorumMatchIndex >= c.startIndex { + c.commitIndex = quorumMatchIndex + asyncNotifyCh(c.commitCh) + } +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/config.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/config.go new file mode 100644 index 00000000000..c1ce03ac22b --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/config.go @@ -0,0 +1,258 @@ +package raft + +import ( + "fmt" + "io" + "log" + "time" +) + +// These are the versions of the protocol (which includes RPC messages as +// well as Raft-specific log entries) that this server can _understand_. Use +// the ProtocolVersion member of the Config object to control the version of +// the protocol to use when _speaking_ to other servers. Note that depending on +// the protocol version being spoken, some otherwise understood RPC messages +// may be refused. See dispositionRPC for details of this logic. +// +// There are notes about the upgrade path in the description of the versions +// below. If you are starting a fresh cluster then there's no reason not to +// jump right to the latest protocol version. If you need to interoperate with +// older, version 0 Raft servers you'll need to drive the cluster through the +// different versions in order. +// +// The version details are complicated, but here's a summary of what's required +// to get from a version 0 cluster to version 3: +// +// 1. 
In version N of your app that starts using the new Raft library with +// versioning, set ProtocolVersion to 1. +// 2. Make version N+1 of your app require version N as a prerequisite (all +// servers must be upgraded). For version N+1 of your app set ProtocolVersion +// to 2. +// 3. Similarly, make version N+2 of your app require version N+1 as a +// prerequisite. For version N+2 of your app, set ProtocolVersion to 3. +// +// During this upgrade, older cluster members will still have Server IDs equal +// to their network addresses. To upgrade an older member and give it an ID, it +// needs to leave the cluster and re-enter: +// +// 1. Remove the server from the cluster with RemoveServer, using its network +// address as its ServerID. +// 2. Update the server's config to a better ID (restarting the server). +// 3. Add the server back to the cluster with AddVoter, using its new ID. +// +// You can do this during the rolling upgrade from N+1 to N+2 of your app, or +// as a rolling change at any time after the upgrade. +// +// Version History +// +// 0: Original Raft library before versioning was added. Servers running this +// version of the Raft library use AddPeerDeprecated/RemovePeerDeprecated +// for all configuration changes, and have no support for LogConfiguration. +// 1: First versioned protocol, used to interoperate with old servers, and begin +// the migration path to newer versions of the protocol. Under this version +// all configuration changes are propagated using the now-deprecated +// RemovePeerDeprecated Raft log entry. This means that server IDs are always +// set to be the same as the server addresses (since the old log entry type +// cannot transmit an ID), and only AddPeer/RemovePeer APIs are supported. +// Servers running this version of the protocol can understand the new +// LogConfiguration Raft log entry but will never generate one so they can +// remain compatible with version 0 Raft servers in the cluster. +// 2: Transitional protocol used when migrating an existing cluster to the new +// server ID system. Server IDs are still set to be the same as server +// addresses, but all configuration changes are propagated using the new +// LogConfiguration Raft log entry type, which can carry full ID information. +// This version supports the old AddPeer/RemovePeer APIs as well as the new +// ID-based AddVoter/RemoveServer APIs which should be used when adding +// version 3 servers to the cluster later. This version sheds all +// interoperability with version 0 servers, but can interoperate with newer +// Raft servers running with protocol version 1 since they can understand the +// new LogConfiguration Raft log entry, and this version can still understand +// their RemovePeerDeprecated Raft log entries. We need this protocol version +// as an intermediate step between 1 and 3 so that servers will propagate the +// ID information that will come from newly-added (or -rolled) servers using +// protocol version 3, but since they are still using their address-based IDs +// from the previous step they will still be able to track commitments and +// their own voting status properly. If we skipped this step, servers would +// be started with their new IDs, but they wouldn't see themselves in the old +// address-based configuration, so none of the servers would think they had a +// vote. +// 3: Protocol adding full support for server IDs and new ID-based server APIs +// (AddVoter, AddNonvoter, etc.), old AddPeer/RemovePeer APIs are no longer +// supported. 
Version 2 servers should be swapped out by removing them from +// the cluster one-by-one and re-adding them with updated configuration for +// this protocol version, along with their server ID. The remove/add cycle +// is required to populate their server ID. Note that removing must be done +// by ID, which will be the old server's address. +type ProtocolVersion int + +const ( + ProtocolVersionMin ProtocolVersion = 0 + ProtocolVersionMax = 3 +) + +// These are versions of snapshots that this server can _understand_. Currently, +// it is always assumed that this server generates the latest version, though +// this may be changed in the future to include a configurable version. +// +// Version History +// +// 0: Original Raft library before versioning was added. The peers portion of +// these snapshots is encoded in the legacy format which requires decodePeers +// to parse. This version of snapshots should only be produced by the +// unversioned Raft library. +// 1: New format which adds support for a full configuration structure and its +// associated log index, with support for server IDs and non-voting server +// modes. To ease upgrades, this also includes the legacy peers structure but +// that will never be used by servers that understand version 1 snapshots. +// Since the original Raft library didn't enforce any versioning, we must +// include the legacy peers structure for this version, but we can deprecate +// it in the next snapshot version. +type SnapshotVersion int + +const ( + SnapshotVersionMin SnapshotVersion = 0 + SnapshotVersionMax = 1 +) + +// Config provides any necessary configuration for the Raft server. +type Config struct { + // ProtocolVersion allows a Raft server to inter-operate with older + // Raft servers running an older version of the code. This is used to + // version the wire protocol as well as Raft-specific log entries that + // the server uses when _speaking_ to other servers. There is currently + // no auto-negotiation of versions so all servers must be manually + // configured with compatible versions. See ProtocolVersionMin and + // ProtocolVersionMax for the versions of the protocol that this server + // can _understand_. + ProtocolVersion ProtocolVersion + + // HeartbeatTimeout specifies the time in follower state without + // a leader before we attempt an election. + HeartbeatTimeout time.Duration + + // ElectionTimeout specifies the time in candidate state without + // a leader before we attempt an election. + ElectionTimeout time.Duration + + // CommitTimeout controls the time without an Apply() operation + // before we heartbeat to ensure a timely commit. Due to random + // staggering, may be delayed as much as 2x this value. + CommitTimeout time.Duration + + // MaxAppendEntries controls the maximum number of append entries + // to send at once. We want to strike a balance between efficiency + // and avoiding waste if the follower is going to reject because of + // an inconsistent log. + MaxAppendEntries int + + // If we are a member of a cluster, and RemovePeer is invoked for the + // local node, then we forget all peers and transition into the follower state. + // If ShutdownOnRemove is is set, we additional shutdown Raft. Otherwise, + // we can become a leader of a cluster containing only this node. + ShutdownOnRemove bool + + // TrailingLogs controls how many logs we leave after a snapshot. This is + // used so that we can quickly replay logs on a follower instead of being + // forced to send an entire snapshot. 
+ TrailingLogs uint64 + + // SnapshotInterval controls how often we check if we should perform a snapshot. + // We randomly stagger between this value and 2x this value to avoid the entire + // cluster from performing a snapshot at once. + SnapshotInterval time.Duration + + // SnapshotThreshold controls how many outstanding logs there must be before + // we perform a snapshot. This is to prevent excessive snapshots when we can + // just replay a small set of logs. + SnapshotThreshold uint64 + + // LeaderLeaseTimeout is used to control how long the "lease" lasts + // for being the leader without being able to contact a quorum + // of nodes. If we reach this interval without contact, we will + // step down as leader. + LeaderLeaseTimeout time.Duration + + // StartAsLeader forces Raft to start in the leader state. This should + // never be used except for testing purposes, as it can cause a split-brain. + StartAsLeader bool + + // The unique ID for this server across all time. When running with + // ProtocolVersion < 3, you must set this to be the same as the network + // address of your transport. + LocalID ServerID + + // NotifyCh is used to provide a channel that will be notified of leadership + // changes. Raft will block writing to this channel, so it should either be + // buffered or aggressively consumed. + NotifyCh chan<- bool + + // LogOutput is used as a sink for logs, unless Logger is specified. + // Defaults to os.Stderr. + LogOutput io.Writer + + // Logger is a user-provided logger. If nil, a logger writing to LogOutput + // is used. + Logger *log.Logger +} + +// DefaultConfig returns a Config with usable defaults. +func DefaultConfig() *Config { + return &Config{ + ProtocolVersion: ProtocolVersionMax, + HeartbeatTimeout: 1000 * time.Millisecond, + ElectionTimeout: 1000 * time.Millisecond, + CommitTimeout: 50 * time.Millisecond, + MaxAppendEntries: 64, + ShutdownOnRemove: true, + TrailingLogs: 10240, + SnapshotInterval: 120 * time.Second, + SnapshotThreshold: 8192, + LeaderLeaseTimeout: 500 * time.Millisecond, + } +} + +// ValidateConfig is used to validate a sane configuration +func ValidateConfig(config *Config) error { + // We don't actually support running as 0 in the library any more, but + // we do understand it. 
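Tying the version discussion above to the Config fields, a sketch of building a configuration (the field values are examples, not recommendations):

conf := raft.DefaultConfig() // ProtocolVersion defaults to ProtocolVersionMax
conf.LocalID = raft.ServerID("node-1")
conf.SnapshotThreshold = 4096 // example override

// During a rolling upgrade from older servers a release might pin an
// intermediate protocol version instead; note that with ProtocolVersion < 3
// LocalID must equal the transport's network address (see NewRaft above).
//   conf.ProtocolVersion = 2

if err := raft.ValidateConfig(conf); err != nil {
	// handle the invalid configuration
}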
+ protocolMin := ProtocolVersionMin + if protocolMin == 0 { + protocolMin = 1 + } + if config.ProtocolVersion < protocolMin || + config.ProtocolVersion > ProtocolVersionMax { + return fmt.Errorf("Protocol version %d must be >= %d and <= %d", + config.ProtocolVersion, protocolMin, ProtocolVersionMax) + } + if len(config.LocalID) == 0 { + return fmt.Errorf("LocalID cannot be empty") + } + if config.HeartbeatTimeout < 5*time.Millisecond { + return fmt.Errorf("Heartbeat timeout is too low") + } + if config.ElectionTimeout < 5*time.Millisecond { + return fmt.Errorf("Election timeout is too low") + } + if config.CommitTimeout < time.Millisecond { + return fmt.Errorf("Commit timeout is too low") + } + if config.MaxAppendEntries <= 0 { + return fmt.Errorf("MaxAppendEntries must be positive") + } + if config.MaxAppendEntries > 1024 { + return fmt.Errorf("MaxAppendEntries is too large") + } + if config.SnapshotInterval < 5*time.Millisecond { + return fmt.Errorf("Snapshot interval is too low") + } + if config.LeaderLeaseTimeout < 5*time.Millisecond { + return fmt.Errorf("Leader lease timeout is too low") + } + if config.LeaderLeaseTimeout > config.HeartbeatTimeout { + return fmt.Errorf("Leader lease timeout cannot be larger than heartbeat timeout") + } + if config.ElectionTimeout < config.HeartbeatTimeout { + return fmt.Errorf("Election timeout must be equal or greater than Heartbeat Timeout") + } + return nil +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/configuration.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/configuration.go new file mode 100644 index 00000000000..74508c5e530 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/configuration.go @@ -0,0 +1,343 @@ +package raft + +import "fmt" + +// ServerSuffrage determines whether a Server in a Configuration gets a vote. +type ServerSuffrage int + +// Note: Don't renumber these, since the numbers are written into the log. +const ( + // Voter is a server whose vote is counted in elections and whose match index + // is used in advancing the leader's commit index. + Voter ServerSuffrage = iota + // Nonvoter is a server that receives log entries but is not considered for + // elections or commitment purposes. + Nonvoter + // Staging is a server that acts like a nonvoter with one exception: once a + // staging server receives enough log entries to be sufficiently caught up to + // the leader's log, the leader will invoke a membership change to change + // the Staging server to a Voter. + Staging +) + +func (s ServerSuffrage) String() string { + switch s { + case Voter: + return "Voter" + case Nonvoter: + return "Nonvoter" + case Staging: + return "Staging" + } + return "ServerSuffrage" +} + +// ServerID is a unique string identifying a server for all time. +type ServerID string + +// ServerAddress is a network address for a server that a transport can contact. +type ServerAddress string + +// Server tracks the information about a single server in a configuration. +type Server struct { + // Suffrage determines whether the server gets a vote. + Suffrage ServerSuffrage + // ID is a unique string identifying this server for all time. + ID ServerID + // Address is its network address that a transport can contact. + Address ServerAddress +} + +// Configuration tracks which servers are in the cluster, and whether they have +// votes. This should include the local server, if it's a member of the cluster. 
+// The servers are listed in no particular order, but each should only appear once. +// These entries are appended to the log during membership changes. +type Configuration struct { + Servers []Server +} + +// Clone makes a deep copy of a Configuration. +func (c *Configuration) Clone() (copy Configuration) { + copy.Servers = append(copy.Servers, c.Servers...) + return +} + +// ConfigurationChangeCommand is the different ways to change the cluster +// configuration. +type ConfigurationChangeCommand uint8 + +const ( + // AddStaging makes a server Staging unless it's a Voter. + AddStaging ConfigurationChangeCommand = iota + // AddNonvoter makes a server Nonvoter unless it's Staging or a Voter. + AddNonvoter + // DemoteVoter makes a server Nonvoter unless it's absent. + DemoteVoter + // RemoveServer removes a server entirely from the cluster membership. + RemoveServer + // Promote is created automatically by a leader; it turns a Staging server + // into a Voter. + Promote +) + +func (c ConfigurationChangeCommand) String() string { + switch c { + case AddStaging: + return "AddStaging" + case AddNonvoter: + return "AddNonvoter" + case DemoteVoter: + return "DemoteVoter" + case RemoveServer: + return "RemoveServer" + case Promote: + return "Promote" + } + return "ConfigurationChangeCommand" +} + +// configurationChangeRequest describes a change that a leader would like to +// make to its current configuration. It's used only within a single server +// (never serialized into the log), as part of `configurationChangeFuture`. +type configurationChangeRequest struct { + command ConfigurationChangeCommand + serverID ServerID + serverAddress ServerAddress // only present for AddStaging, AddNonvoter + // prevIndex, if nonzero, is the index of the only configuration upon which + // this change may be applied; if another configuration entry has been + // added in the meantime, this request will fail. + prevIndex uint64 +} + +// configurations is state tracked on every server about its Configurations. +// Note that, per Diego's dissertation, there can be at most one uncommitted +// configuration at a time (the next configuration may not be created until the +// prior one has been committed). +// +// One downside to storing just two configurations is that if you try to take a +// snapshot when your state machine hasn't yet applied the committedIndex, we +// have no record of the configuration that would logically fit into that +// snapshot. We disallow snapshots in that case now. An alternative approach, +// which LogCabin uses, is to track every configuration change in the +// log. +type configurations struct { + // committed is the latest configuration in the log/snapshot that has been + // committed (the one with the largest index). + committed Configuration + // committedIndex is the log index where 'committed' was written. + committedIndex uint64 + // latest is the latest configuration in the log/snapshot (may be committed + // or uncommitted) + latest Configuration + // latestIndex is the log index where 'latest' was written. + latestIndex uint64 +} + +// Clone makes a deep copy of a configurations object. +func (c *configurations) Clone() (copy configurations) { + copy.committed = c.committed.Clone() + copy.committedIndex = c.committedIndex + copy.latest = c.latest.Clone() + copy.latestIndex = c.latestIndex + return +} + +// hasVote returns true if the server identified by 'id' is a Voter in the +// provided Configuration.
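For illustration, a three-voter bootstrap configuration built from these types might look like the following (the IDs and addresses are placeholders); checkConfiguration below would accept it, since the IDs and addresses are unique and at least one server is a Voter:

configuration := raft.Configuration{
	Servers: []raft.Server{
		{Suffrage: raft.Voter, ID: raft.ServerID("node-1"), Address: raft.ServerAddress("10.0.0.1:8300")},
		{Suffrage: raft.Voter, ID: raft.ServerID("node-2"), Address: raft.ServerAddress("10.0.0.2:8300")},
		{Suffrage: raft.Voter, ID: raft.ServerID("node-3"), Address: raft.ServerAddress("10.0.0.3:8300")},
	},
}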
+func hasVote(configuration Configuration, id ServerID) bool { + for _, server := range configuration.Servers { + if server.ID == id { + return server.Suffrage == Voter + } + } + return false +} + +// checkConfiguration tests a cluster membership configuration for common +// errors. +func checkConfiguration(configuration Configuration) error { + idSet := make(map[ServerID]bool) + addressSet := make(map[ServerAddress]bool) + var voters int + for _, server := range configuration.Servers { + if server.ID == "" { + return fmt.Errorf("Empty ID in configuration: %v", configuration) + } + if server.Address == "" { + return fmt.Errorf("Empty address in configuration: %v", server) + } + if idSet[server.ID] { + return fmt.Errorf("Found duplicate ID in configuration: %v", server.ID) + } + idSet[server.ID] = true + if addressSet[server.Address] { + return fmt.Errorf("Found duplicate address in configuration: %v", server.Address) + } + addressSet[server.Address] = true + if server.Suffrage == Voter { + voters++ + } + } + if voters == 0 { + return fmt.Errorf("Need at least one voter in configuration: %v", configuration) + } + return nil +} + +// nextConfiguration generates a new Configuration from the current one and a +// configuration change request. It's split from appendConfigurationEntry so +// that it can be unit tested easily. +func nextConfiguration(current Configuration, currentIndex uint64, change configurationChangeRequest) (Configuration, error) { + if change.prevIndex > 0 && change.prevIndex != currentIndex { + return Configuration{}, fmt.Errorf("Configuration changed since %v (latest is %v)", change.prevIndex, currentIndex) + } + + configuration := current.Clone() + switch change.command { + case AddStaging: + // TODO: barf on new address? + newServer := Server{ + // TODO: This should add the server as Staging, to be automatically + // promoted to Voter later. However, the promoton to Voter is not yet + // implemented, and doing so is not trivial with the way the leader loop + // coordinates with the replication goroutines today. So, for now, the + // server will have a vote right away, and the Promote case below is + // unused. + Suffrage: Voter, + ID: change.serverID, + Address: change.serverAddress, + } + found := false + for i, server := range configuration.Servers { + if server.ID == change.serverID { + if server.Suffrage == Voter { + configuration.Servers[i].Address = change.serverAddress + } else { + configuration.Servers[i] = newServer + } + found = true + break + } + } + if !found { + configuration.Servers = append(configuration.Servers, newServer) + } + case AddNonvoter: + newServer := Server{ + Suffrage: Nonvoter, + ID: change.serverID, + Address: change.serverAddress, + } + found := false + for i, server := range configuration.Servers { + if server.ID == change.serverID { + if server.Suffrage != Nonvoter { + configuration.Servers[i].Address = change.serverAddress + } else { + configuration.Servers[i] = newServer + } + found = true + break + } + } + if !found { + configuration.Servers = append(configuration.Servers, newServer) + } + case DemoteVoter: + for i, server := range configuration.Servers { + if server.ID == change.serverID { + configuration.Servers[i].Suffrage = Nonvoter + break + } + } + case RemoveServer: + for i, server := range configuration.Servers { + if server.ID == change.serverID { + configuration.Servers = append(configuration.Servers[:i], configuration.Servers[i+1:]...) 
+ break + } + } + case Promote: + for i, server := range configuration.Servers { + if server.ID == change.serverID && server.Suffrage == Staging { + configuration.Servers[i].Suffrage = Voter + break + } + } + } + + // Make sure we didn't do something bad like remove the last voter + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + + return configuration, nil +} + +// encodePeers is used to serialize a Configuration into the old peers format. +// This is here for backwards compatibility when operating with a mix of old +// servers and should be removed once we deprecate support for protocol version 1. +func encodePeers(configuration Configuration, trans Transport) []byte { + // Gather up all the voters, other suffrage types are not supported by + // this data format. + var encPeers [][]byte + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + encPeers = append(encPeers, trans.EncodePeer(server.Address)) + } + } + + // Encode the entire array. + buf, err := encodeMsgPack(encPeers) + if err != nil { + panic(fmt.Errorf("failed to encode peers: %v", err)) + } + + return buf.Bytes() +} + +// decodePeers is used to deserialize an old list of peers into a Configuration. +// This is here for backwards compatibility with old log entries and snapshots; +// it should be removed eventually. +func decodePeers(buf []byte, trans Transport) Configuration { + // Decode the buffer first. + var encPeers [][]byte + if err := decodeMsgPack(buf, &encPeers); err != nil { + panic(fmt.Errorf("failed to decode peers: %v", err)) + } + + // Deserialize each peer. + var servers []Server + for _, enc := range encPeers { + p := trans.DecodePeer(enc) + servers = append(servers, Server{ + Suffrage: Voter, + ID: ServerID(p), + Address: ServerAddress(p), + }) + } + + return Configuration{ + Servers: servers, + } +} + +// encodeConfiguration serializes a Configuration using MsgPack, or panics on +// errors. +func encodeConfiguration(configuration Configuration) []byte { + buf, err := encodeMsgPack(configuration) + if err != nil { + panic(fmt.Errorf("failed to encode configuration: %v", err)) + } + return buf.Bytes() +} + +// decodeConfiguration deserializes a Configuration using MsgPack, or panics on +// errors. +func decodeConfiguration(buf []byte) Configuration { + var configuration Configuration + if err := decodeMsgPack(buf, &configuration); err != nil { + panic(fmt.Errorf("failed to decode configuration: %v", err)) + } + return configuration +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/discard_snapshot.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/discard_snapshot.go new file mode 100644 index 00000000000..5e93a9fe01f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/discard_snapshot.go @@ -0,0 +1,49 @@ +package raft + +import ( + "fmt" + "io" +) + +// DiscardSnapshotStore is used to successfully snapshot while +// always discarding the snapshot. This is useful for when the +// log should be truncated but no snapshot should be retained. +// This should never be used for production use, and is only +// suitable for testing. +type DiscardSnapshotStore struct{} + +type DiscardSnapshotSink struct{} + +// NewDiscardSnapshotStore is used to create a new DiscardSnapshotStore. 
+func NewDiscardSnapshotStore() *DiscardSnapshotStore { + return &DiscardSnapshotStore{} +} + +func (d *DiscardSnapshotStore) Create(version SnapshotVersion, index, term uint64, + configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) { + return &DiscardSnapshotSink{}, nil +} + +func (d *DiscardSnapshotStore) List() ([]*SnapshotMeta, error) { + return nil, nil +} + +func (d *DiscardSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + return nil, nil, fmt.Errorf("open is not supported") +} + +func (d *DiscardSnapshotSink) Write(b []byte) (int, error) { + return len(b), nil +} + +func (d *DiscardSnapshotSink) Close() error { + return nil +} + +func (d *DiscardSnapshotSink) ID() string { + return "discard" +} + +func (d *DiscardSnapshotSink) Cancel() error { + return nil +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/file_snapshot.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/file_snapshot.go new file mode 100644 index 00000000000..17d080134a3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/file_snapshot.go @@ -0,0 +1,494 @@ +package raft + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +const ( + testPath = "permTest" + snapPath = "snapshots" + metaFilePath = "meta.json" + stateFilePath = "state.bin" + tmpSuffix = ".tmp" +) + +// FileSnapshotStore implements the SnapshotStore interface and allows +// snapshots to be made on the local disk. +type FileSnapshotStore struct { + path string + retain int + logger *log.Logger +} + +type snapMetaSlice []*fileSnapshotMeta + +// FileSnapshotSink implements SnapshotSink with a file. +type FileSnapshotSink struct { + store *FileSnapshotStore + logger *log.Logger + dir string + meta fileSnapshotMeta + + stateFile *os.File + stateHash hash.Hash64 + buffered *bufio.Writer + + closed bool +} + +// fileSnapshotMeta is stored on disk. We also put a CRC +// on disk so that we can verify the snapshot. +type fileSnapshotMeta struct { + SnapshotMeta + CRC []byte +} + +// bufferedFile is returned when we open a snapshot. This way +// reads are buffered and the file still gets closed. +type bufferedFile struct { + bh *bufio.Reader + fh *os.File +} + +func (b *bufferedFile) Read(p []byte) (n int, err error) { + return b.bh.Read(p) +} + +func (b *bufferedFile) Close() error { + return b.fh.Close() +} + +// NewFileSnapshotStoreWithLogger creates a new FileSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. 
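// exampleFileSnapshotStore below is a hypothetical usage sketch, not part of
// this package: it creates a file-based snapshot store that keeps the two most
// recent snapshots under an illustrative base directory and logs to stderr.
func exampleFileSnapshotStore() (SnapshotStore, error) {
	return NewFileSnapshotStore("/var/lib/myapp/raft", 2, os.Stderr)
}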
+func NewFileSnapshotStoreWithLogger(base string, retain int, logger *log.Logger) (*FileSnapshotStore, error) { + if retain < 1 { + return nil, fmt.Errorf("must retain at least one snapshot") + } + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + + // Ensure our path exists + path := filepath.Join(base, snapPath) + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("snapshot path not accessible: %v", err) + } + + // Setup the store + store := &FileSnapshotStore{ + path: path, + retain: retain, + logger: logger, + } + + // Do a permissions test + if err := store.testPermissions(); err != nil { + return nil, fmt.Errorf("permissions test failed: %v", err) + } + return store, nil +} + +// NewFileSnapshotStore creates a new FileSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. +func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) { + if logOutput == nil { + logOutput = os.Stderr + } + return NewFileSnapshotStoreWithLogger(base, retain, log.New(logOutput, "", log.LstdFlags)) +} + +// testPermissions tries to touch a file in our path to see if it works. +func (f *FileSnapshotStore) testPermissions() error { + path := filepath.Join(f.path, testPath) + fh, err := os.Create(path) + if err != nil { + return err + } + + if err = fh.Close(); err != nil { + return err + } + + if err = os.Remove(path); err != nil { + return err + } + return nil +} + +// snapshotName generates a name for the snapshot. +func snapshotName(term, index uint64) string { + now := time.Now() + msec := now.UnixNano() / int64(time.Millisecond) + return fmt.Sprintf("%d-%d-%d", term, index, msec) +} + +// Create is used to start a new snapshot +func (f *FileSnapshotStore) Create(version SnapshotVersion, index, term uint64, + configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) { + // We only support version 1 snapshots at this time. 
+ if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + // Create a new path + name := snapshotName(term, index) + path := filepath.Join(f.path, name+tmpSuffix) + f.logger.Printf("[INFO] snapshot: Creating new snapshot at %s", path) + + // Make the directory + if err := os.MkdirAll(path, 0755); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to make snapshot directory: %v", err) + return nil, err + } + + // Create the sink + sink := &FileSnapshotSink{ + store: f, + logger: f.logger, + dir: path, + meta: fileSnapshotMeta{ + SnapshotMeta: SnapshotMeta{ + Version: version, + ID: name, + Index: index, + Term: term, + Peers: encodePeers(configuration, trans), + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + CRC: nil, + }, + } + + // Write out the meta data + if err := sink.writeMeta(); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) + return nil, err + } + + // Open the state file + statePath := filepath.Join(path, stateFilePath) + fh, err := os.Create(statePath) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to create state file: %v", err) + return nil, err + } + sink.stateFile = fh + + // Create a CRC64 hash + sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Wrap both the hash and file in a MultiWriter with buffering + multi := io.MultiWriter(sink.stateFile, sink.stateHash) + sink.buffered = bufio.NewWriter(multi) + + // Done + return sink, nil +} + +// List returns available snapshots in the store. +func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) + return nil, err + } + + var snapMeta []*SnapshotMeta + for _, meta := range snapshots { + snapMeta = append(snapMeta, &meta.SnapshotMeta) + if len(snapMeta) == f.retain { + break + } + } + return snapMeta, nil +} + +// getSnapshots returns all the known snapshots. +func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := ioutil.ReadDir(f.path) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to scan snapshot dir: %v", err) + return nil, err + } + + // Populate the metadata + var snapMeta []*fileSnapshotMeta + for _, snap := range snapshots { + // Ignore any files + if !snap.IsDir() { + continue + } + + // Ignore any temporary snapshots + dirName := snap.Name() + if strings.HasSuffix(dirName, tmpSuffix) { + f.logger.Printf("[WARN] snapshot: Found temporary snapshot: %v", dirName) + continue + } + + // Try to read the meta data + meta, err := f.readMeta(dirName) + if err != nil { + f.logger.Printf("[WARN] snapshot: Failed to read metadata for %v: %v", dirName, err) + continue + } + + // Make sure we can understand this version. 
+ if meta.Version < SnapshotVersionMin || meta.Version > SnapshotVersionMax { + f.logger.Printf("[WARN] snapshot: Snapshot version for %v not supported: %d", dirName, meta.Version) + continue + } + + // Append, but only return up to the retain count + snapMeta = append(snapMeta, meta) + } + + // Sort the snapshot, reverse so we get new -> old + sort.Sort(sort.Reverse(snapMetaSlice(snapMeta))) + + return snapMeta, nil +} + +// readMeta is used to read the meta data for a given named backup +func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) { + // Open the meta file + metaPath := filepath.Join(f.path, name, metaFilePath) + fh, err := os.Open(metaPath) + if err != nil { + return nil, err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewReader(fh) + + // Read in the JSON + meta := &fileSnapshotMeta{} + dec := json.NewDecoder(buffered) + if err := dec.Decode(meta); err != nil { + return nil, err + } + return meta, nil +} + +// Open takes a snapshot ID and returns a ReadCloser for that snapshot. +func (f *FileSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + // Get the metadata + meta, err := f.readMeta(id) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get meta data to open snapshot: %v", err) + return nil, nil, err + } + + // Open the state file + statePath := filepath.Join(f.path, id, stateFilePath) + fh, err := os.Open(statePath) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to open state file: %v", err) + return nil, nil, err + } + + // Create a CRC64 hash + stateHash := crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Compute the hash + _, err = io.Copy(stateHash, fh) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to read state file: %v", err) + fh.Close() + return nil, nil, err + } + + // Verify the hash + computed := stateHash.Sum(nil) + if bytes.Compare(meta.CRC, computed) != 0 { + f.logger.Printf("[ERR] snapshot: CRC checksum failed (stored: %v computed: %v)", + meta.CRC, computed) + fh.Close() + return nil, nil, fmt.Errorf("CRC mismatch") + } + + // Seek to the start + if _, err := fh.Seek(0, 0); err != nil { + f.logger.Printf("[ERR] snapshot: State file seek failed: %v", err) + fh.Close() + return nil, nil, err + } + + // Return a buffered file + buffered := &bufferedFile{ + bh: bufio.NewReader(fh), + fh: fh, + } + + return &meta.SnapshotMeta, buffered, nil +} + +// ReapSnapshots reaps any snapshots beyond the retain count. +func (f *FileSnapshotStore) ReapSnapshots() error { + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) + return err + } + + for i := f.retain; i < len(snapshots); i++ { + path := filepath.Join(f.path, snapshots[i].ID) + f.logger.Printf("[INFO] snapshot: reaping snapshot %v", path) + if err := os.RemoveAll(path); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to reap snapshot %v: %v", path, err) + return err + } + } + return nil +} + +// ID returns the ID of the snapshot, can be used with Open() +// after the snapshot is finalized. +func (s *FileSnapshotSink) ID() string { + return s.meta.ID +} + +// Write is used to append to the state file. We write to the +// buffered IO object to reduce the amount of context switches. +func (s *FileSnapshotSink) Write(b []byte) (int, error) { + return s.buffered.Write(b) +} + +// Close is used to indicate a successful end. 
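// persistSketch below is a hypothetical illustration, not part of this
// package, of the contract an FSMSnapshot.Persist implementation follows
// against a SnapshotSink such as FileSnapshotSink: write the state, Close on
// success, and Cancel on failure so partial state is discarded.
func persistSketch(sink SnapshotSink, state []byte) error {
	if _, err := sink.Write(state); err != nil {
		sink.Cancel()
		return err
	}
	return sink.Close()
}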
+func (s *FileSnapshotSink) Close() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) + return err + } + + // Write out the meta data + if err := s.writeMeta(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) + return err + } + + // Move the directory into place + newPath := strings.TrimSuffix(s.dir, tmpSuffix) + if err := os.Rename(s.dir, newPath); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to move snapshot into place: %v", err) + return err + } + + // Reap any old snapshots + if err := s.store.ReapSnapshots(); err != nil { + return err + } + + return nil +} + +// Cancel is used to indicate an unsuccessful end. +func (s *FileSnapshotSink) Cancel() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) + return err + } + + // Attempt to remove all artifacts + return os.RemoveAll(s.dir) +} + +// finalize is used to close all of our resources. +func (s *FileSnapshotSink) finalize() error { + // Flush any remaining data + if err := s.buffered.Flush(); err != nil { + return err + } + + // Get the file size + stat, statErr := s.stateFile.Stat() + + // Close the file + if err := s.stateFile.Close(); err != nil { + return err + } + + // Set the file size, check after we close + if statErr != nil { + return statErr + } + s.meta.Size = stat.Size() + + // Set the CRC + s.meta.CRC = s.stateHash.Sum(nil) + return nil +} + +// writeMeta is used to write out the metadata we have. +func (s *FileSnapshotSink) writeMeta() error { + // Open the meta file + metaPath := filepath.Join(s.dir, metaFilePath) + fh, err := os.Create(metaPath) + if err != nil { + return err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewWriter(fh) + defer buffered.Flush() + + // Write out as JSON + enc := json.NewEncoder(buffered) + if err := enc.Encode(&s.meta); err != nil { + return err + } + return nil +} + +// Implement the sort interface for []*fileSnapshotMeta. +func (s snapMetaSlice) Len() int { + return len(s) +} + +func (s snapMetaSlice) Less(i, j int) bool { + if s[i].Term != s[j].Term { + return s[i].Term < s[j].Term + } + if s[i].Index != s[j].Index { + return s[i].Index < s[j].Index + } + return s[i].ID < s[j].ID +} + +func (s snapMetaSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/fsm.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/fsm.go new file mode 100644 index 00000000000..23da1e99b38 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/fsm.go @@ -0,0 +1,116 @@ +package raft + +import ( + "fmt" + "io" + "time" + + "github.com/armon/go-metrics" +) + +// FSM provides an interface that can be implemented by +// clients to make use of the replicated log. +type FSM interface { + // Apply log is invoked once a log entry is committed. + // It returns a value which will be made available in the + // ApplyFuture returned by Raft.Apply method if that + // method was called on the same Raft node as the FSM. + Apply(*Log) interface{} + + // Snapshot is used to support log compaction. 
This call should + // return an FSMSnapshot which can be used to save a point-in-time + // snapshot of the FSM. Apply and Snapshot are not called in multiple + // threads, but Apply will be called concurrently with Persist. This means + // the FSM should be implemented in a fashion that allows for concurrent + // updates while a snapshot is happening. + Snapshot() (FSMSnapshot, error) + + // Restore is used to restore an FSM from a snapshot. It is not called + // concurrently with any other command. The FSM must discard all previous + // state. + Restore(io.ReadCloser) error +} + +// FSMSnapshot is returned by an FSM in response to a Snapshot +// It must be safe to invoke FSMSnapshot methods with concurrent +// calls to Apply. +type FSMSnapshot interface { + // Persist should dump all necessary state to the WriteCloser 'sink', + // and call sink.Close() when finished or call sink.Cancel() on error. + Persist(sink SnapshotSink) error + + // Release is invoked when we are finished with the snapshot. + Release() +} + +// runFSM is a long running goroutine responsible for applying logs +// to the FSM. This is done async of other logs since we don't want +// the FSM to block our internal operations. +func (r *Raft) runFSM() { + var lastIndex, lastTerm uint64 + for { + select { + case req := <-r.fsmRestoreCh: + // Open the snapshot + meta, source, err := r.snapshots.Open(req.ID) + if err != nil { + req.respond(fmt.Errorf("failed to open snapshot %v: %v", req.ID, err)) + continue + } + + // Attempt to restore + start := time.Now() + if err := r.fsm.Restore(source); err != nil { + req.respond(fmt.Errorf("failed to restore snapshot %v: %v", req.ID, err)) + source.Close() + continue + } + source.Close() + metrics.MeasureSince([]string{"raft", "fsm", "restore"}, start) + + // Update the last index and term + lastIndex = meta.Index + lastTerm = meta.Term + req.respond(nil) + + case req := <-r.fsmSnapshotCh: + // Is there something to snapshot? + if lastIndex == 0 { + req.respond(ErrNothingNewToSnapshot) + continue + } + + // Start a snapshot + start := time.Now() + snap, err := r.fsm.Snapshot() + metrics.MeasureSince([]string{"raft", "fsm", "snapshot"}, start) + + // Respond to the request + req.index = lastIndex + req.term = lastTerm + req.snapshot = snap + req.respond(err) + + case commitEntry := <-r.fsmCommitCh: + // Apply the log if a command + var resp interface{} + if commitEntry.log.Type == LogCommand { + start := time.Now() + resp = r.fsm.Apply(commitEntry.log) + metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start) + } + + // Update the indexes + lastIndex = commitEntry.log.Index + lastTerm = commitEntry.log.Term + + // Invoke the future if given + if commitEntry.future != nil { + commitEntry.future.response = resp + commitEntry.future.respond(nil) + } + case <-r.shutdownCh: + return + } + } +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/future.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/future.go new file mode 100644 index 00000000000..67c74fc42ee --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/future.go @@ -0,0 +1,245 @@ +package raft + +import ( + "sync" + "time" +) + +// Future is used to represent an action that may occur in the future. +type Future interface { + // Error blocks until the future arrives and then + // returns the error status of the future. 
+ // This may be called any number of times - all + // calls will return the same value. + // Note that it is not OK to call this method + // twice concurrently on the same Future instance. + Error() error +} + +// IndexFuture is used for future actions that can result in a raft log entry +// being created. +type IndexFuture interface { + Future + + // Index holds the index of the newly applied log entry. + // This must not be called until after the Error method has returned. + Index() uint64 +} + +// ApplyFuture is used for Apply and can return the FSM response. +type ApplyFuture interface { + IndexFuture + + // Response returns the FSM response as returned + // by the FSM.Apply method. This must not be called + // until after the Error method has returned. + Response() interface{} +} + +// ConfigurationFuture is used for GetConfiguration and can return the +// latest configuration in use by Raft. +type ConfigurationFuture interface { + IndexFuture + + // Configuration contains the latest configuration. This must + // not be called until after the Error method has returned. + Configuration() Configuration +} + +// errorFuture is used to return a static error. +type errorFuture struct { + err error +} + +func (e errorFuture) Error() error { + return e.err +} + +func (e errorFuture) Response() interface{} { + return nil +} + +func (e errorFuture) Index() uint64 { + return 0 +} + +// deferError can be embedded to allow a future +// to provide an error in the future. +type deferError struct { + err error + errCh chan error + responded bool +} + +func (d *deferError) init() { + d.errCh = make(chan error, 1) +} + +func (d *deferError) Error() error { + if d.err != nil { + // Note that when we've received a nil error, this + // won't trigger, but the channel is closed after + // send so we'll still return nil below. + return d.err + } + if d.errCh == nil { + panic("waiting for response on nil channel") + } + d.err = <-d.errCh + return d.err +} + +func (d *deferError) respond(err error) { + if d.errCh == nil { + return + } + if d.responded { + return + } + d.errCh <- err + close(d.errCh) + d.responded = true +} + +// There are several types of requests that cause a configuration entry to +// be appended to the log. These are encoded here for leaderLoop() to process. +// This is internal to a single server. +type configurationChangeFuture struct { + logFuture + req configurationChangeRequest +} + +// bootstrapFuture is used to attempt a live bootstrap of the cluster. See the +// Raft object's BootstrapCluster member function for more details. +type bootstrapFuture struct { + deferError + + // configuration is the proposed bootstrap configuration to apply. + configuration Configuration +} + +// logFuture is used to apply a log entry and waits until +// the log is considered committed. +type logFuture struct { + deferError + log Log + response interface{} + dispatch time.Time +} + +func (l *logFuture) Response() interface{} { + return l.response +} + +func (l *logFuture) Index() uint64 { + return l.log.Index +} + +type shutdownFuture struct { + raft *Raft +} + +func (s *shutdownFuture) Error() error { + if s.raft == nil { + return nil + } + s.raft.waitShutdown() + if closeable, ok := s.raft.trans.(WithClose); ok { + closeable.Close() + } + return nil +} + +// snapshotFuture is used for waiting on a snapshot to complete. +type snapshotFuture struct { + deferError +} + +// reqSnapshotFuture is used for requesting a snapshot start. +// It is only used internally. 
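// applyAndWait below is a hypothetical sketch, not part of this package's API,
// showing how callers typically consume these futures: Raft.Apply returns an
// ApplyFuture immediately, Error blocks until the entry is committed or fails,
// and Response then yields whatever the FSM returned from Apply.
func applyAndWait(r *Raft, cmd []byte) (interface{}, error) {
	f := r.Apply(cmd, 10*time.Second)
	if err := f.Error(); err != nil {
		return nil, err
	}
	return f.Response(), nil
}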
+type reqSnapshotFuture struct { + deferError + + // snapshot details provided by the FSM runner before responding + index uint64 + term uint64 + snapshot FSMSnapshot +} + +// restoreFuture is used for requesting an FSM to perform a +// snapshot restore. Used internally only. +type restoreFuture struct { + deferError + ID string +} + +// verifyFuture is used to verify the current node is still +// the leader. This is to prevent a stale read. +type verifyFuture struct { + deferError + notifyCh chan *verifyFuture + quorumSize int + votes int + voteLock sync.Mutex +} + +// configurationsFuture is used to retrieve the current configurations. This is +// used to allow safe access to this information outside of the main thread. +type configurationsFuture struct { + deferError + configurations configurations +} + +// Configuration returns the latest configuration in use by Raft. +func (c *configurationsFuture) Configuration() Configuration { + return c.configurations.latest +} + +// Index returns the index of the latest configuration in use by Raft. +func (c *configurationsFuture) Index() uint64 { + return c.configurations.latestIndex +} + +// vote is used to respond to a verifyFuture. +// This may block when responding on the notifyCh. +func (v *verifyFuture) vote(leader bool) { + v.voteLock.Lock() + defer v.voteLock.Unlock() + + // Guard against having notified already + if v.notifyCh == nil { + return + } + + if leader { + v.votes++ + if v.votes >= v.quorumSize { + v.notifyCh <- v + v.notifyCh = nil + } + } else { + v.notifyCh <- v + v.notifyCh = nil + } +} + +// appendFuture is used for waiting on a pipelined append +// entries RPC. +type appendFuture struct { + deferError + start time.Time + args *AppendEntriesRequest + resp *AppendEntriesResponse +} + +func (a *appendFuture) Start() time.Time { + return a.start +} + +func (a *appendFuture) Request() *AppendEntriesRequest { + return a.args +} + +func (a *appendFuture) Response() *AppendEntriesResponse { + return a.resp +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/inmem_store.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/inmem_store.go new file mode 100644 index 00000000000..e5d579e1b31 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/inmem_store.go @@ -0,0 +1,125 @@ +package raft + +import ( + "sync" +) + +// InmemStore implements the LogStore and StableStore interface. +// It should NOT EVER be used for production. It is used only for +// unit tests. Use the MDBStore implementation instead. +type InmemStore struct { + l sync.RWMutex + lowIndex uint64 + highIndex uint64 + logs map[uint64]*Log + kv map[string][]byte + kvInt map[string]uint64 +} + +// NewInmemStore returns a new in-memory backend. Do not ever +// use for production. Only for testing. +func NewInmemStore() *InmemStore { + i := &InmemStore{ + logs: make(map[uint64]*Log), + kv: make(map[string][]byte), + kvInt: make(map[string]uint64), + } + return i +} + +// FirstIndex implements the LogStore interface. +func (i *InmemStore) FirstIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.lowIndex, nil +} + +// LastIndex implements the LogStore interface. +func (i *InmemStore) LastIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.highIndex, nil +} + +// GetLog implements the LogStore interface. 
+func (i *InmemStore) GetLog(index uint64, log *Log) error { + i.l.RLock() + defer i.l.RUnlock() + l, ok := i.logs[index] + if !ok { + return ErrLogNotFound + } + *log = *l + return nil +} + +// StoreLog implements the LogStore interface. +func (i *InmemStore) StoreLog(log *Log) error { + return i.StoreLogs([]*Log{log}) +} + +// StoreLogs implements the LogStore interface. +func (i *InmemStore) StoreLogs(logs []*Log) error { + i.l.Lock() + defer i.l.Unlock() + for _, l := range logs { + i.logs[l.Index] = l + if i.lowIndex == 0 { + i.lowIndex = l.Index + } + if l.Index > i.highIndex { + i.highIndex = l.Index + } + } + return nil +} + +// DeleteRange implements the LogStore interface. +func (i *InmemStore) DeleteRange(min, max uint64) error { + i.l.Lock() + defer i.l.Unlock() + for j := min; j <= max; j++ { + delete(i.logs, j) + } + if min <= i.lowIndex { + i.lowIndex = max + 1 + } + if max >= i.highIndex { + i.highIndex = min - 1 + } + if i.lowIndex > i.highIndex { + i.lowIndex = 0 + i.highIndex = 0 + } + return nil +} + +// Set implements the StableStore interface. +func (i *InmemStore) Set(key []byte, val []byte) error { + i.l.Lock() + defer i.l.Unlock() + i.kv[string(key)] = val + return nil +} + +// Get implements the StableStore interface. +func (i *InmemStore) Get(key []byte) ([]byte, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.kv[string(key)], nil +} + +// SetUint64 implements the StableStore interface. +func (i *InmemStore) SetUint64(key []byte, val uint64) error { + i.l.Lock() + defer i.l.Unlock() + i.kvInt[string(key)] = val + return nil +} + +// GetUint64 implements the StableStore interface. +func (i *InmemStore) GetUint64(key []byte) (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.kvInt[string(key)], nil +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/inmem_transport.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/inmem_transport.go new file mode 100644 index 00000000000..3693cd5ad1e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/inmem_transport.go @@ -0,0 +1,322 @@ +package raft + +import ( + "fmt" + "io" + "sync" + "time" +) + +// NewInmemAddr returns a new in-memory addr with +// a randomly generate UUID as the ID. +func NewInmemAddr() ServerAddress { + return ServerAddress(generateUUID()) +} + +// inmemPipeline is used to pipeline requests for the in-mem transport. +type inmemPipeline struct { + trans *InmemTransport + peer *InmemTransport + peerAddr ServerAddress + + doneCh chan AppendFuture + inprogressCh chan *inmemPipelineInflight + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +type inmemPipelineInflight struct { + future *appendFuture + respCh <-chan RPCResponse +} + +// InmemTransport Implements the Transport interface, to allow Raft to be +// tested in-memory without going over a network. 
+type InmemTransport struct { + sync.RWMutex + consumerCh chan RPC + localAddr ServerAddress + peers map[ServerAddress]*InmemTransport + pipelines []*inmemPipeline + timeout time.Duration +} + +// NewInmemTransport is used to initialize a new transport +// and generates a random local address if none is specified +func NewInmemTransport(addr ServerAddress) (ServerAddress, *InmemTransport) { + if string(addr) == "" { + addr = NewInmemAddr() + } + trans := &InmemTransport{ + consumerCh: make(chan RPC, 16), + localAddr: addr, + peers: make(map[ServerAddress]*InmemTransport), + timeout: 50 * time.Millisecond, + } + return addr, trans +} + +// SetHeartbeatHandler is used to set optional fast-path for +// heartbeats, not supported for this transport. +func (i *InmemTransport) SetHeartbeatHandler(cb func(RPC)) { +} + +// Consumer implements the Transport interface. +func (i *InmemTransport) Consumer() <-chan RPC { + return i.consumerCh +} + +// LocalAddr implements the Transport interface. +func (i *InmemTransport) LocalAddr() ServerAddress { + return i.localAddr +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (i *InmemTransport) AppendEntriesPipeline(target ServerAddress) (AppendPipeline, error) { + i.RLock() + peer, ok := i.peers[target] + i.RUnlock() + if !ok { + return nil, fmt.Errorf("failed to connect to peer: %v", target) + } + pipeline := newInmemPipeline(i, peer, target) + i.Lock() + i.pipelines = append(i.pipelines, pipeline) + i.Unlock() + return pipeline, nil +} + +// AppendEntries implements the Transport interface. +func (i *InmemTransport) AppendEntries(target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*AppendEntriesResponse) + *resp = *out + return nil +} + +// RequestVote implements the Transport interface. +func (i *InmemTransport) RequestVote(target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*RequestVoteResponse) + *resp = *out + return nil +} + +// InstallSnapshot implements the Transport interface. +func (i *InmemTransport) InstallSnapshot(target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*InstallSnapshotResponse) + *resp = *out + return nil +} + +func (i *InmemTransport) makeRPC(target ServerAddress, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) { + i.RLock() + peer, ok := i.peers[target] + i.RUnlock() + + if !ok { + err = fmt.Errorf("failed to connect to peer: %v", target) + return + } + + // Send the RPC over + respCh := make(chan RPCResponse) + peer.consumerCh <- RPC{ + Command: args, + Reader: r, + RespChan: respCh, + } + + // Wait for a response + select { + case rpcResp = <-respCh: + if rpcResp.Error != nil { + err = rpcResp.Error + } + case <-time.After(timeout): + err = fmt.Errorf("command timed out") + } + return +} + +// EncodePeer implements the Transport interface. 
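// connectedPairSketch below is a hypothetical helper, not part of this
// package, showing how two in-memory transports are typically wired together
// in tests: each side must Connect to the other's address before RPCs can be
// routed between them.
func connectedPairSketch() (*InmemTransport, *InmemTransport) {
	addr1, t1 := NewInmemTransport("")
	addr2, t2 := NewInmemTransport("")
	t1.Connect(addr2, t2)
	t2.Connect(addr1, t1)
	return t1, t2
}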
+func (i *InmemTransport) EncodePeer(p ServerAddress) []byte { + return []byte(p) +} + +// DecodePeer implements the Transport interface. +func (i *InmemTransport) DecodePeer(buf []byte) ServerAddress { + return ServerAddress(buf) +} + +// Connect is used to connect this transport to another transport for +// a given peer name. This allows for local routing. +func (i *InmemTransport) Connect(peer ServerAddress, t Transport) { + trans := t.(*InmemTransport) + i.Lock() + defer i.Unlock() + i.peers[peer] = trans +} + +// Disconnect is used to remove the ability to route to a given peer. +func (i *InmemTransport) Disconnect(peer ServerAddress) { + i.Lock() + defer i.Unlock() + delete(i.peers, peer) + + // Disconnect any pipelines + n := len(i.pipelines) + for idx := 0; idx < n; idx++ { + if i.pipelines[idx].peerAddr == peer { + i.pipelines[idx].Close() + i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil + idx-- + n-- + } + } + i.pipelines = i.pipelines[:n] +} + +// DisconnectAll is used to remove all routes to peers. +func (i *InmemTransport) DisconnectAll() { + i.Lock() + defer i.Unlock() + i.peers = make(map[ServerAddress]*InmemTransport) + + // Handle pipelines + for _, pipeline := range i.pipelines { + pipeline.Close() + } + i.pipelines = nil +} + +// Close is used to permanently disable the transport +func (i *InmemTransport) Close() error { + i.DisconnectAll() + return nil +} + +func newInmemPipeline(trans *InmemTransport, peer *InmemTransport, addr ServerAddress) *inmemPipeline { + i := &inmemPipeline{ + trans: trans, + peer: peer, + peerAddr: addr, + doneCh: make(chan AppendFuture, 16), + inprogressCh: make(chan *inmemPipelineInflight, 16), + shutdownCh: make(chan struct{}), + } + go i.decodeResponses() + return i +} + +func (i *inmemPipeline) decodeResponses() { + timeout := i.trans.timeout + for { + select { + case inp := <-i.inprogressCh: + var timeoutCh <-chan time.Time + if timeout > 0 { + timeoutCh = time.After(timeout) + } + + select { + case rpcResp := <-inp.respCh: + // Copy the result back + *inp.future.resp = *rpcResp.Response.(*AppendEntriesResponse) + inp.future.respond(rpcResp.Error) + + select { + case i.doneCh <- inp.future: + case <-i.shutdownCh: + return + } + + case <-timeoutCh: + inp.future.respond(fmt.Errorf("command timed out")) + select { + case i.doneCh <- inp.future: + case <-i.shutdownCh: + return + } + + case <-i.shutdownCh: + return + } + case <-i.shutdownCh: + return + } + } +} + +func (i *inmemPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { + // Create a new future + future := &appendFuture{ + start: time.Now(), + args: args, + resp: resp, + } + future.init() + + // Handle a timeout + var timeout <-chan time.Time + if i.trans.timeout > 0 { + timeout = time.After(i.trans.timeout) + } + + // Send the RPC over + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + Command: args, + RespChan: respCh, + } + select { + case i.peer.consumerCh <- rpc: + case <-timeout: + return nil, fmt.Errorf("command enqueue timeout") + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } + + // Send to be decoded + select { + case i.inprogressCh <- &inmemPipelineInflight{future, respCh}: + return future, nil + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +func (i *inmemPipeline) Consumer() <-chan AppendFuture { + return i.doneCh +} + +func (i *inmemPipeline) Close() error { + i.shutdownLock.Lock() + defer i.shutdownLock.Unlock() + if i.shutdown { + return nil + } + + i.shutdown = true + 
close(i.shutdownCh)
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/log.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/log.go
new file mode 100644
index 00000000000..4ade38ecc12
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/log.go
@@ -0,0 +1,72 @@
+package raft
+
+// LogType describes various types of log entries.
+type LogType uint8
+
+const (
+	// LogCommand is applied to a user FSM.
+	LogCommand LogType = iota
+
+	// LogNoop is used to assert leadership.
+	LogNoop
+
+	// LogAddPeerDeprecated is used to add a new peer. This should only be used with
+	// older protocol versions designed to be compatible with unversioned
+	// Raft servers. See comments in config.go for details.
+	LogAddPeerDeprecated
+
+	// LogRemovePeerDeprecated is used to remove an existing peer. This should only be
+	// used with older protocol versions designed to be compatible with
+	// unversioned Raft servers. See comments in config.go for details.
+	LogRemovePeerDeprecated
+
+	// LogBarrier is used to ensure all preceding operations have been
+	// applied to the FSM. It is similar to LogNoop, but instead of returning
+	// once committed, it only returns once the FSM manager acks it. Otherwise
+	// it is possible there are operations committed but not yet applied to
+	// the FSM.
+	LogBarrier
+
+	// LogConfiguration establishes a membership change configuration. It is
+	// created when a server is added, removed, promoted, etc. Only used
+	// when protocol version 1 or greater is in use.
+	LogConfiguration
+)
+
+// Log entries are replicated to all members of the Raft cluster
+// and form the heart of the replicated state machine.
+type Log struct {
+	// Index holds the index of the log entry.
+	Index uint64
+
+	// Term holds the election term of the log entry.
+	Term uint64
+
+	// Type holds the type of the log entry.
+	Type LogType
+
+	// Data holds the log entry's type-specific data.
+	Data []byte
+}
+
+// LogStore is used to provide an interface for storing
+// and retrieving logs in a durable fashion.
+type LogStore interface {
+	// FirstIndex returns the first index written. 0 for no entries.
+	FirstIndex() (uint64, error)
+
+	// LastIndex returns the last index written. 0 for no entries.
+	LastIndex() (uint64, error)
+
+	// GetLog gets a log entry at a given index.
+	GetLog(index uint64, log *Log) error
+
+	// StoreLog stores a log entry.
+	StoreLog(log *Log) error
+
+	// StoreLogs stores multiple log entries.
+	StoreLogs(logs []*Log) error
+
+	// DeleteRange deletes a range of log entries. The range is inclusive.
+	DeleteRange(min, max uint64) error
+}
diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/log_cache.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/log_cache.go
new file mode 100644
index 00000000000..952e98c2282
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/log_cache.go
@@ -0,0 +1,79 @@
+package raft
+
+import (
+	"fmt"
+	"sync"
+)
+
+// LogCache wraps any LogStore implementation to provide an
+// in-memory ring buffer. This is used to cache access to
+// the recently written entries. For implementations that do not
+// cache themselves, this can provide a substantial boost by
+// avoiding disk I/O on recent entries.
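// cachedStoreSketch below is a hypothetical example, not part of this package:
// it wraps an underlying LogStore (here the in-memory test store) so that the
// most recently written 512 entries are served from memory instead of hitting
// the backing store.
func cachedStoreSketch() (*LogCache, error) {
	return NewLogCache(512, NewInmemStore())
}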
+type LogCache struct {
+	store LogStore
+
+	cache []*Log
+	l     sync.RWMutex
+}
+
+// NewLogCache is used to create a new LogCache with the
+// given capacity and backend store.
+func NewLogCache(capacity int, store LogStore) (*LogCache, error) {
+	if capacity <= 0 {
+		return nil, fmt.Errorf("capacity must be positive")
+	}
+	c := &LogCache{
+		store: store,
+		cache: make([]*Log, capacity),
+	}
+	return c, nil
+}
+
+func (c *LogCache) GetLog(idx uint64, log *Log) error {
+	// Check the buffer for an entry
+	c.l.RLock()
+	cached := c.cache[idx%uint64(len(c.cache))]
+	c.l.RUnlock()
+
+	// Check if entry is valid
+	if cached != nil && cached.Index == idx {
+		*log = *cached
+		return nil
+	}
+
+	// Forward request on cache miss
+	return c.store.GetLog(idx, log)
+}
+
+func (c *LogCache) StoreLog(log *Log) error {
+	return c.StoreLogs([]*Log{log})
+}
+
+func (c *LogCache) StoreLogs(logs []*Log) error {
+	// Insert the logs into the ring buffer
+	c.l.Lock()
+	for _, l := range logs {
+		c.cache[l.Index%uint64(len(c.cache))] = l
+	}
+	c.l.Unlock()
+
+	return c.store.StoreLogs(logs)
+}
+
+func (c *LogCache) FirstIndex() (uint64, error) {
+	return c.store.FirstIndex()
+}
+
+func (c *LogCache) LastIndex() (uint64, error) {
+	return c.store.LastIndex()
+}
+
+func (c *LogCache) DeleteRange(min, max uint64) error {
+	// Invalidate the cache on deletes
+	c.l.Lock()
+	c.cache = make([]*Log, len(c.cache))
+	c.l.Unlock()
+
+	return c.store.DeleteRange(min, max)
+}
diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/membership.md b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/membership.md
new file mode 100644
index 00000000000..df1f83e27f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/membership.md
@@ -0,0 +1,83 @@
+Simon (@superfell) and I (@ongardie) talked through reworking this library's cluster membership changes last Friday. We don't see a way to split this into independent patches, so we're taking the next best approach: submitting the plan here for review, then working on an enormous PR. Your feedback would be appreciated. (@superfell is out this week, however, so don't expect him to respond quickly.)
+
+These are the main goals:
+ - Bringing things in line with the description in my PhD dissertation;
+ - Catching up new servers prior to granting them a vote, as well as allowing permanent non-voting members; and
+ - Eliminating the `peers.json` file, to avoid issues of consistency between that and the log/snapshot.
+
+## Data-centric view
+
+We propose to re-define a *configuration* as a set of servers, where each server includes an address (as it does today) and a mode that is either:
+ - *Voter*: a server whose vote is counted in elections and whose match index is used in advancing the leader's commit index.
+ - *Nonvoter*: a server that receives log entries but is not considered for elections or commitment purposes.
+ - *Staging*: a server that acts like a nonvoter with one exception: once a staging server receives enough log entries to catch up sufficiently to the leader's log, the leader will invoke a membership change to change the staging server to a voter.
+
+All changes to the configuration will be done by writing a new configuration to the log. The new configuration will be in effect as soon as it is appended to the log (not when it is committed like a normal state machine command).
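For example, a configuration entry appended to the log might describe a server set like the following (a minimal sketch using the `Server` and `Configuration` types from configuration.go; the IDs and addresses are illustrative):

```go
configuration := Configuration{Servers: []Server{
	{Suffrage: Voter, ID: "node1", Address: "10.0.0.1:8300"},
	{Suffrage: Nonvoter, ID: "node2", Address: "10.0.0.2:8300"},
	{Suffrage: Staging, ID: "node3", Address: "10.0.0.3:8300"},
}}
```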
Note that, per my dissertation, there can be at most one uncommitted configuration at a time (the next configuration may not be created until the prior one has been committed). It's not strictly necessary to follow these same rules for the nonvoter/staging servers, but we think its best to treat all changes uniformly. + +Each server will track two configurations: + 1. its *committed configuration*: the latest configuration in the log/snapshot that has been committed, along with its index. + 2. its *latest configuration*: the latest configuration in the log/snapshot (may be committed or uncommitted), along with its index. + +When there's no membership change happening, these two will be the same. The latest configuration is almost always the one used, except: + - When followers truncate the suffix of their logs, they may need to fall back to the committed configuration. + - When snapshotting, the committed configuration is written, to correspond with the committed log prefix that is being snapshotted. + + +## Application API + +We propose the following operations for clients to manipulate the cluster configuration: + - AddVoter: server becomes staging unless voter, + - AddNonvoter: server becomes nonvoter unless staging or voter, + - DemoteVoter: server becomes nonvoter unless absent, + - RemovePeer: server removed from configuration, + - GetConfiguration: waits for latest config to commit, returns committed config. + +This diagram, of which I'm quite proud, shows the possible transitions: +``` ++-----------------------------------------------------------------------------+ +| | +| Start -> +--------+ | +| ,------<------------| | | +| / | absent | | +| / RemovePeer--> | | <---RemovePeer | +| / | +--------+ \ | +| / | | \ | +| AddNonvoter | AddVoter \ | +| | ,->---' `--<-. | \ | +| v / \ v \ | +| +----------+ +----------+ +----------+ | +| | | ---AddVoter--> | | -log caught up --> | | | +| | nonvoter | | staging | | voter | | +| | | <-DemoteVoter- | | ,- | | | +| +----------+ \ +----------+ / +----------+ | +| \ / | +| `--------------<---------------' | +| | ++-----------------------------------------------------------------------------+ +``` + +While these operations aren't quite symmetric, we think they're a good set to capture +the possible intent of the user. For example, if I want to make sure a server doesn't have a vote, but the server isn't part of the configuration at all, it probably shouldn't be added as a nonvoting server. + +Each of these application-level operations will be interpreted by the leader and, if it has an effect, will cause the leader to write a new configuration entry to its log. Which particular application-level operation caused the log entry to be written need not be part of the log entry. + +## Code implications + +This is a non-exhaustive list, but we came up with a few things: +- Remove the PeerStore: the `peers.json` file introduces the possibility of getting out of sync with the log and snapshot, and it's hard to maintain this atomically as the log changes. It's not clear whether it's meant to track the committed or latest configuration, either. +- Servers will have to search their snapshot and log to find the committed configuration and the latest configuration on startup. +- Bootstrap will no longer use `peers.json` but should initialize the log or snapshot with an application-provided configuration entry. +- Snapshots should store the index of their configuration along with the configuration itself. 
In my experience with LogCabin, the original log index of the configuration is very useful to include in debug log messages. +- As noted in hashicorp/raft#84, configuration change requests should come in via a separate channel, and one may not proceed until the last has been committed. +- As to deciding when a log is sufficiently caught up, implementing a sophisticated algorithm *is* something that can be done in a separate PR. An easy and decent placeholder is: once the staging server has reached 95% of the leader's commit index, promote it. + +## Feedback + +Again, we're looking for feedback here before we start working on this. Here are some questions to think about: + - Does this seem like where we want things to go? + - Is there anything here that should be left out? + - Is there anything else we're forgetting about? + - Is there a good way to break this up? + - What do we need to worry about in terms of backwards compatibility? + - What implication will this have on current tests? + - What's the best way to test this code, in particular the small changes that will be sprinkled all over the library? diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/net_transport.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/net_transport.go new file mode 100644 index 00000000000..7c55ac5371f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/net_transport.go @@ -0,0 +1,622 @@ +package raft + +import ( + "bufio" + "errors" + "fmt" + "io" + "log" + "net" + "os" + "sync" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +const ( + rpcAppendEntries uint8 = iota + rpcRequestVote + rpcInstallSnapshot + + // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. + DefaultTimeoutScale = 256 * 1024 // 256KB + + // rpcMaxPipeline controls the maximum number of outstanding + // AppendEntries RPC calls. + rpcMaxPipeline = 128 +) + +var ( + // ErrTransportShutdown is returned when operations on a transport are + // invoked after it's been terminated. + ErrTransportShutdown = errors.New("transport shutdown") + + // ErrPipelineShutdown is returned when the pipeline is closed. + ErrPipelineShutdown = errors.New("append pipeline closed") +) + +/* + +NetworkTransport provides a network based transport that can be +used to communicate with Raft on remote machines. It requires +an underlying stream layer to provide a stream abstraction, which can +be simple TCP, TLS, etc. + +This transport is very simple and lightweight. Each RPC request is +framed by sending a byte that indicates the message type, followed +by the MsgPack encoded request. + +The response is an error string followed by the response object, +both are encoded using MsgPack. + +InstallSnapshot is special, in that after the RPC request we stream +the entire state. That socket is not re-used as the connection state +is not known if there is an error. + +*/ +type NetworkTransport struct { + connPool map[ServerAddress][]*netConn + connPoolLock sync.Mutex + + consumeCh chan RPC + + heartbeatFn func(RPC) + heartbeatFnLock sync.Mutex + + logger *log.Logger + + maxPool int + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + stream StreamLayer + + timeout time.Duration + TimeoutScale int +} + +// StreamLayer is used with the NetworkTransport to provide +// the low level stream abstraction. 
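// transportSketch below is a hypothetical helper, not part of this package:
// given any StreamLayer implementation (plain TCP, TLS, and so on), it builds
// a NetworkTransport that pools up to three connections per peer and applies
// a ten second I/O deadline to each RPC.
func transportSketch(stream StreamLayer) *NetworkTransport {
	return NewNetworkTransport(stream, 3, 10*time.Second, os.Stderr)
}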
+type StreamLayer interface { + net.Listener + + // Dial is used to create a new outgoing connection + Dial(address ServerAddress, timeout time.Duration) (net.Conn, error) +} + +type netConn struct { + target ServerAddress + conn net.Conn + r *bufio.Reader + w *bufio.Writer + dec *codec.Decoder + enc *codec.Encoder +} + +func (n *netConn) Release() error { + return n.conn.Close() +} + +type netPipeline struct { + conn *netConn + trans *NetworkTransport + + doneCh chan AppendFuture + inprogressCh chan *appendFuture + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// NewNetworkTransport creates a new network transport with the given dialer +// and listener. The maxPool controls how many connections we will pool. The +// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply +// the timeout by (SnapshotSize / TimeoutScale). +func NewNetworkTransport( + stream StreamLayer, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) *NetworkTransport { + if logOutput == nil { + logOutput = os.Stderr + } + return NewNetworkTransportWithLogger(stream, maxPool, timeout, log.New(logOutput, "", log.LstdFlags)) +} + +// NewNetworkTransportWithLogger creates a new network transport with the given dialer +// and listener. The maxPool controls how many connections we will pool. The +// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply +// the timeout by (SnapshotSize / TimeoutScale). +func NewNetworkTransportWithLogger( + stream StreamLayer, + maxPool int, + timeout time.Duration, + logger *log.Logger, +) *NetworkTransport { + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + trans := &NetworkTransport{ + connPool: make(map[ServerAddress][]*netConn), + consumeCh: make(chan RPC), + logger: logger, + maxPool: maxPool, + shutdownCh: make(chan struct{}), + stream: stream, + timeout: timeout, + TimeoutScale: DefaultTimeoutScale, + } + go trans.listen() + return trans +} + +// SetHeartbeatHandler is used to setup a heartbeat handler +// as a fast-pass. This is to avoid head-of-line blocking from +// disk IO. +func (n *NetworkTransport) SetHeartbeatHandler(cb func(rpc RPC)) { + n.heartbeatFnLock.Lock() + defer n.heartbeatFnLock.Unlock() + n.heartbeatFn = cb +} + +// Close is used to stop the network transport. +func (n *NetworkTransport) Close() error { + n.shutdownLock.Lock() + defer n.shutdownLock.Unlock() + + if !n.shutdown { + close(n.shutdownCh) + n.stream.Close() + n.shutdown = true + } + return nil +} + +// Consumer implements the Transport interface. +func (n *NetworkTransport) Consumer() <-chan RPC { + return n.consumeCh +} + +// LocalAddr implements the Transport interface. +func (n *NetworkTransport) LocalAddr() ServerAddress { + return ServerAddress(n.stream.Addr().String()) +} + +// IsShutdown is used to check if the transport is shutdown. +func (n *NetworkTransport) IsShutdown() bool { + select { + case <-n.shutdownCh: + return true + default: + return false + } +} + +// getExistingConn is used to grab a pooled connection. +func (n *NetworkTransport) getPooledConn(target ServerAddress) *netConn { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + conns, ok := n.connPool[target] + if !ok || len(conns) == 0 { + return nil + } + + var conn *netConn + num := len(conns) + conn, conns[num-1] = conns[num-1], nil + n.connPool[target] = conns[:num-1] + return conn +} + +// getConn is used to get a connection from the pool. 
+func (n *NetworkTransport) getConn(target ServerAddress) (*netConn, error) { + // Check for a pooled conn + if conn := n.getPooledConn(target); conn != nil { + return conn, nil + } + + // Dial a new connection + conn, err := n.stream.Dial(target, n.timeout) + if err != nil { + return nil, err + } + + // Wrap the conn + netConn := &netConn{ + target: target, + conn: conn, + r: bufio.NewReader(conn), + w: bufio.NewWriter(conn), + } + + // Setup encoder/decoders + netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{}) + netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{}) + + // Done + return netConn, nil +} + +// returnConn returns a connection back to the pool. +func (n *NetworkTransport) returnConn(conn *netConn) { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + key := conn.target + conns, _ := n.connPool[key] + + if !n.IsShutdown() && len(conns) < n.maxPool { + n.connPool[key] = append(conns, conn) + } else { + conn.Release() + } +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (n *NetworkTransport) AppendEntriesPipeline(target ServerAddress) (AppendPipeline, error) { + // Get a connection + conn, err := n.getConn(target) + if err != nil { + return nil, err + } + + // Create the pipeline + return newNetPipeline(n, conn), nil +} + +// AppendEntries implements the Transport interface. +func (n *NetworkTransport) AppendEntries(target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + return n.genericRPC(target, rpcAppendEntries, args, resp) +} + +// RequestVote implements the Transport interface. +func (n *NetworkTransport) RequestVote(target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error { + return n.genericRPC(target, rpcRequestVote, args, resp) +} + +// genericRPC handles a simple request/response RPC. +func (n *NetworkTransport) genericRPC(target ServerAddress, rpcType uint8, args interface{}, resp interface{}) error { + // Get a conn + conn, err := n.getConn(target) + if err != nil { + return err + } + + // Set a deadline + if n.timeout > 0 { + conn.conn.SetDeadline(time.Now().Add(n.timeout)) + } + + // Send the RPC + if err = sendRPC(conn, rpcType, args); err != nil { + return err + } + + // Decode the response + canReturn, err := decodeResponse(conn, resp) + if canReturn { + n.returnConn(conn) + } + return err +} + +// InstallSnapshot implements the Transport interface. +func (n *NetworkTransport) InstallSnapshot(target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + // Get a conn, always close for InstallSnapshot + conn, err := n.getConn(target) + if err != nil { + return err + } + defer conn.Release() + + // Set a deadline, scaled by request size + if n.timeout > 0 { + timeout := n.timeout * time.Duration(args.Size/int64(n.TimeoutScale)) + if timeout < n.timeout { + timeout = n.timeout + } + conn.conn.SetDeadline(time.Now().Add(timeout)) + } + + // Send the RPC + if err = sendRPC(conn, rpcInstallSnapshot, args); err != nil { + return err + } + + // Stream the state + if _, err = io.Copy(conn.w, data); err != nil { + return err + } + + // Flush + if err = conn.w.Flush(); err != nil { + return err + } + + // Decode the response, do not return conn + _, err = decodeResponse(conn, resp) + return err +} + +// EncodePeer implements the Transport interface. 
+func (n *NetworkTransport) EncodePeer(p ServerAddress) []byte { + return []byte(p) +} + +// DecodePeer implements the Transport interface. +func (n *NetworkTransport) DecodePeer(buf []byte) ServerAddress { + return ServerAddress(buf) +} + +// listen is used to handling incoming connections. +func (n *NetworkTransport) listen() { + for { + // Accept incoming connections + conn, err := n.stream.Accept() + if err != nil { + if n.IsShutdown() { + return + } + n.logger.Printf("[ERR] raft-net: Failed to accept connection: %v", err) + continue + } + n.logger.Printf("[DEBUG] raft-net: %v accepted connection from: %v", n.LocalAddr(), conn.RemoteAddr()) + + // Handle the connection in dedicated routine + go n.handleConn(conn) + } +} + +// handleConn is used to handle an inbound connection for its lifespan. +func (n *NetworkTransport) handleConn(conn net.Conn) { + defer conn.Close() + r := bufio.NewReader(conn) + w := bufio.NewWriter(conn) + dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) + enc := codec.NewEncoder(w, &codec.MsgpackHandle{}) + + for { + if err := n.handleCommand(r, dec, enc); err != nil { + if err != io.EOF { + n.logger.Printf("[ERR] raft-net: Failed to decode incoming command: %v", err) + } + return + } + if err := w.Flush(); err != nil { + n.logger.Printf("[ERR] raft-net: Failed to flush response: %v", err) + return + } + } +} + +// handleCommand is used to decode and dispatch a single command. +func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error { + // Get the rpc type + rpcType, err := r.ReadByte() + if err != nil { + return err + } + + // Create the RPC object + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + RespChan: respCh, + } + + // Decode the command + isHeartbeat := false + switch rpcType { + case rpcAppendEntries: + var req AppendEntriesRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + // Check if this is a heartbeat + if req.Term != 0 && req.Leader != nil && + req.PrevLogEntry == 0 && req.PrevLogTerm == 0 && + len(req.Entries) == 0 && req.LeaderCommitIndex == 0 { + isHeartbeat = true + } + + case rpcRequestVote: + var req RequestVoteRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + case rpcInstallSnapshot: + var req InstallSnapshotRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + rpc.Reader = io.LimitReader(r, req.Size) + + default: + return fmt.Errorf("unknown rpc type %d", rpcType) + } + + // Check for heartbeat fast-path + if isHeartbeat { + n.heartbeatFnLock.Lock() + fn := n.heartbeatFn + n.heartbeatFnLock.Unlock() + if fn != nil { + fn(rpc) + goto RESP + } + } + + // Dispatch the RPC + select { + case n.consumeCh <- rpc: + case <-n.shutdownCh: + return ErrTransportShutdown + } + + // Wait for response +RESP: + select { + case resp := <-respCh: + // Send the error first + respErr := "" + if resp.Error != nil { + respErr = resp.Error.Error() + } + if err := enc.Encode(respErr); err != nil { + return err + } + + // Send the response + if err := enc.Encode(resp.Response); err != nil { + return err + } + case <-n.shutdownCh: + return ErrTransportShutdown + } + return nil +} + +// decodeResponse is used to decode an RPC response and reports whether +// the connection can be reused. 
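+//
+// On the wire a response is two msgpack values in order: an error string
+// ("" means success) followed by the response struct, which is exactly what
+// handleCommand encodes on the serving side. Sketch of the framing:
+//
+//	server: enc.Encode(respErr); enc.Encode(resp.Response)
+//	client: dec.Decode(&rpcError); dec.Decode(resp)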
+func decodeResponse(conn *netConn, resp interface{}) (bool, error) { + // Decode the error if any + var rpcError string + if err := conn.dec.Decode(&rpcError); err != nil { + conn.Release() + return false, err + } + + // Decode the response + if err := conn.dec.Decode(resp); err != nil { + conn.Release() + return false, err + } + + // Format an error if any + if rpcError != "" { + return true, fmt.Errorf(rpcError) + } + return true, nil +} + +// sendRPC is used to encode and send the RPC. +func sendRPC(conn *netConn, rpcType uint8, args interface{}) error { + // Write the request type + if err := conn.w.WriteByte(rpcType); err != nil { + conn.Release() + return err + } + + // Send the request + if err := conn.enc.Encode(args); err != nil { + conn.Release() + return err + } + + // Flush + if err := conn.w.Flush(); err != nil { + conn.Release() + return err + } + return nil +} + +// newNetPipeline is used to construct a netPipeline from a given +// transport and connection. +func newNetPipeline(trans *NetworkTransport, conn *netConn) *netPipeline { + n := &netPipeline{ + conn: conn, + trans: trans, + doneCh: make(chan AppendFuture, rpcMaxPipeline), + inprogressCh: make(chan *appendFuture, rpcMaxPipeline), + shutdownCh: make(chan struct{}), + } + go n.decodeResponses() + return n +} + +// decodeResponses is a long running routine that decodes the responses +// sent on the connection. +func (n *netPipeline) decodeResponses() { + timeout := n.trans.timeout + for { + select { + case future := <-n.inprogressCh: + if timeout > 0 { + n.conn.conn.SetReadDeadline(time.Now().Add(timeout)) + } + + _, err := decodeResponse(n.conn, future.resp) + future.respond(err) + select { + case n.doneCh <- future: + case <-n.shutdownCh: + return + } + case <-n.shutdownCh: + return + } + } +} + +// AppendEntries is used to pipeline a new append entries request. +func (n *netPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { + // Create a new future + future := &appendFuture{ + start: time.Now(), + args: args, + resp: resp, + } + future.init() + + // Add a send timeout + if timeout := n.trans.timeout; timeout > 0 { + n.conn.conn.SetWriteDeadline(time.Now().Add(timeout)) + } + + // Send the RPC + if err := sendRPC(n.conn, rpcAppendEntries, future.args); err != nil { + return nil, err + } + + // Hand-off for decoding, this can also cause back-pressure + // to prevent too many inflight requests + select { + case n.inprogressCh <- future: + return future, nil + case <-n.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +// Consumer returns a channel that can be used to consume complete futures. +func (n *netPipeline) Consumer() <-chan AppendFuture { + return n.doneCh +} + +// Closed is used to shutdown the pipeline connection. 
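+//
+// Illustrative pipeline lifecycle from the caller's side (a sketch with error
+// handling elided; trans is an assumed *NetworkTransport):
+//
+//	p, _ := trans.AppendEntriesPipeline(target)
+//	f, _ := p.AppendEntries(&req, &resp) // queue the RPC
+//	done := <-p.Consumer()               // the same future, completed in send order
+//	_, _ = f, done.Error()
+//	p.Close()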
+func (n *netPipeline) Close() error { + n.shutdownLock.Lock() + defer n.shutdownLock.Unlock() + if n.shutdown { + return nil + } + + // Release the connection + n.conn.Release() + + n.shutdown = true + close(n.shutdownCh) + return nil +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/observer.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/observer.go new file mode 100644 index 00000000000..22500fa875f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/observer.go @@ -0,0 +1,115 @@ +package raft + +import ( + "sync/atomic" +) + +// Observation is sent along the given channel to observers when an event occurs. +type Observation struct { + // Raft holds the Raft instance generating the observation. + Raft *Raft + // Data holds observation-specific data. Possible types are + // *RequestVoteRequest and RaftState. + Data interface{} +} + +// nextObserverId is used to provide a unique ID for each observer to aid in +// deregistration. +var nextObserverID uint64 + +// FilterFn is a function that can be registered in order to filter observations. +// The function reports whether the observation should be included - if +// it returns false, the observation will be filtered out. +type FilterFn func(o *Observation) bool + +// Observer describes what to do with a given observation. +type Observer struct { + // channel receives observations. + channel chan Observation + + // blocking, if true, will cause Raft to block when sending an observation + // to this observer. This should generally be set to false. + blocking bool + + // filter will be called to determine if an observation should be sent to + // the channel. + filter FilterFn + + // id is the ID of this observer in the Raft map. + id uint64 + + // numObserved and numDropped are performance counters for this observer. + numObserved uint64 + numDropped uint64 +} + +// NewObserver creates a new observer that can be registered +// to make observations on a Raft instance. Observations +// will be sent on the given channel if they satisfy the +// given filter. +// +// If blocking is true, the observer will block when it can't +// send on the channel, otherwise it may discard events. +func NewObserver(channel chan Observation, blocking bool, filter FilterFn) *Observer { + return &Observer{ + channel: channel, + blocking: blocking, + filter: filter, + id: atomic.AddUint64(&nextObserverID, 1), + } +} + +// GetNumObserved returns the number of observations. +func (or *Observer) GetNumObserved() uint64 { + return atomic.LoadUint64(&or.numObserved) +} + +// GetNumDropped returns the number of dropped observations due to blocking. +func (or *Observer) GetNumDropped() uint64 { + return atomic.LoadUint64(&or.numDropped) +} + +// RegisterObserver registers a new observer. +func (r *Raft) RegisterObserver(or *Observer) { + r.observersLock.Lock() + defer r.observersLock.Unlock() + r.observers[or.id] = or +} + +// DeregisterObserver deregisters an observer. +func (r *Raft) DeregisterObserver(or *Observer) { + r.observersLock.Lock() + defer r.observersLock.Unlock() + delete(r.observers, or.id) +} + +// observe sends an observation to every observer. +func (r *Raft) observe(o interface{}) { + // In general observers should not block. But in any case this isn't + // disastrous as we only hold a read lock, which merely prevents + // registration / deregistration of observers. 
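+	// For reference, client code typically registers an observer like the
+	// sketch below (the filter shown keeps only state-change observations):
+	//
+	//	ch := make(chan Observation, 16)
+	//	ob := NewObserver(ch, false, func(o *Observation) bool {
+	//		_, ok := o.Data.(RaftState)
+	//		return ok
+	//	})
+	//	r.RegisterObserver(ob)
+	//	defer r.DeregisterObserver(ob)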
+ r.observersLock.RLock() + defer r.observersLock.RUnlock() + for _, or := range r.observers { + // It's wasteful to do this in the loop, but for the common case + // where there are no observers we won't create any objects. + ob := Observation{Raft: r, Data: o} + if or.filter != nil && !or.filter(&ob) { + continue + } + if or.channel == nil { + continue + } + if or.blocking { + or.channel <- ob + atomic.AddUint64(&or.numObserved, 1) + } else { + select { + case or.channel <- ob: + atomic.AddUint64(&or.numObserved, 1) + default: + atomic.AddUint64(&or.numDropped, 1) + } + } + } +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/peersjson.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/peersjson.go new file mode 100644 index 00000000000..c55fdbb43dd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/peersjson.go @@ -0,0 +1,46 @@ +package raft + +import ( + "bytes" + "encoding/json" + "io/ioutil" +) + +// ReadPeersJSON consumes a legacy peers.json file in the format of the old JSON +// peer store and creates a new-style configuration structure. This can be used +// to migrate this data or perform manual recovery when running protocol versions +// that can interoperate with older, unversioned Raft servers. This should not be +// used once server IDs are in use, because the old peers.json file didn't have +// support for these, nor non-voter suffrage types. +func ReadPeersJSON(path string) (Configuration, error) { + // Read in the file. + buf, err := ioutil.ReadFile(path) + if err != nil { + return Configuration{}, err + } + + // Parse it as JSON. + var peers []string + dec := json.NewDecoder(bytes.NewReader(buf)) + if err := dec.Decode(&peers); err != nil { + return Configuration{}, err + } + + // Map it into the new-style configuration structure. We can only specify + // voter roles here, and the ID has to be the same as the address. + var configuration Configuration + for _, peer := range peers { + server := Server{ + Suffrage: Voter, + ID: ServerID(peer), + Address: ServerAddress(peer), + } + configuration.Servers = append(configuration.Servers, server) + } + + // We should only ingest valid configurations. + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + return configuration, nil +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/raft.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/raft.go new file mode 100644 index 00000000000..a6c729413a8 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/raft.go @@ -0,0 +1,1348 @@ +package raft + +import ( + "bytes" + "container/list" + "fmt" + "io" + "time" + + "github.com/armon/go-metrics" +) + +const ( + minCheckInterval = 10 * time.Millisecond +) + +var ( + keyCurrentTerm = []byte("CurrentTerm") + keyLastVoteTerm = []byte("LastVoteTerm") + keyLastVoteCand = []byte("LastVoteCand") +) + +// getRPCHeader returns an initialized RPCHeader struct for the given +// Raft instance. This structure is sent along with RPC requests and +// responses. +func (r *Raft) getRPCHeader() RPCHeader { + return RPCHeader{ + ProtocolVersion: r.conf.ProtocolVersion, + } +} + +// checkRPCHeader houses logic about whether this instance of Raft can process +// the given RPC message. 
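+//
+// A worked example of the accepted window, assuming the package's
+// ProtocolVersionMin/Max of 0 and 3: a server configured with ProtocolVersion
+// 3 accepts headers carrying version 2 or 3 and returns ErrUnsupportedProtocol
+// for anything else.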
+func (r *Raft) checkRPCHeader(rpc RPC) error { + // Get the header off the RPC message. + wh, ok := rpc.Command.(WithRPCHeader) + if !ok { + return fmt.Errorf("RPC does not have a header") + } + header := wh.GetRPCHeader() + + // First check is to just make sure the code can understand the + // protocol at all. + if header.ProtocolVersion < ProtocolVersionMin || + header.ProtocolVersion > ProtocolVersionMax { + return ErrUnsupportedProtocol + } + + // Second check is whether we should support this message, given the + // current protocol we are configured to run. This will drop support + // for protocol version 0 starting at protocol version 2, which is + // currently what we want, and in general support one version back. We + // may need to revisit this policy depending on how future protocol + // changes evolve. + if header.ProtocolVersion < r.conf.ProtocolVersion-1 { + return ErrUnsupportedProtocol + } + + return nil +} + +// getSnapshotVersion returns the snapshot version that should be used when +// creating snapshots, given the protocol version in use. +func getSnapshotVersion(protocolVersion ProtocolVersion) SnapshotVersion { + // Right now we only have two versions and they are backwards compatible + // so we don't need to look at the protocol version. + return 1 +} + +// commitTuple is used to send an index that was committed, +// with an optional associated future that should be invoked. +type commitTuple struct { + log *Log + future *logFuture +} + +// leaderState is state that is used while we are a leader. +type leaderState struct { + commitCh chan struct{} + commitment *commitment + inflight *list.List // list of logFuture in log index order + replState map[ServerID]*followerReplication + notify map[*verifyFuture]struct{} + stepDown chan struct{} +} + +// setLeader is used to modify the current leader of the cluster +func (r *Raft) setLeader(leader ServerAddress) { + r.leaderLock.Lock() + r.leader = leader + r.leaderLock.Unlock() +} + +// requestConfigChange is a helper for the above functions that make +// configuration change requests. 'req' describes the change. For timeout, +// see AddVoter. +func (r *Raft) requestConfigChange(req configurationChangeRequest, timeout time.Duration) IndexFuture { + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + future := &configurationChangeFuture{ + req: req, + } + future.init() + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case r.configurationChangeCh <- future: + return future + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } +} + +// run is a long running goroutine that runs the Raft FSM. +func (r *Raft) run() { + for { + // Check if we are doing a shutdown + select { + case <-r.shutdownCh: + // Clear the leader to prevent forwarding + r.setLeader("") + return + default: + } + + // Enter into a sub-FSM + switch r.getState() { + case Follower: + r.runFollower() + case Candidate: + r.runCandidate() + case Leader: + r.runLeader() + } + } +} + +// runFollower runs the FSM for a follower. 
+func (r *Raft) runFollower() { + didWarn := false + r.logger.Printf("[INFO] raft: %v entering Follower state (Leader: %q)", r, r.Leader()) + metrics.IncrCounter([]string{"raft", "state", "follower"}, 1) + heartbeatTimer := randomTimeout(r.conf.HeartbeatTimeout) + for { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case c := <-r.configurationChangeCh: + // Reject any operations since we are not the leader + c.respond(ErrNotLeader) + + case a := <-r.applyCh: + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case c := <-r.configurationsCh: + c.configurations = r.configurations.Clone() + c.respond(nil) + + case b := <-r.bootstrapCh: + b.respond(r.liveBootstrap(b.configuration)) + + case <-heartbeatTimer: + // Restart the heartbeat timer + heartbeatTimer = randomTimeout(r.conf.HeartbeatTimeout) + + // Check if we have had a successful contact + lastContact := r.LastContact() + if time.Now().Sub(lastContact) < r.conf.HeartbeatTimeout { + continue + } + + // Heartbeat failed! Transition to the candidate state + lastLeader := r.Leader() + r.setLeader("") + + if r.configurations.latestIndex == 0 { + if !didWarn { + r.logger.Printf("[WARN] raft: no known peers, aborting election") + didWarn = true + } + } else if r.configurations.latestIndex == r.configurations.committedIndex && + !hasVote(r.configurations.latest, r.localID) { + if !didWarn { + r.logger.Printf("[WARN] raft: not part of stable configuration, aborting election") + didWarn = true + } + } else { + r.logger.Printf(`[WARN] raft: Heartbeat timeout from %q reached, starting election`, lastLeader) + metrics.IncrCounter([]string{"raft", "transition", "heartbeat_timeout"}, 1) + r.setState(Candidate) + return + } + + case <-r.shutdownCh: + return + } + } +} + +// liveBootstrap attempts to seed an initial configuration for the cluster. See +// the Raft object's member BootstrapCluster for more details. This must only be +// called on the main thread, and only makes sense in the follower state. +func (r *Raft) liveBootstrap(configuration Configuration) error { + // Use the pre-init API to make the static updates. + err := BootstrapCluster(&r.conf, r.logs, r.stable, r.snapshots, + r.trans, configuration) + if err != nil { + return err + } + + // Make the configuration live. + var entry Log + if err := r.logs.GetLog(1, &entry); err != nil { + panic(err) + } + r.setCurrentTerm(1) + r.setLastLog(entry.Index, entry.Term) + r.processConfigurationLogEntry(&entry) + return nil +} + +// runCandidate runs the FSM for a candidate. 
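+//
+// Votes are tallied against a simple majority: with five voters in the latest
+// configuration, quorumSize() = 5/2+1 = 3, so three granted votes (including
+// our own) win the election.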
+func (r *Raft) runCandidate() { + r.logger.Printf("[INFO] raft: %v entering Candidate state in term %v", + r, r.getCurrentTerm()+1) + metrics.IncrCounter([]string{"raft", "state", "candidate"}, 1) + + // Start vote for us, and set a timeout + voteCh := r.electSelf() + electionTimer := randomTimeout(r.conf.ElectionTimeout) + + // Tally the votes, need a simple majority + grantedVotes := 0 + votesNeeded := r.quorumSize() + r.logger.Printf("[DEBUG] raft: Votes needed: %d", votesNeeded) + + for r.getState() == Candidate { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case vote := <-voteCh: + // Check if the term is greater than ours, bail + if vote.Term > r.getCurrentTerm() { + r.logger.Printf("[DEBUG] raft: Newer term discovered, fallback to follower") + r.setState(Follower) + r.setCurrentTerm(vote.Term) + return + } + + // Check if the vote is granted + if vote.Granted { + grantedVotes++ + r.logger.Printf("[DEBUG] raft: Vote granted from %s in term %v. Tally: %d", + vote.voterID, vote.Term, grantedVotes) + } + + // Check if we've become the leader + if grantedVotes >= votesNeeded { + r.logger.Printf("[INFO] raft: Election won. Tally: %d", grantedVotes) + r.setState(Leader) + r.setLeader(r.localAddr) + return + } + + case c := <-r.configurationChangeCh: + // Reject any operations since we are not the leader + c.respond(ErrNotLeader) + + case a := <-r.applyCh: + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case c := <-r.configurationsCh: + c.configurations = r.configurations.Clone() + c.respond(nil) + + case b := <-r.bootstrapCh: + b.respond(ErrCantBootstrap) + + case <-electionTimer: + // Election failed! Restart the election. We simply return, + // which will kick us back into runCandidate + r.logger.Printf("[WARN] raft: Election timeout reached, restarting election") + return + + case <-r.shutdownCh: + return + } + } +} + +// runLeader runs the FSM for a leader. Do the setup here and drop into +// the leaderLoop for the hot loop. +func (r *Raft) runLeader() { + r.logger.Printf("[INFO] raft: %v entering Leader state", r) + metrics.IncrCounter([]string{"raft", "state", "leader"}, 1) + + // Notify that we are the leader + asyncNotifyBool(r.leaderCh, true) + + // Push to the notify channel if given + if notify := r.conf.NotifyCh; notify != nil { + select { + case notify <- true: + case <-r.shutdownCh: + } + } + + // Setup leader state + r.leaderState.commitCh = make(chan struct{}, 1) + r.leaderState.commitment = newCommitment(r.leaderState.commitCh, + r.configurations.latest, + r.getLastIndex()+1 /* first index that may be committed in this term */) + r.leaderState.inflight = list.New() + r.leaderState.replState = make(map[ServerID]*followerReplication) + r.leaderState.notify = make(map[*verifyFuture]struct{}) + r.leaderState.stepDown = make(chan struct{}, 1) + + // Cleanup state on step down + defer func() { + // Since we were the leader previously, we update our + // last contact time when we step down, so that we are not + // reporting a last contact time from before we were the + // leader. Otherwise, to a client it would seem our data + // is extremely stale. 
+ r.setLastContact() + + // Stop replication + for _, p := range r.leaderState.replState { + close(p.stopCh) + } + + // Respond to all inflight operations + for e := r.leaderState.inflight.Front(); e != nil; e = e.Next() { + e.Value.(*logFuture).respond(ErrLeadershipLost) + } + + // Respond to any pending verify requests + for future := range r.leaderState.notify { + future.respond(ErrLeadershipLost) + } + + // Clear all the state + r.leaderState.commitCh = nil + r.leaderState.commitment = nil + r.leaderState.inflight = nil + r.leaderState.replState = nil + r.leaderState.notify = nil + r.leaderState.stepDown = nil + + // If we are stepping down for some reason, no known leader. + // We may have stepped down due to an RPC call, which would + // provide the leader, so we cannot always blank this out. + r.leaderLock.Lock() + if r.leader == r.localAddr { + r.leader = "" + } + r.leaderLock.Unlock() + + // Notify that we are not the leader + asyncNotifyBool(r.leaderCh, false) + + // Push to the notify channel if given + if notify := r.conf.NotifyCh; notify != nil { + select { + case notify <- false: + case <-r.shutdownCh: + // On shutdown, make a best effort but do not block + select { + case notify <- false: + default: + } + } + } + }() + + // Start a replication routine for each peer + r.startStopReplication() + + // Dispatch a no-op log entry first. This gets this leader up to the latest + // possible commit index, even in the absence of client commands. This used + // to append a configuration entry instead of a noop. However, that permits + // an unbounded number of uncommitted configurations in the log. We now + // maintain that there exists at most one uncommitted configuration entry in + // any log, so we have to do proper no-ops here. + noop := &logFuture{ + log: Log{ + Type: LogNoop, + }, + } + r.dispatchLogs([]*logFuture{noop}) + + // Sit in the leader loop until we step down + r.leaderLoop() +} + +// startStopReplication will set up state and start asynchronous replication to +// new peers, and stop replication to removed peers. Before removing a peer, +// it'll instruct the replication routines to try to replicate to the current +// index. This must only be called from the main thread. 
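+//
+// For example, if a new configuration adds server "s4" and drops "s2"
+// (hypothetical IDs), this pass spawns a replicate() goroutine for s4 and
+// sends the current last index on s2's stopCh so that follower is drained up
+// to that point and its replication state deleted.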
+func (r *Raft) startStopReplication() { + inConfig := make(map[ServerID]bool, len(r.configurations.latest.Servers)) + lastIdx := r.getLastIndex() + + // Start replication goroutines that need starting + for _, server := range r.configurations.latest.Servers { + if server.ID == r.localID { + continue + } + inConfig[server.ID] = true + if _, ok := r.leaderState.replState[server.ID]; !ok { + r.logger.Printf("[INFO] raft: Added peer %v, starting replication", server.ID) + s := &followerReplication{ + peer: server, + commitment: r.leaderState.commitment, + stopCh: make(chan uint64, 1), + triggerCh: make(chan struct{}, 1), + currentTerm: r.getCurrentTerm(), + nextIndex: lastIdx + 1, + lastContact: time.Now(), + notifyCh: make(chan struct{}, 1), + stepDown: r.leaderState.stepDown, + } + r.leaderState.replState[server.ID] = s + r.goFunc(func() { r.replicate(s) }) + asyncNotifyCh(s.triggerCh) + } + } + + // Stop replication goroutines that need stopping + for serverID, repl := range r.leaderState.replState { + if inConfig[serverID] { + continue + } + // Replicate up to lastIdx and stop + r.logger.Printf("[INFO] raft: Removed peer %v, stopping replication after %v", serverID, lastIdx) + repl.stopCh <- lastIdx + close(repl.stopCh) + delete(r.leaderState.replState, serverID) + } +} + +// configurationChangeChIfStable returns r.configurationChangeCh if it's safe +// to process requests from it, or nil otherwise. This must only be called +// from the main thread. +// +// Note that if the conditions here were to change outside of leaderLoop to take +// this from nil to non-nil, we would need leaderLoop to be kicked. +func (r *Raft) configurationChangeChIfStable() chan *configurationChangeFuture { + // Have to wait until: + // 1. The latest configuration is committed, and + // 2. This leader has committed some entry (the noop) in this term + // https://groups.google.com/forum/#!msg/raft-dev/t4xj6dJTP6E/d2D9LrWRza8J + if r.configurations.latestIndex == r.configurations.committedIndex && + r.getCommitIndex() >= r.leaderState.commitment.startIndex { + return r.configurationChangeCh + } + return nil +} + +// leaderLoop is the hot loop for a leader. It is invoked +// after all the various leader setup is done. +func (r *Raft) leaderLoop() { + // stepDown is used to track if there is an inflight log that + // would cause us to lose leadership (specifically a RemovePeer of + // ourselves). If this is the case, we must not allow any logs to + // be processed in parallel, otherwise we are basing commit on + // only a single peer (ourself) and replicating to an undefined set + // of peers. 
+ stepDown := false + + lease := time.After(r.conf.LeaderLeaseTimeout) + for r.getState() == Leader { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case <-r.leaderState.stepDown: + r.setState(Follower) + + case <-r.leaderState.commitCh: + // Process the newly committed entries + oldCommitIndex := r.getCommitIndex() + commitIndex := r.leaderState.commitment.getCommitIndex() + r.setCommitIndex(commitIndex) + + if r.configurations.latestIndex > oldCommitIndex && + r.configurations.latestIndex <= commitIndex { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + if !hasVote(r.configurations.committed, r.localID) { + stepDown = true + } + } + + for { + e := r.leaderState.inflight.Front() + if e == nil { + break + } + commitLog := e.Value.(*logFuture) + idx := commitLog.log.Index + if idx > commitIndex { + break + } + // Measure the commit time + metrics.MeasureSince([]string{"raft", "commitTime"}, commitLog.dispatch) + r.processLogs(idx, commitLog) + r.leaderState.inflight.Remove(e) + } + + if stepDown { + if r.conf.ShutdownOnRemove { + r.logger.Printf("[INFO] raft: Removed ourself, shutting down") + r.Shutdown() + } else { + r.logger.Printf("[INFO] raft: Removed ourself, transitioning to follower") + r.setState(Follower) + } + } + + case v := <-r.verifyCh: + if v.quorumSize == 0 { + // Just dispatched, start the verification + r.verifyLeader(v) + + } else if v.votes < v.quorumSize { + // Early return, means there must be a new leader + r.logger.Printf("[WARN] raft: New leader elected, stepping down") + r.setState(Follower) + delete(r.leaderState.notify, v) + v.respond(ErrNotLeader) + + } else { + // Quorum of members agree, we are still leader + delete(r.leaderState.notify, v) + v.respond(nil) + } + + case c := <-r.configurationsCh: + c.configurations = r.configurations.Clone() + c.respond(nil) + + case future := <-r.configurationChangeChIfStable(): + r.appendConfigurationEntry(future) + + case b := <-r.bootstrapCh: + b.respond(ErrCantBootstrap) + + case newLog := <-r.applyCh: + // Group commit, gather all the ready commits + ready := []*logFuture{newLog} + for i := 0; i < r.conf.MaxAppendEntries; i++ { + select { + case newLog := <-r.applyCh: + ready = append(ready, newLog) + default: + break + } + } + + // Dispatch the logs + if stepDown { + // we're in the process of stepping down as leader, don't process anything new + for i := range ready { + ready[i].respond(ErrNotLeader) + } + } else { + r.dispatchLogs(ready) + } + + case <-lease: + // Check if we've exceeded the lease, potentially stepping down + maxDiff := r.checkLeaderLease() + + // Next check interval should adjust for the last node we've + // contacted, without going negative + checkInterval := r.conf.LeaderLeaseTimeout - maxDiff + if checkInterval < minCheckInterval { + checkInterval = minCheckInterval + } + + // Renew the lease timer + lease = time.After(checkInterval) + + case <-r.shutdownCh: + return + } + } +} + +// verifyLeader must be called from the main thread for safety. +// Causes the followers to attempt an immediate heartbeat. 
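+//
+// The future starts with one vote (our own) and a quorum size captured up
+// front, so a single-voter cluster is verified immediately; with three voters,
+// one further follower acknowledgement is required before the verifyFuture
+// succeeds.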
+func (r *Raft) verifyLeader(v *verifyFuture) { + // Current leader always votes for self + v.votes = 1 + + // Set the quorum size, hot-path for single node + v.quorumSize = r.quorumSize() + if v.quorumSize == 1 { + v.respond(nil) + return + } + + // Track this request + v.notifyCh = r.verifyCh + r.leaderState.notify[v] = struct{}{} + + // Trigger immediate heartbeats + for _, repl := range r.leaderState.replState { + repl.notifyLock.Lock() + repl.notify = append(repl.notify, v) + repl.notifyLock.Unlock() + asyncNotifyCh(repl.notifyCh) + } +} + +// checkLeaderLease is used to check if we can contact a quorum of nodes +// within the last leader lease interval. If not, we need to step down, +// as we may have lost connectivity. Returns the maximum duration without +// contact. This must only be called from the main thread. +func (r *Raft) checkLeaderLease() time.Duration { + // Track contacted nodes, we can always contact ourself + contacted := 1 + + // Check each follower + var maxDiff time.Duration + now := time.Now() + for peer, f := range r.leaderState.replState { + diff := now.Sub(f.LastContact()) + if diff <= r.conf.LeaderLeaseTimeout { + contacted++ + if diff > maxDiff { + maxDiff = diff + } + } else { + // Log at least once at high value, then debug. Otherwise it gets very verbose. + if diff <= 3*r.conf.LeaderLeaseTimeout { + r.logger.Printf("[WARN] raft: Failed to contact %v in %v", peer, diff) + } else { + r.logger.Printf("[DEBUG] raft: Failed to contact %v in %v", peer, diff) + } + } + metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond)) + } + + // Verify we can contact a quorum + quorum := r.quorumSize() + if contacted < quorum { + r.logger.Printf("[WARN] raft: Failed to contact quorum of nodes, stepping down") + r.setState(Follower) + metrics.IncrCounter([]string{"raft", "transition", "leader_lease_timeout"}, 1) + } + return maxDiff +} + +// quorumSize is used to return the quorum size. This must only be called on +// the main thread. +// TODO: revisit usage +func (r *Raft) quorumSize() int { + voters := 0 + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + voters++ + } + } + return voters/2 + 1 +} + +// appendConfigurationEntry changes the configuration and adds a new +// configuration entry to the log. This must only be called from the +// main thread. +func (r *Raft) appendConfigurationEntry(future *configurationChangeFuture) { + configuration, err := nextConfiguration(r.configurations.latest, r.configurations.latestIndex, future.req) + if err != nil { + future.respond(err) + return + } + + r.logger.Printf("[INFO] raft: Updating configuration with %s (%v, %v) to %+v", + future.req.command, future.req.serverID, future.req.serverAddress, configuration.Servers) + + // In pre-ID compatibility mode we translate all configuration changes + // in to an old remove peer message, which can handle all supported + // cases for peer changes in the pre-ID world (adding and removing + // voters). Both add peer and remove peer log entries are handled + // similarly on old Raft servers, but remove peer does extra checks to + // see if a leader needs to step down. Since they both assert the full + // configuration, then we can safely call remove peer for everything. 
+ if r.protocolVersion < 2 { + future.log = Log{ + Type: LogRemovePeerDeprecated, + Data: encodePeers(configuration, r.trans), + } + } else { + future.log = Log{ + Type: LogConfiguration, + Data: encodeConfiguration(configuration), + } + } + + r.dispatchLogs([]*logFuture{&future.logFuture}) + index := future.Index() + r.configurations.latest = configuration + r.configurations.latestIndex = index + r.leaderState.commitment.setConfiguration(configuration) + r.startStopReplication() +} + +// dispatchLog is called on the leader to push a log to disk, mark it +// as inflight and begin replication of it. +func (r *Raft) dispatchLogs(applyLogs []*logFuture) { + now := time.Now() + defer metrics.MeasureSince([]string{"raft", "leader", "dispatchLog"}, now) + + term := r.getCurrentTerm() + lastIndex := r.getLastIndex() + logs := make([]*Log, len(applyLogs)) + + for idx, applyLog := range applyLogs { + applyLog.dispatch = now + lastIndex++ + applyLog.log.Index = lastIndex + applyLog.log.Term = term + logs[idx] = &applyLog.log + r.leaderState.inflight.PushBack(applyLog) + } + + // Write the log entry locally + if err := r.logs.StoreLogs(logs); err != nil { + r.logger.Printf("[ERR] raft: Failed to commit logs: %v", err) + for _, applyLog := range applyLogs { + applyLog.respond(err) + } + r.setState(Follower) + return + } + r.leaderState.commitment.match(r.localID, lastIndex) + + // Update the last log since it's on disk now + r.setLastLog(lastIndex, term) + + // Notify the replicators of the new log + for _, f := range r.leaderState.replState { + asyncNotifyCh(f.triggerCh) + } +} + +// processLogs is used to apply all the committed entires that haven't been +// applied up to the given index limit. +// This can be called from both leaders and followers. +// Followers call this from AppendEntires, for n entires at a time, and always +// pass future=nil. +// Leaders call this once per inflight when entries are committed. They pass +// the future from inflights. +func (r *Raft) processLogs(index uint64, future *logFuture) { + // Reject logs we've applied already + lastApplied := r.getLastApplied() + if index <= lastApplied { + r.logger.Printf("[WARN] raft: Skipping application of old log: %d", index) + return + } + + // Apply all the preceding logs + for idx := r.getLastApplied() + 1; idx <= index; idx++ { + // Get the log, either from the future or from our log store + if future != nil && future.log.Index == idx { + r.processLog(&future.log, future) + + } else { + l := new(Log) + if err := r.logs.GetLog(idx, l); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", idx, err) + panic(err) + } + r.processLog(l, nil) + } + + // Update the lastApplied index and term + r.setLastApplied(idx) + } +} + +// processLog is invoked to process the application of a single committed log entry. 
+func (r *Raft) processLog(l *Log, future *logFuture) { + switch l.Type { + case LogBarrier: + // Barrier is handled by the FSM + fallthrough + + case LogCommand: + // Forward to the fsm handler + select { + case r.fsmCommitCh <- commitTuple{l, future}: + case <-r.shutdownCh: + if future != nil { + future.respond(ErrRaftShutdown) + } + } + + // Return so that the future is only responded to + // by the FSM handler when the application is done + return + + case LogConfiguration: + case LogAddPeerDeprecated: + case LogRemovePeerDeprecated: + case LogNoop: + // Ignore the no-op + + default: + panic(fmt.Errorf("unrecognized log type: %#v", l)) + } + + // Invoke the future if given + if future != nil { + future.respond(nil) + } +} + +// processRPC is called to handle an incoming RPC request. This must only be +// called from the main thread. +func (r *Raft) processRPC(rpc RPC) { + if err := r.checkRPCHeader(rpc); err != nil { + rpc.Respond(nil, err) + return + } + + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + case *RequestVoteRequest: + r.requestVote(rpc, cmd) + case *InstallSnapshotRequest: + r.installSnapshot(rpc, cmd) + default: + r.logger.Printf("[ERR] raft: Got unexpected command: %#v", rpc.Command) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// processHeartbeat is a special handler used just for heartbeat requests +// so that they can be fast-pathed if a transport supports it. This must only +// be called from the main thread. +func (r *Raft) processHeartbeat(rpc RPC) { + defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now()) + + // Check if we are shutdown, just ignore the RPC + select { + case <-r.shutdownCh: + return + default: + } + + // Ensure we are only handling a heartbeat + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + default: + r.logger.Printf("[ERR] raft: Expected heartbeat, got command: %#v", rpc.Command) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// appendEntries is invoked when we get an append entries RPC call. This must +// only be called from the main thread. 
+func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now()) + // Setup a response + resp := &AppendEntriesResponse{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + LastLog: r.getLastIndex(), + Success: false, + NoRetryBackoff: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Ignore an older term + if a.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one, also transition to follower + // if we ever get an appendEntries call + if a.Term > r.getCurrentTerm() || r.getState() != Follower { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(a.Term) + resp.Term = a.Term + } + + // Save the current leader + r.setLeader(ServerAddress(r.trans.DecodePeer(a.Leader))) + + // Verify the last log entry + if a.PrevLogEntry > 0 { + lastIdx, lastTerm := r.getLastEntry() + + var prevLogTerm uint64 + if a.PrevLogEntry == lastIdx { + prevLogTerm = lastTerm + + } else { + var prevLog Log + if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil { + r.logger.Printf("[WARN] raft: Failed to get previous log: %d %v (last: %d)", + a.PrevLogEntry, err, lastIdx) + resp.NoRetryBackoff = true + return + } + prevLogTerm = prevLog.Term + } + + if a.PrevLogTerm != prevLogTerm { + r.logger.Printf("[WARN] raft: Previous log term mis-match: ours: %d remote: %d", + prevLogTerm, a.PrevLogTerm) + resp.NoRetryBackoff = true + return + } + } + + // Process any new entries + if len(a.Entries) > 0 { + start := time.Now() + + // Delete any conflicting entries, skip any duplicates + lastLogIdx, _ := r.getLastLog() + var newEntries []*Log + for i, entry := range a.Entries { + if entry.Index > lastLogIdx { + newEntries = a.Entries[i:] + break + } + var storeEntry Log + if err := r.logs.GetLog(entry.Index, &storeEntry); err != nil { + r.logger.Printf("[WARN] raft: Failed to get log entry %d: %v", + entry.Index, err) + return + } + if entry.Term != storeEntry.Term { + r.logger.Printf("[WARN] raft: Clearing log suffix from %d to %d", entry.Index, lastLogIdx) + if err := r.logs.DeleteRange(entry.Index, lastLogIdx); err != nil { + r.logger.Printf("[ERR] raft: Failed to clear log suffix: %v", err) + return + } + if entry.Index <= r.configurations.latestIndex { + r.configurations.latest = r.configurations.committed + r.configurations.latestIndex = r.configurations.committedIndex + } + newEntries = a.Entries[i:] + break + } + } + + if n := len(newEntries); n > 0 { + // Append the new entries + if err := r.logs.StoreLogs(newEntries); err != nil { + r.logger.Printf("[ERR] raft: Failed to append to logs: %v", err) + // TODO: leaving r.getLastLog() in the wrong + // state if there was a truncation above + return + } + + // Handle any new configuration changes + for _, newEntry := range newEntries { + r.processConfigurationLogEntry(newEntry) + } + + // Update the lastLog + last := newEntries[n-1] + r.setLastLog(last.Index, last.Term) + } + + metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start) + } + + // Update the commit index + if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() { + start := time.Now() + idx := min(a.LeaderCommitIndex, r.getLastIndex()) + r.setCommitIndex(idx) + if r.configurations.latestIndex <= idx { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + } + r.processLogs(idx, nil) + 
metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "processLogs"}, start) + } + + // Everything went well, set success + resp.Success = true + r.setLastContact() + return +} + +// processConfigurationLogEntry takes a log entry and updates the latest +// configuration if the entry results in a new configuration. This must only be +// called from the main thread, or from NewRaft() before any threads have begun. +func (r *Raft) processConfigurationLogEntry(entry *Log) { + if entry.Type == LogConfiguration { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + r.configurations.latest = decodeConfiguration(entry.Data) + r.configurations.latestIndex = entry.Index + } else if entry.Type == LogAddPeerDeprecated || entry.Type == LogRemovePeerDeprecated { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + r.configurations.latest = decodePeers(entry.Data, r.trans) + r.configurations.latestIndex = entry.Index + } +} + +// requestVote is invoked when we get an request vote RPC call. +func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now()) + r.observe(*req) + + // Setup a response + resp := &RequestVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + Granted: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Version 0 servers will panic unless the peers is present. It's only + // used on them to produce a warning message. + if r.protocolVersion < 2 { + resp.Peers = encodePeers(r.configurations.latest, r.trans) + } + + // Check if we have an existing leader [who's not the candidate] + candidate := r.trans.DecodePeer(req.Candidate) + if leader := r.Leader(); leader != "" && leader != candidate { + r.logger.Printf("[WARN] raft: Rejecting vote request from %v since we have a leader: %v", + candidate, leader) + return + } + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one + if req.Term > r.getCurrentTerm() { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(req.Term) + resp.Term = req.Term + } + + // Check if we have voted yet + lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm) + if err != nil && err.Error() != "not found" { + r.logger.Printf("[ERR] raft: Failed to get last vote term: %v", err) + return + } + lastVoteCandBytes, err := r.stable.Get(keyLastVoteCand) + if err != nil && err.Error() != "not found" { + r.logger.Printf("[ERR] raft: Failed to get last vote candidate: %v", err) + return + } + + // Check if we've voted in this election before + if lastVoteTerm == req.Term && lastVoteCandBytes != nil { + r.logger.Printf("[INFO] raft: Duplicate RequestVote for same term: %d", req.Term) + if bytes.Compare(lastVoteCandBytes, req.Candidate) == 0 { + r.logger.Printf("[WARN] raft: Duplicate RequestVote from candidate: %s", req.Candidate) + resp.Granted = true + } + return + } + + // Reject if their term is older + lastIdx, lastTerm := r.getLastEntry() + if lastTerm > req.LastLogTerm { + r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last term is greater (%d, %d)", + candidate, lastTerm, req.LastLogTerm) + return + } + + if lastTerm == req.LastLogTerm && lastIdx > req.LastLogIndex { + r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last index is greater (%d, %d)", 
+ candidate, lastIdx, req.LastLogIndex) + return + } + + // Persist a vote for safety + if err := r.persistVote(req.Term, req.Candidate); err != nil { + r.logger.Printf("[ERR] raft: Failed to persist vote: %v", err) + return + } + + resp.Granted = true + r.setLastContact() + return +} + +// installSnapshot is invoked when we get a InstallSnapshot RPC call. +// We must be in the follower state for this, since it means we are +// too far behind a leader for log replay. This must only be called +// from the main thread. +func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "installSnapshot"}, time.Now()) + // Setup a response + resp := &InstallSnapshotResponse{ + Term: r.getCurrentTerm(), + Success: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Sanity check the version + if req.SnapshotVersion < SnapshotVersionMin || + req.SnapshotVersion > SnapshotVersionMax { + rpcErr = fmt.Errorf("unsupported snapshot version %d", req.SnapshotVersion) + return + } + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one + if req.Term > r.getCurrentTerm() { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(req.Term) + resp.Term = req.Term + } + + // Save the current leader + r.setLeader(ServerAddress(r.trans.DecodePeer(req.Leader))) + + // Create a new snapshot + var reqConfiguration Configuration + var reqConfigurationIndex uint64 + if req.SnapshotVersion > 0 { + reqConfiguration = decodeConfiguration(req.Configuration) + reqConfigurationIndex = req.ConfigurationIndex + } else { + reqConfiguration = decodePeers(req.Peers, r.trans) + reqConfigurationIndex = req.LastLogIndex + } + version := getSnapshotVersion(r.protocolVersion) + sink, err := r.snapshots.Create(version, req.LastLogIndex, req.LastLogTerm, + reqConfiguration, reqConfigurationIndex, r.trans) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to create snapshot to install: %v", err) + rpcErr = fmt.Errorf("failed to create snapshot: %v", err) + return + } + + // Spill the remote snapshot to disk + n, err := io.Copy(sink, rpc.Reader) + if err != nil { + sink.Cancel() + r.logger.Printf("[ERR] raft: Failed to copy snapshot: %v", err) + rpcErr = err + return + } + + // Check that we received it all + if n != req.Size { + sink.Cancel() + r.logger.Printf("[ERR] raft: Failed to receive whole snapshot: %d / %d", n, req.Size) + rpcErr = fmt.Errorf("short read") + return + } + + // Finalize the snapshot + if err := sink.Close(); err != nil { + r.logger.Printf("[ERR] raft: Failed to finalize snapshot: %v", err) + rpcErr = err + return + } + r.logger.Printf("[INFO] raft: Copied %d bytes to local snapshot", n) + + // Restore snapshot + future := &restoreFuture{ID: sink.ID()} + future.init() + select { + case r.fsmRestoreCh <- future: + case <-r.shutdownCh: + future.respond(ErrRaftShutdown) + return + } + + // Wait for the restore to happen + if err := future.Error(); err != nil { + r.logger.Printf("[ERR] raft: Failed to restore snapshot: %v", err) + rpcErr = err + return + } + + // Update the lastApplied so we don't replay old logs + r.setLastApplied(req.LastLogIndex) + + // Update the last stable snapshot info + r.setLastSnapshot(req.LastLogIndex, req.LastLogTerm) + + // Restore the peer set + r.configurations.latest = reqConfiguration + r.configurations.latestIndex = reqConfigurationIndex + r.configurations.committed = reqConfiguration + 
r.configurations.committedIndex = reqConfigurationIndex + + // Compact logs, continue even if this fails + if err := r.compactLogs(req.LastLogIndex); err != nil { + r.logger.Printf("[ERR] raft: Failed to compact logs: %v", err) + } + + r.logger.Printf("[INFO] raft: Installed remote snapshot") + resp.Success = true + r.setLastContact() + return +} + +// setLastContact is used to set the last contact time to now +func (r *Raft) setLastContact() { + r.lastContactLock.Lock() + r.lastContact = time.Now() + r.lastContactLock.Unlock() +} + +type voteResult struct { + RequestVoteResponse + voterID ServerID +} + +// electSelf is used to send a RequestVote RPC to all peers, and vote for +// ourself. This has the side affecting of incrementing the current term. The +// response channel returned is used to wait for all the responses (including a +// vote for ourself). This must only be called from the main thread. +func (r *Raft) electSelf() <-chan *voteResult { + // Create a response channel + respCh := make(chan *voteResult, len(r.configurations.latest.Servers)) + + // Increment the term + r.setCurrentTerm(r.getCurrentTerm() + 1) + + // Construct the request + lastIdx, lastTerm := r.getLastEntry() + req := &RequestVoteRequest{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + Candidate: r.trans.EncodePeer(r.localAddr), + LastLogIndex: lastIdx, + LastLogTerm: lastTerm, + } + + // Construct a function to ask for a vote + askPeer := func(peer Server) { + r.goFunc(func() { + defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now()) + resp := &voteResult{voterID: peer.ID} + err := r.trans.RequestVote(peer.Address, req, &resp.RequestVoteResponse) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to make RequestVote RPC to %v: %v", peer, err) + resp.Term = req.Term + resp.Granted = false + } + respCh <- resp + }) + } + + // For each peer, request a vote + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + if server.ID == r.localID { + // Persist a vote for ourselves + if err := r.persistVote(req.Term, req.Candidate); err != nil { + r.logger.Printf("[ERR] raft: Failed to persist vote : %v", err) + return nil + } + // Include our own vote + respCh <- &voteResult{ + RequestVoteResponse: RequestVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: req.Term, + Granted: true, + }, + voterID: r.localID, + } + } else { + askPeer(server) + } + } + } + + return respCh +} + +// persistVote is used to persist our vote for safety. +func (r *Raft) persistVote(term uint64, candidate []byte) error { + if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil { + return err + } + if err := r.stable.Set(keyLastVoteCand, candidate); err != nil { + return err + } + return nil +} + +// setCurrentTerm is used to set the current term in a durable manner. +func (r *Raft) setCurrentTerm(t uint64) { + // Persist to disk first + if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil { + panic(fmt.Errorf("failed to save current term: %v", err)) + } + r.raftState.setCurrentTerm(t) +} + +// setState is used to update the current state. Any state +// transition causes the known leader to be cleared. This means +// that leader should be set only after updating the state. 
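+//
+// Callers therefore order the calls as state first, leader second, e.g. the
+// pattern used in appendEntries:
+//
+//	r.setState(Follower)
+//	r.setCurrentTerm(a.Term)
+//	...
+//	r.setLeader(ServerAddress(r.trans.DecodePeer(a.Leader)))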
+func (r *Raft) setState(state RaftState) { + r.setLeader("") + oldState := r.raftState.getState() + r.raftState.setState(state) + if oldState != state { + r.observe(state) + } +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/replication.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/replication.go new file mode 100644 index 00000000000..68392734397 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/replication.go @@ -0,0 +1,561 @@ +package raft + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +const ( + maxFailureScale = 12 + failureWait = 10 * time.Millisecond +) + +var ( + // ErrLogNotFound indicates a given log entry is not available. + ErrLogNotFound = errors.New("log not found") + + // ErrPipelineReplicationNotSupported can be returned by the transport to + // signal that pipeline replication is not supported in general, and that + // no error message should be produced. + ErrPipelineReplicationNotSupported = errors.New("pipeline replication not supported") +) + +// followerReplication is in charge of sending snapshots and log entries from +// this leader during this particular term to a remote follower. +type followerReplication struct { + // peer contains the network address and ID of the remote follower. + peer Server + + // commitment tracks the entries acknowledged by followers so that the + // leader's commit index can advance. It is updated on successsful + // AppendEntries responses. + commitment *commitment + + // stopCh is notified/closed when this leader steps down or the follower is + // removed from the cluster. In the follower removed case, it carries a log + // index; replication should be attempted with a best effort up through that + // index, before exiting. + stopCh chan uint64 + // triggerCh is notified every time new entries are appended to the log. + triggerCh chan struct{} + + // currentTerm is the term of this leader, to be included in AppendEntries + // requests. + currentTerm uint64 + // nextIndex is the index of the next log entry to send to the follower, + // which may fall past the end of the log. + nextIndex uint64 + + // lastContact is updated to the current time whenever any response is + // received from the follower (successful or not). This is used to check + // whether the leader should step down (Raft.checkLeaderLease()). + lastContact time.Time + // lastContactLock protects 'lastContact'. + lastContactLock sync.RWMutex + + // failures counts the number of failed RPCs since the last success, which is + // used to apply backoff. + failures uint64 + + // notifyCh is notified to send out a heartbeat, which is used to check that + // this server is still leader. + notifyCh chan struct{} + // notify is a list of futures to be resolved upon receipt of an + // acknowledgement, then cleared from this list. + notify []*verifyFuture + // notifyLock protects 'notify'. + notifyLock sync.Mutex + + // stepDown is used to indicate to the leader that we + // should step down based on information from a follower. + stepDown chan struct{} + + // allowPipeline is used to determine when to pipeline the AppendEntries RPCs. + // It is private to this replication goroutine. + allowPipeline bool +} + +// notifyAll is used to notify all the waiting verify futures +// if the follower believes we are still the leader. 
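+//
+// It is invoked with resp.Success after each heartbeat AppendEntries, and with
+// true after a successful InstallSnapshot, which is what ultimately resolves
+// the verify futures queued by verifyLeader().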
+func (s *followerReplication) notifyAll(leader bool) { + // Clear the waiting notifies minimizing lock time + s.notifyLock.Lock() + n := s.notify + s.notify = nil + s.notifyLock.Unlock() + + // Submit our votes + for _, v := range n { + v.vote(leader) + } +} + +// LastContact returns the time of last contact. +func (s *followerReplication) LastContact() time.Time { + s.lastContactLock.RLock() + last := s.lastContact + s.lastContactLock.RUnlock() + return last +} + +// setLastContact sets the last contact to the current time. +func (s *followerReplication) setLastContact() { + s.lastContactLock.Lock() + s.lastContact = time.Now() + s.lastContactLock.Unlock() +} + +// replicate is a long running routine that replicates log entries to a single +// follower. +func (r *Raft) replicate(s *followerReplication) { + // Start an async heartbeating routing + stopHeartbeat := make(chan struct{}) + defer close(stopHeartbeat) + r.goFunc(func() { r.heartbeat(s, stopHeartbeat) }) + +RPC: + shouldStop := false + for !shouldStop { + select { + case maxIndex := <-s.stopCh: + // Make a best effort to replicate up to this index + if maxIndex > 0 { + r.replicateTo(s, maxIndex) + } + return + case <-s.triggerCh: + lastLogIdx, _ := r.getLastLog() + shouldStop = r.replicateTo(s, lastLogIdx) + case <-randomTimeout(r.conf.CommitTimeout): // TODO: what is this? + lastLogIdx, _ := r.getLastLog() + shouldStop = r.replicateTo(s, lastLogIdx) + } + + // If things looks healthy, switch to pipeline mode + if !shouldStop && s.allowPipeline { + goto PIPELINE + } + } + return + +PIPELINE: + // Disable until re-enabled + s.allowPipeline = false + + // Replicates using a pipeline for high performance. This method + // is not able to gracefully recover from errors, and so we fall back + // to standard mode on failure. + if err := r.pipelineReplicate(s); err != nil { + if err != ErrPipelineReplicationNotSupported { + r.logger.Printf("[ERR] raft: Failed to start pipeline replication to %s: %s", s.peer, err) + } + } + goto RPC +} + +// replicateTo is a hepler to replicate(), used to replicate the logs up to a +// given last index. +// If the follower log is behind, we take care to bring them up to date. 
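+//
+// When the follower rejects an AppendEntries, nextIndex is pulled back with
+// max(min(nextIndex-1, resp.LastLog+1), 1); e.g. with nextIndex=100 and a
+// follower reporting LastLog=42, the next attempt resends from index 43.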
+func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) { + // Create the base request + var req AppendEntriesRequest + var resp AppendEntriesResponse + var start time.Time +START: + // Prevent an excessive retry rate on errors + if s.failures > 0 { + select { + case <-time.After(backoff(failureWait, s.failures, maxFailureScale)): + case <-r.shutdownCh: + } + } + + // Setup the request + if err := r.setupAppendEntries(s, &req, s.nextIndex, lastIndex); err == ErrLogNotFound { + goto SEND_SNAP + } else if err != nil { + return + } + + // Make the RPC call + start = time.Now() + if err := r.trans.AppendEntries(s.peer.Address, &req, &resp); err != nil { + r.logger.Printf("[ERR] raft: Failed to AppendEntries to %v: %v", s.peer, err) + s.failures++ + return + } + appendStats(string(s.peer.ID), start, float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true + } + + // Update the last contact + s.setLastContact() + + // Update s based on success + if resp.Success { + // Update our replication state + updateLastAppended(s, &req) + + // Clear any failures, allow pipelining + s.failures = 0 + s.allowPipeline = true + } else { + s.nextIndex = max(min(s.nextIndex-1, resp.LastLog+1), 1) + if resp.NoRetryBackoff { + s.failures = 0 + } else { + s.failures++ + } + r.logger.Printf("[WARN] raft: AppendEntries to %v rejected, sending older logs (next: %d)", s.peer, s.nextIndex) + } + +CHECK_MORE: + // Poll the stop channel here in case we are looping and have been asked + // to stop, or have stepped down as leader. Even for the best effort case + // where we are asked to replicate to a given index and then shutdown, + // it's better to not loop in here to send lots of entries to a straggler + // that's leaving the cluster anyways. + select { + case <-s.stopCh: + return true + default: + } + + // Check if there are more logs to replicate + if s.nextIndex <= lastIndex { + goto START + } + return + + // SEND_SNAP is used when we fail to get a log, usually because the follower + // is too far behind, and we must ship a snapshot down instead +SEND_SNAP: + if stop, err := r.sendLatestSnapshot(s); stop { + return true + } else if err != nil { + r.logger.Printf("[ERR] raft: Failed to send snapshot to %v: %v", s.peer, err) + return + } + + // Check if there is more to replicate + goto CHECK_MORE +} + +// sendLatestSnapshot is used to send the latest snapshot we have +// down to our follower. 
+func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { + // Get the snapshots + snapshots, err := r.snapshots.List() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) + return false, err + } + + // Check we have at least a single snapshot + if len(snapshots) == 0 { + return false, fmt.Errorf("no snapshots found") + } + + // Open the most recent snapshot + snapID := snapshots[0].ID + meta, snapshot, err := r.snapshots.Open(snapID) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapID, err) + return false, err + } + defer snapshot.Close() + + // Setup the request + req := InstallSnapshotRequest{ + RPCHeader: r.getRPCHeader(), + SnapshotVersion: meta.Version, + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localAddr), + LastLogIndex: meta.Index, + LastLogTerm: meta.Term, + Peers: meta.Peers, + Size: meta.Size, + Configuration: encodeConfiguration(meta.Configuration), + ConfigurationIndex: meta.ConfigurationIndex, + } + + // Make the call + start := time.Now() + var resp InstallSnapshotResponse + if err := r.trans.InstallSnapshot(s.peer.Address, &req, &resp, snapshot); err != nil { + r.logger.Printf("[ERR] raft: Failed to install snapshot %v: %v", snapID, err) + s.failures++ + return false, err + } + metrics.MeasureSince([]string{"raft", "replication", "installSnapshot", string(s.peer.ID)}, start) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true, nil + } + + // Update the last contact + s.setLastContact() + + // Check for success + if resp.Success { + // Update the indexes + s.nextIndex = meta.Index + 1 + s.commitment.match(s.peer.ID, meta.Index) + + // Clear any failures + s.failures = 0 + + // Notify we are still leader + s.notifyAll(true) + } else { + s.failures++ + r.logger.Printf("[WARN] raft: InstallSnapshot to %v rejected", s.peer) + } + return false, nil +} + +// heartbeat is used to periodically invoke AppendEntries on a peer +// to ensure they don't time out. This is done async of replicate(), +// since that routine could potentially be blocked on disk IO. +func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) { + var failures uint64 + req := AppendEntriesRequest{ + RPCHeader: r.getRPCHeader(), + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localAddr), + } + var resp AppendEntriesResponse + for { + // Wait for the next heartbeat interval or forced notify + select { + case <-s.notifyCh: + case <-randomTimeout(r.conf.HeartbeatTimeout / 10): + case <-stopCh: + return + } + + start := time.Now() + if err := r.trans.AppendEntries(s.peer.Address, &req, &resp); err != nil { + r.logger.Printf("[ERR] raft: Failed to heartbeat to %v: %v", s.peer.Address, err) + failures++ + select { + case <-time.After(backoff(failureWait, failures, maxFailureScale)): + case <-stopCh: + } + } else { + s.setLastContact() + failures = 0 + metrics.MeasureSince([]string{"raft", "replication", "heartbeat", string(s.peer.ID)}, start) + s.notifyAll(resp.Success) + } + } +} + +// pipelineReplicate is used when we have synchronized our state with the follower, +// and want to switch to a higher performance pipeline mode of replication. +// We only pipeline AppendEntries commands, and if we ever hit an error, we fall +// back to the standard replication which can handle more complex situations. 
+func (r *Raft) pipelineReplicate(s *followerReplication) error { + // Create a new pipeline + pipeline, err := r.trans.AppendEntriesPipeline(s.peer.Address) + if err != nil { + return err + } + defer pipeline.Close() + + // Log start and stop of pipeline + r.logger.Printf("[INFO] raft: pipelining replication to peer %v", s.peer) + defer r.logger.Printf("[INFO] raft: aborting pipeline replication to peer %v", s.peer) + + // Create a shutdown and finish channel + stopCh := make(chan struct{}) + finishCh := make(chan struct{}) + + // Start a dedicated decoder + r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) }) + + // Start pipeline sends at the last good nextIndex + nextIndex := s.nextIndex + + shouldStop := false +SEND: + for !shouldStop { + select { + case <-finishCh: + break SEND + case maxIndex := <-s.stopCh: + // Make a best effort to replicate up to this index + if maxIndex > 0 { + r.pipelineSend(s, pipeline, &nextIndex, maxIndex) + } + break SEND + case <-s.triggerCh: + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + case <-randomTimeout(r.conf.CommitTimeout): + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + } + } + + // Stop our decoder, and wait for it to finish + close(stopCh) + select { + case <-finishCh: + case <-r.shutdownCh: + } + return nil +} + +// pipelineSend is used to send data over a pipeline. It is a helper to +// pipelineReplicate. +func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *uint64, lastIndex uint64) (shouldStop bool) { + // Create a new append request + req := new(AppendEntriesRequest) + if err := r.setupAppendEntries(s, req, *nextIdx, lastIndex); err != nil { + return true + } + + // Pipeline the append entries + if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil { + r.logger.Printf("[ERR] raft: Failed to pipeline AppendEntries to %v: %v", s.peer, err) + return true + } + + // Increase the next send log to avoid re-sending old logs + if n := len(req.Entries); n > 0 { + last := req.Entries[n-1] + *nextIdx = last.Index + 1 + } + return false +} + +// pipelineDecode is used to decode the responses of pipelined requests. +func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) { + defer close(finishCh) + respCh := p.Consumer() + for { + select { + case ready := <-respCh: + req, resp := ready.Request(), ready.Response() + appendStats(string(s.peer.ID), ready.Start(), float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return + } + + // Update the last contact + s.setLastContact() + + // Abort pipeline if not successful + if !resp.Success { + return + } + + // Update our replication state + updateLastAppended(s, req) + case <-stopCh: + return + } + } +} + +// setupAppendEntries is used to setup an append entries request. 
+func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { + req.RPCHeader = r.getRPCHeader() + req.Term = s.currentTerm + req.Leader = r.trans.EncodePeer(r.localAddr) + req.LeaderCommitIndex = r.getCommitIndex() + if err := r.setPreviousLog(req, nextIndex); err != nil { + return err + } + if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil { + return err + } + return nil +} + +// setPreviousLog is used to setup the PrevLogEntry and PrevLogTerm for an +// AppendEntriesRequest given the next index to replicate. +func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error { + // Guard for the first index, since there is no 0 log entry + // Guard against the previous index being a snapshot as well + lastSnapIdx, lastSnapTerm := r.getLastSnapshot() + if nextIndex == 1 { + req.PrevLogEntry = 0 + req.PrevLogTerm = 0 + + } else if (nextIndex - 1) == lastSnapIdx { + req.PrevLogEntry = lastSnapIdx + req.PrevLogTerm = lastSnapTerm + + } else { + var l Log + if err := r.logs.GetLog(nextIndex-1, &l); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", + nextIndex-1, err) + return err + } + + // Set the previous index and term (0 if nextIndex is 1) + req.PrevLogEntry = l.Index + req.PrevLogTerm = l.Term + } + return nil +} + +// setNewLogs is used to setup the logs which should be appended for a request. +func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { + // Append up to MaxAppendEntries or up to the lastIndex + req.Entries = make([]*Log, 0, r.conf.MaxAppendEntries) + maxIndex := min(nextIndex+uint64(r.conf.MaxAppendEntries)-1, lastIndex) + for i := nextIndex; i <= maxIndex; i++ { + oldLog := new(Log) + if err := r.logs.GetLog(i, oldLog); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", i, err) + return err + } + req.Entries = append(req.Entries, oldLog) + } + return nil +} + +// appendStats is used to emit stats about an AppendEntries invocation. +func appendStats(peer string, start time.Time, logs float32) { + metrics.MeasureSince([]string{"raft", "replication", "appendEntries", "rpc", peer}, start) + metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs) +} + +// handleStaleTerm is used when a follower indicates that we have a stale term. +func (r *Raft) handleStaleTerm(s *followerReplication) { + r.logger.Printf("[ERR] raft: peer %v has newer term, stopping replication", s.peer) + s.notifyAll(false) // No longer leader + asyncNotifyCh(s.stepDown) +} + +// updateLastAppended is used to update follower replication state after a +// successful AppendEntries RPC. +// TODO: This isn't used during InstallSnapshot, but the code there is similar. 
+func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) { + // Mark any inflight logs as committed + if logs := req.Entries; len(logs) > 0 { + last := logs[len(logs)-1] + s.nextIndex = last.Index + 1 + s.commitment.match(s.peer.ID, last.Index) + } + + // Notify still leader + s.notifyAll(true) +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/snapshot.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/snapshot.go new file mode 100644 index 00000000000..8402e093807 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/snapshot.go @@ -0,0 +1,234 @@ +package raft + +import ( + "fmt" + "io" + "time" + + "github.com/armon/go-metrics" +) + +// SnapshotMeta is for metadata of a snapshot. +type SnapshotMeta struct { + // Version is the version number of the snapshot metadata. This does not cover + // the application's data in the snapshot, that should be versioned + // separately. + Version SnapshotVersion + + // ID is opaque to the store, and is used for opening. + ID string + + // Index and Term store when the snapshot was taken. + Index uint64 + Term uint64 + + // Peers is deprecated and used to support version 0 snapshots, but will + // be populated in version 1 snapshots as well to help with upgrades. + Peers []byte + + // Configuration and ConfigurationIndex are present in version 1 + // snapshots and later. + Configuration Configuration + ConfigurationIndex uint64 + + // Size is the size of the snapshot in bytes. + Size int64 +} + +// SnapshotStore interface is used to allow for flexible implementations +// of snapshot storage and retrieval. For example, a client could implement +// a shared state store such as S3, allowing new nodes to restore snapshots +// without streaming from the leader. +type SnapshotStore interface { + // Create is used to begin a snapshot at a given index and term, and with + // the given committed configuration. The version parameter controls + // which snapshot version to create. + Create(version SnapshotVersion, index, term uint64, configuration Configuration, + configurationIndex uint64, trans Transport) (SnapshotSink, error) + + // List is used to list the available snapshots in the store. + // It should return then in descending order, with the highest index first. + List() ([]*SnapshotMeta, error) + + // Open takes a snapshot ID and provides a ReadCloser. Once close is + // called it is assumed the snapshot is no longer needed. + Open(id string) (*SnapshotMeta, io.ReadCloser, error) +} + +// SnapshotSink is returned by StartSnapshot. The FSM will Write state +// to the sink and call Close on completion. On error, Cancel will be invoked. +type SnapshotSink interface { + io.WriteCloser + ID() string + Cancel() error +} + +// runSnapshots is a long running goroutine used to manage taking +// new snapshots of the FSM. It runs in parallel to the FSM and +// main goroutines, so that snapshots do not block normal operation. 
+func (r *Raft) runSnapshots() { + for { + select { + case <-randomTimeout(r.conf.SnapshotInterval): + // Check if we should snapshot + if !r.shouldSnapshot() { + continue + } + + // Trigger a snapshot + if err := r.takeSnapshot(); err != nil { + r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + } + + case future := <-r.snapshotCh: + // User-triggered, run immediately + err := r.takeSnapshot() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + } + future.respond(err) + + case <-r.shutdownCh: + return + } + } +} + +// shouldSnapshot checks if we meet the conditions to take +// a new snapshot. +func (r *Raft) shouldSnapshot() bool { + // Check the last snapshot index + lastSnap, _ := r.getLastSnapshot() + + // Check the last log index + lastIdx, err := r.logs.LastIndex() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to get last log index: %v", err) + return false + } + + // Compare the delta to the threshold + delta := lastIdx - lastSnap + return delta >= r.conf.SnapshotThreshold +} + +// takeSnapshot is used to take a new snapshot. This must only be called from +// the snapshot thread, never the main thread. +func (r *Raft) takeSnapshot() error { + defer metrics.MeasureSince([]string{"raft", "snapshot", "takeSnapshot"}, time.Now()) + + // Create a request for the FSM to perform a snapshot. + snapReq := &reqSnapshotFuture{} + snapReq.init() + + // Wait for dispatch or shutdown. + select { + case r.fsmSnapshotCh <- snapReq: + case <-r.shutdownCh: + return ErrRaftShutdown + } + + // Wait until we get a response + if err := snapReq.Error(); err != nil { + if err != ErrNothingNewToSnapshot { + err = fmt.Errorf("failed to start snapshot: %v", err) + } + return err + } + defer snapReq.snapshot.Release() + + // Make a request for the configurations and extract the committed info. + // We have to use the future here to safely get this information since + // it is owned by the main thread. + configReq := &configurationsFuture{} + configReq.init() + select { + case r.configurationsCh <- configReq: + case <-r.shutdownCh: + return ErrRaftShutdown + } + if err := configReq.Error(); err != nil { + return err + } + committed := configReq.configurations.committed + committedIndex := configReq.configurations.committedIndex + + // We don't support snapshots while there's a config change outstanding + // since the snapshot doesn't have a means to represent this state. This + // is a little weird because we need the FSM to apply an index that's + // past the configuration change, even though the FSM itself doesn't see + // the configuration changes. It should be ok in practice with normal + // application traffic flowing through the FSM. If there's none of that + // then it's not crucial that we snapshot, since there's not much going + // on Raft-wise. + if snapReq.index < committedIndex { + return fmt.Errorf("cannot take snapshot now, wait until the configuration entry at %v has been applied (have applied %v)", + committedIndex, snapReq.index) + } + + // Create a new snapshot. + r.logger.Printf("[INFO] raft: Starting snapshot up to %d", snapReq.index) + start := time.Now() + version := getSnapshotVersion(r.protocolVersion) + sink, err := r.snapshots.Create(version, snapReq.index, snapReq.term, committed, committedIndex, r.trans) + if err != nil { + return fmt.Errorf("failed to create snapshot: %v", err) + } + metrics.MeasureSince([]string{"raft", "snapshot", "create"}, start) + + // Try to persist the snapshot. 
+ start = time.Now() + if err := snapReq.snapshot.Persist(sink); err != nil { + sink.Cancel() + return fmt.Errorf("failed to persist snapshot: %v", err) + } + metrics.MeasureSince([]string{"raft", "snapshot", "persist"}, start) + + // Close and check for error. + if err := sink.Close(); err != nil { + return fmt.Errorf("failed to close snapshot: %v", err) + } + + // Update the last stable snapshot info. + r.setLastSnapshot(snapReq.index, snapReq.term) + + // Compact the logs. + if err := r.compactLogs(snapReq.index); err != nil { + return err + } + + r.logger.Printf("[INFO] raft: Snapshot to %d complete", snapReq.index) + return nil +} + +// compactLogs takes the last inclusive index of a snapshot +// and trims the logs that are no longer needed. +func (r *Raft) compactLogs(snapIdx uint64) error { + defer metrics.MeasureSince([]string{"raft", "compactLogs"}, time.Now()) + // Determine log ranges to compact + minLog, err := r.logs.FirstIndex() + if err != nil { + return fmt.Errorf("failed to get first log index: %v", err) + } + + // Check if we have enough logs to truncate + lastLogIdx, _ := r.getLastLog() + if lastLogIdx <= r.conf.TrailingLogs { + return nil + } + + // Truncate up to the end of the snapshot, or `TrailingLogs` + // back from the head, which ever is further back. This ensures + // at least `TrailingLogs` entries, but does not allow logs + // after the snapshot to be removed. + maxLog := min(snapIdx, lastLogIdx-r.conf.TrailingLogs) + + // Log this + r.logger.Printf("[INFO] raft: Compacting logs from %d to %d", minLog, maxLog) + + // Compact the logs + if err := r.logs.DeleteRange(minLog, maxLog); err != nil { + return fmt.Errorf("log compaction failed: %v", err) + } + return nil +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/stable.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/stable.go new file mode 100644 index 00000000000..ff59a8c570a --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/stable.go @@ -0,0 +1,15 @@ +package raft + +// StableStore is used to provide stable storage +// of key configurations to ensure safety. +type StableStore interface { + Set(key []byte, val []byte) error + + // Get returns the value for key, or an empty byte slice if key was not found. + Get(key []byte) ([]byte, error) + + SetUint64(key []byte, val uint64) error + + // GetUint64 returns the uint64 value for key, or 0 if key was not found. + GetUint64(key []byte) (uint64, error) +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/state.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/state.go new file mode 100644 index 00000000000..f6d658b8bb4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/state.go @@ -0,0 +1,167 @@ +package raft + +import ( + "sync" + "sync/atomic" +) + +// RaftState captures the state of a Raft node: Follower, Candidate, Leader, +// or Shutdown. +type RaftState uint32 + +const ( + // Follower is the initial state of a Raft node. + Follower RaftState = iota + + // Candidate is one of the valid states of a Raft node. + Candidate + + // Leader is one of the valid states of a Raft node. + Leader + + // Shutdown is the terminal state of a Raft node. 
+ Shutdown +) + +func (s RaftState) String() string { + switch s { + case Follower: + return "Follower" + case Candidate: + return "Candidate" + case Leader: + return "Leader" + case Shutdown: + return "Shutdown" + default: + return "Unknown" + } +} + +// raftState is used to maintain various state variables +// and provides an interface to set/get the variables in a +// thread safe manner. +type raftState struct { + // The current term, cache of StableStore + currentTerm uint64 + + // Highest committed log entry + commitIndex uint64 + + // Last applied log to the FSM + lastApplied uint64 + + // protects 4 next fields + lastLock sync.Mutex + + // Cache the latest snapshot index/term + lastSnapshotIndex uint64 + lastSnapshotTerm uint64 + + // Cache the latest log from LogStore + lastLogIndex uint64 + lastLogTerm uint64 + + // Tracks running goroutines + routinesGroup sync.WaitGroup + + // The current state + state RaftState +} + +func (r *raftState) getState() RaftState { + stateAddr := (*uint32)(&r.state) + return RaftState(atomic.LoadUint32(stateAddr)) +} + +func (r *raftState) setState(s RaftState) { + stateAddr := (*uint32)(&r.state) + atomic.StoreUint32(stateAddr, uint32(s)) +} + +func (r *raftState) getCurrentTerm() uint64 { + return atomic.LoadUint64(&r.currentTerm) +} + +func (r *raftState) setCurrentTerm(term uint64) { + atomic.StoreUint64(&r.currentTerm, term) +} + +func (r *raftState) getLastLog() (index, term uint64) { + r.lastLock.Lock() + index = r.lastLogIndex + term = r.lastLogTerm + r.lastLock.Unlock() + return +} + +func (r *raftState) setLastLog(index, term uint64) { + r.lastLock.Lock() + r.lastLogIndex = index + r.lastLogTerm = term + r.lastLock.Unlock() +} + +func (r *raftState) getLastSnapshot() (index, term uint64) { + r.lastLock.Lock() + index = r.lastSnapshotIndex + term = r.lastSnapshotTerm + r.lastLock.Unlock() + return +} + +func (r *raftState) setLastSnapshot(index, term uint64) { + r.lastLock.Lock() + r.lastSnapshotIndex = index + r.lastSnapshotTerm = term + r.lastLock.Unlock() +} + +func (r *raftState) getCommitIndex() uint64 { + return atomic.LoadUint64(&r.commitIndex) +} + +func (r *raftState) setCommitIndex(index uint64) { + atomic.StoreUint64(&r.commitIndex, index) +} + +func (r *raftState) getLastApplied() uint64 { + return atomic.LoadUint64(&r.lastApplied) +} + +func (r *raftState) setLastApplied(index uint64) { + atomic.StoreUint64(&r.lastApplied, index) +} + +// Start a goroutine and properly handle the race between a routine +// starting and incrementing, and exiting and decrementing. +func (r *raftState) goFunc(f func()) { + r.routinesGroup.Add(1) + go func() { + defer r.routinesGroup.Done() + f() + }() +} + +func (r *raftState) waitShutdown() { + r.routinesGroup.Wait() +} + +// getLastIndex returns the last index in stable storage. +// Either from the last log or from the last snapshot. +func (r *raftState) getLastIndex() uint64 { + r.lastLock.Lock() + defer r.lastLock.Unlock() + return max(r.lastLogIndex, r.lastSnapshotIndex) +} + +// getLastEntry returns the last index and term in stable storage. +// Either from the last log or from the last snapshot. 
+func (r *raftState) getLastEntry() (uint64, uint64) { + r.lastLock.Lock() + defer r.lastLock.Unlock() + if r.lastLogIndex >= r.lastSnapshotIndex { + return r.lastLogIndex, r.lastLogTerm + } + return r.lastSnapshotIndex, r.lastSnapshotTerm +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/tcp_transport.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/tcp_transport.go new file mode 100644 index 00000000000..9281508a050 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/tcp_transport.go @@ -0,0 +1,105 @@ +package raft + +import ( + "errors" + "io" + "log" + "net" + "time" +) + +var ( + errNotAdvertisable = errors.New("local bind address is not advertisable") + errNotTCP = errors.New("local address is not a TCP address") +) + +// TCPStreamLayer implements StreamLayer interface for plain TCP. +type TCPStreamLayer struct { + advertise net.Addr + listener *net.TCPListener +} + +// NewTCPTransport returns a NetworkTransport that is built on top of +// a TCP streaming transport layer. +func NewTCPTransport( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, maxPool, timeout, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransport(stream, maxPool, timeout, logOutput) + }) +} + +// NewTCPTransportWithLogger returns a NetworkTransport that is built on top of +// a TCP streaming transport layer, with log output going to the supplied Logger +func NewTCPTransportWithLogger( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logger *log.Logger, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, maxPool, timeout, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransportWithLogger(stream, maxPool, timeout, logger) + }) +} + +func newTCPTransport(bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + transportCreator func(stream StreamLayer) *NetworkTransport) (*NetworkTransport, error) { + // Try to bind + list, err := net.Listen("tcp", bindAddr) + if err != nil { + return nil, err + } + + // Create stream + stream := &TCPStreamLayer{ + advertise: advertise, + listener: list.(*net.TCPListener), + } + + // Verify that we have a usable advertise address + addr, ok := stream.Addr().(*net.TCPAddr) + if !ok { + list.Close() + return nil, errNotTCP + } + if addr.IP.IsUnspecified() { + list.Close() + return nil, errNotAdvertisable + } + + // Create the network transport + trans := transportCreator(stream) + return trans, nil +} + +// Dial implements the StreamLayer interface. +func (t *TCPStreamLayer) Dial(address ServerAddress, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", string(address), timeout) +} + +// Accept implements the net.Listener interface. +func (t *TCPStreamLayer) Accept() (c net.Conn, err error) { + return t.listener.Accept() +} + +// Close implements the net.Listener interface. +func (t *TCPStreamLayer) Close() (err error) { + return t.listener.Close() +} + +// Addr implements the net.Listener interface. 
+func (t *TCPStreamLayer) Addr() net.Addr { + // Use an advertise addr if provided + if t.advertise != nil { + return t.advertise + } + return t.listener.Addr() +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/transport.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/transport.go new file mode 100644 index 00000000000..633f97a8c5c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/transport.go @@ -0,0 +1,124 @@ +package raft + +import ( + "io" + "time" +) + +// RPCResponse captures both a response and a potential error. +type RPCResponse struct { + Response interface{} + Error error +} + +// RPC has a command, and provides a response mechanism. +type RPC struct { + Command interface{} + Reader io.Reader // Set only for InstallSnapshot + RespChan chan<- RPCResponse +} + +// Respond is used to respond with a response, error or both +func (r *RPC) Respond(resp interface{}, err error) { + r.RespChan <- RPCResponse{resp, err} +} + +// Transport provides an interface for network transports +// to allow Raft to communicate with other nodes. +type Transport interface { + // Consumer returns a channel that can be used to + // consume and respond to RPC requests. + Consumer() <-chan RPC + + // LocalAddr is used to return our local address to distinguish from our peers. + LocalAddr() ServerAddress + + // AppendEntriesPipeline returns an interface that can be used to pipeline + // AppendEntries requests. + AppendEntriesPipeline(target ServerAddress) (AppendPipeline, error) + + // AppendEntries sends the appropriate RPC to the target node. + AppendEntries(target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error + + // RequestVote sends the appropriate RPC to the target node. + RequestVote(target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error + + // InstallSnapshot is used to push a snapshot down to a follower. The data is read from + // the ReadCloser and streamed to the client. + InstallSnapshot(target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error + + // EncodePeer is used to serialize a peer's address. + EncodePeer(ServerAddress) []byte + + // DecodePeer is used to deserialize a peer's address. + DecodePeer([]byte) ServerAddress + + // SetHeartbeatHandler is used to setup a heartbeat handler + // as a fast-pass. This is to avoid head-of-line blocking from + // disk IO. If a Transport does not support this, it can simply + // ignore the call, and push the heartbeat onto the Consumer channel. + SetHeartbeatHandler(cb func(rpc RPC)) +} + +// WithClose is an interface that a transport may provide which +// allows a transport to be shut down cleanly when a Raft instance +// shuts down. +// +// It is defined separately from Transport as unfortunately it wasn't in the +// original interface specification. +type WithClose interface { + // Close permanently closes a transport, stopping + // any associated goroutines and freeing other resources. + Close() error +} + +// LoopbackTransport is an interface that provides a loopback transport suitable for testing +// e.g. InmemTransport. It's there so we don't have to rewrite tests. 
+type LoopbackTransport interface { + Transport // Embedded transport reference + WithPeers // Embedded peer management + WithClose // with a close routine +} + +// WithPeers is an interface that a transport may provide which allows for connection and +// disconnection. Unless the transport is a loopback transport, the transport specified to +// "Connect" is likely to be nil. +type WithPeers interface { + Connect(peer ServerAddress, t Transport) // Connect a peer + Disconnect(peer ServerAddress) // Disconnect a given peer + DisconnectAll() // Disconnect all peers, possibly to reconnect them later +} + +// AppendPipeline is used for pipelining AppendEntries requests. It is used +// to increase the replication throughput by masking latency and better +// utilizing bandwidth. +type AppendPipeline interface { + // AppendEntries is used to add another request to the pipeline. + // The send may block which is an effective form of back-pressure. + AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) + + // Consumer returns a channel that can be used to consume + // response futures when they are ready. + Consumer() <-chan AppendFuture + + // Close closes the pipeline and cancels all inflight RPCs + Close() error +} + +// AppendFuture is used to return information about a pipelined AppendEntries request. +type AppendFuture interface { + Future + + // Start returns the time that the append request was started. + // It is always OK to call this method. + Start() time.Time + + // Request holds the parameters of the AppendEntries call. + // It is always OK to call this method. + Request() *AppendEntriesRequest + + // Response holds the results of the AppendEntries call. + // This method must only be called after the Error + // method returns, and will only be valid on success. + Response() *AppendEntriesResponse +} diff --git a/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/util.go b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/util.go new file mode 100644 index 00000000000..90428d7437e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/github.com/hashicorp/raft/util.go @@ -0,0 +1,133 @@ +package raft + +import ( + "bytes" + crand "crypto/rand" + "fmt" + "math" + "math/big" + "math/rand" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +func init() { + // Ensure we use a high-entropy seed for the psuedo-random generator + rand.Seed(newSeed()) +} + +// returns an int64 from a crypto random source +// can be used to seed a source for a math/rand. +func newSeed() int64 { + r, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + return r.Int64() +} + +// randomTimeout returns a value that is between the minVal and 2x minVal. +func randomTimeout(minVal time.Duration) <-chan time.Time { + if minVal == 0 { + return nil + } + extra := (time.Duration(rand.Int63()) % minVal) + return time.After(minVal + extra) +} + +// min returns the minimum. +func min(a, b uint64) uint64 { + if a <= b { + return a + } + return b +} + +// max returns the maximum. +func max(a, b uint64) uint64 { + if a >= b { + return a + } + return b +} + +// generateUUID is used to generate a random UUID. 
+func generateUUID() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +// asyncNotifyCh is used to do an async channel send +// to a single channel without blocking. +func asyncNotifyCh(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// drainNotifyCh empties out a single-item notification channel without +// blocking, and returns whether it received anything. +func drainNotifyCh(ch chan struct{}) bool { + select { + case <-ch: + return true + default: + return false + } +} + +// asyncNotifyBool is used to do an async notification +// on a bool channel. +func asyncNotifyBool(ch chan bool, v bool) { + select { + case ch <- v: + default: + } +} + +// Decode reverses the encode operation on a byte slice input. +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer. +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// backoff is used to compute an exponential backoff +// duration. Base time is scaled by the current round, +// up to some maximum scale factor. +func backoff(base time.Duration, round, limit uint64) time.Duration { + power := min(round, limit) + for power > 2 { + base *= 2 + power-- + } + return base +} + +// Needed for sorting []uint64, used to determine commitment +type uint64Slice []uint64 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/hashicorp/consul/vendor/vendor.json b/vendor/github.com/hashicorp/consul/vendor/vendor.json new file mode 100644 index 00000000000..1d0e61fefa5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/vendor/vendor.json @@ -0,0 +1,471 @@ +{ + "comment": "", + "ignore": "test", + "package": [ + { + "path": "context", + "revision": "" + }, + { + "checksumSHA1": "JhyS/zIicgtrSasHSZ6WtXGWJVk=", + "path": "github.com/DataDog/datadog-go/statsd", + "revision": "cc2f4770f4d61871e19bfee967bc767fe730b0d9", + "revisionTime": "2016-03-29T13:52:53Z" + }, + { + "checksumSHA1": "yXrS4e4yxaRBu/MX+39X3cS4kbg=", + "path": "github.com/Sirupsen/logrus", + "revision": "a283a10442df8dc09befd873fab202bf8a253d6a", + "revisionTime": "2016-07-16T02:56:31Z" + }, + { + "checksumSHA1": "l0iFqayYAaEip6Olaq3/LCOa/Sg=", + "path": "github.com/armon/circbuf", + "revision": "bbbad097214e2918d8543d5201d12bfd7bca254d", + "revisionTime": "2015-08-27T00:49:46Z" + }, + { + "checksumSHA1": "d6798KSc0jDg2MHNxKdgyNfMK7A=", + "path": "github.com/armon/go-metrics", + "revision": "3df31a1ada83e310c2e24b267c8e8b68836547b4", + "revisionTime": "2016-07-17T04:34:58Z" + }, + { + "checksumSHA1": "OmqT9Y1mAHvlAKeJh0jBHC9SH78=", + "path": "github.com/armon/go-metrics/circonus", + "revision": "3df31a1ada83e310c2e24b267c8e8b68836547b4", + "revisionTime": "2016-07-17T04:34:58Z" + }, + { + "checksumSHA1": "mAzNU3zeZGEwqjDT4ZkspFvx3TI=", + "path": "github.com/armon/go-metrics/datadog", + "revision": 
"3df31a1ada83e310c2e24b267c8e8b68836547b4", + "revisionTime": "2016-07-17T04:34:58Z" + }, + { + "checksumSHA1": "gNO0JNpLzYOdInGeq7HqMZUzx9M=", + "path": "github.com/armon/go-radix", + "revision": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2", + "revisionTime": "2016-01-15T23:47:25Z" + }, + { + "checksumSHA1": "dvd7Su+WNmHRP1+w1HezrPUCDsc=", + "path": "github.com/bgentry/speakeasy", + "revision": "e1439544d8ecd0f3e9373a636d447668096a8f81", + "revisionTime": "2016-05-20T23:26:10Z" + }, + { + "checksumSHA1": "twtRfb6484vfr2qqjiFkLThTjcQ=", + "path": "github.com/bgentry/speakeasy/example", + "revision": "e1439544d8ecd0f3e9373a636d447668096a8f81", + "revisionTime": "2016-05-20T23:26:10Z" + }, + { + "checksumSHA1": "C1BwfmTAUmldO4V3++tvYWzT49Y=", + "comment": "v1.2.1", + "path": "github.com/boltdb/bolt", + "revision": "dfb21201d9270c1082d5fb0f07f500311ff72f18", + "revisionTime": "2016-05-16T15:40:46Z", + "version": "v1.2.1", + "versionExact": "v1.2.1" + }, + { + "checksumSHA1": "b5zgHT9TxBAVh/KP9kQi7QVoz9w=", + "path": "github.com/circonus-labs/circonus-gometrics", + "revision": "a7c30e0dcc6e2341053132470dcedc12bc7705ef", + "revisionTime": "2016-07-22T17:27:10Z" + }, + { + "checksumSHA1": "IFiYTxu8jshL4A8BCttUaDhp1m4=", + "path": "github.com/circonus-labs/circonus-gometrics/api", + "revision": "a7c30e0dcc6e2341053132470dcedc12bc7705ef", + "revisionTime": "2016-07-22T17:27:10Z" + }, + { + "checksumSHA1": "+9vcRzlTdvEjH/Uf8fKC5MXdjNw=", + "path": "github.com/circonus-labs/circonus-gometrics/checkmgr", + "revision": "a7c30e0dcc6e2341053132470dcedc12bc7705ef", + "revisionTime": "2016-07-22T17:27:10Z" + }, + { + "checksumSHA1": "C4Z7+l5GOpOCW5DcvNYzheGvQRE=", + "path": "github.com/circonus-labs/circonusllhist", + "revision": "d724266ae5270ae8b87a5d2e8081f04e307c3c18", + "revisionTime": "2016-05-26T04:38:13Z" + }, + { + "checksumSHA1": "fXAinpJ5bOcborK7AiO1rnW60BI=", + "path": "github.com/docker/docker/opts", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "Kw89nXWMfcNR0KcRw3T8bmuaQlw=", + "path": "github.com/docker/docker/pkg/archive", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "OFqhPMitAwvJXnD68U2ZVwKhsTA=", + "path": "github.com/docker/docker/pkg/fileutils", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "p6Ud4Yf1ywWy20YxXF1RU4yhTio=", + "path": "github.com/docker/docker/pkg/homedir", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "iP5slJJPRZUm0rfdII8OiATAACA=", + "path": "github.com/docker/docker/pkg/idtools", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "QDpwKJCKQV6+0o0kkPN4SC5Aldc=", + "path": "github.com/docker/docker/pkg/ioutils", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "ndnAFCfsGC3upNQ6jAEwzxcurww=", + "path": "github.com/docker/docker/pkg/longpath", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "l4lJIW9JIddsKRqsoqRt9zXu/7M=", + "path": "github.com/docker/docker/pkg/pools", + "revision": 
"eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "txf3EORYff4hO6PEvwBm2lyh1MU=", + "path": "github.com/docker/docker/pkg/promise", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "YDYbS5U2mDwfcOUJ6M09cP6Bubg=", + "path": "github.com/docker/docker/pkg/stdcopy", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "Eh3iu/9RzHzNY4vHHPKaZISAgBo=", + "path": "github.com/docker/docker/pkg/system", + "revision": "eb28dde01f165849bf372e18200e83042c76f26c", + "revisionTime": "2016-08-09T18:56:09Z" + }, + { + "checksumSHA1": "hZV62Xzt/i0e/WBKWww3rpkRAR4=", + "path": "github.com/docker/engine-api/types/filters", + "revision": "fc564829f64e3a820d7b896611ede92c8744d752", + "revisionTime": "2016-08-09T18:19:22Z" + }, + { + "checksumSHA1": "AE3TTpPWvv9ic71FiF0HnRr46mE=", + "path": "github.com/docker/engine-api/types/versions", + "revision": "fc564829f64e3a820d7b896611ede92c8744d752", + "revisionTime": "2016-08-09T18:19:22Z" + }, + { + "checksumSHA1": "KbWP8VsU9gVoZm9pCqe79AkfDmk=", + "path": "github.com/docker/go-units", + "revision": "eb879ae3e2b84e2a142af415b679ddeda47ec71c", + "revisionTime": "2016-08-02T14:55:05Z" + }, + { + "checksumSHA1": "5ftkjfUwI9A6xCQ1PwIAd5+qlo0=", + "path": "github.com/elazarl/go-bindata-assetfs", + "revision": "e1a2a7ec64b07d04ac9ebb072404fe8b7b60de1b", + "revisionTime": "2016-08-03T19:23:04Z" + }, + { + "checksumSHA1": "CaThXbumVxZtNlItiXma5B78PwQ=", + "path": "github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs", + "revision": "e1a2a7ec64b07d04ac9ebb072404fe8b7b60de1b", + "revisionTime": "2016-08-03T19:23:04Z" + }, + { + "checksumSHA1": "U+z+QQ323fAeBtBS17ztWmyVI3Q=", + "path": "github.com/fsouza/go-dockerclient", + "revision": "a53ba79627e888ef775bdcf15813f07d7a232867", + "revisionTime": "2016-08-09T01:24:47Z" + }, + { + "checksumSHA1": "cdOCt0Yb+hdErz8NAQqayxPmRsY=", + "path": "github.com/hashicorp/errwrap", + "revision": "7554cd9344cec97297fa6649b055a8c98c2a1e55", + "revisionTime": "2014-10-28T05:47:10Z" + }, + { + "checksumSHA1": "nd3S1qkFv7zZxA9be0bw4nT0pe0=", + "path": "github.com/hashicorp/go-checkpoint", + "revision": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b", + "revisionTime": "2015-10-22T18:15:14Z" + }, + { + "checksumSHA1": "Uzyon2091lmwacNsl1hCytjhHtg=", + "path": "github.com/hashicorp/go-cleanhttp", + "revision": "ad28ea4487f05916463e2423a55166280e8254b5", + "revisionTime": "2016-04-07T17:41:26Z" + }, + { + "checksumSHA1": "qmE9mO0WW6ALLpUU81rXDyspP5M=", + "path": "github.com/hashicorp/go-immutable-radix", + "revision": "afc5a0dbb18abdf82c277a7bc01533e81fa1d6b8", + "revisionTime": "2016-06-09T02:05:29Z" + }, + { + "checksumSHA1": "/V57CyN7x2NUlHoOzVL5GgGXX84=", + "path": "github.com/hashicorp/go-memdb", + "revision": "98f52f52d7a476958fa9da671354d270c50661a7", + "revisionTime": "2016-03-01T23:01:42Z" + }, + { + "path": "github.com/hashicorp/go-msgpack/codec", + "revision": "fa3f63826f7c23912c15263591e65d54d080b458" + }, + { + "checksumSHA1": "lrSl49G23l6NhfilxPM0XFs5rZo=", + "path": "github.com/hashicorp/go-multierror", + "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5", + "revisionTime": "2015-09-16T20:57:42Z" + }, + { + "checksumSHA1": "7qEpaAJA78EvQDs16x9mYOWfgqo=", + "path": 
"github.com/hashicorp/go-reap", + "revision": "2d85522212dcf5a84c6b357094f5c44710441912", + "revisionTime": "2016-01-13T17:25:55Z" + }, + { + "checksumSHA1": "bfVGm7xZ2VFpddJp3KEPUZ8Y9Po=", + "path": "github.com/hashicorp/go-retryablehttp", + "revision": "886ce0458bc81ccca0fb7044c1be0e9ab590bed7", + "revisionTime": "2016-07-18T23:34:41Z" + }, + { + "checksumSHA1": "xZ7Ban1x//6uUIU1xtrTbCYNHBc=", + "path": "github.com/hashicorp/go-syslog", + "revision": "42a2b573b664dbf281bd48c3cc12c086b17a39ba", + "revisionTime": "2015-02-18T18:19:46Z" + }, + { + "checksumSHA1": "mAkPa/RLuIwN53GbwIEMATexams=", + "path": "github.com/hashicorp/go-uuid", + "revision": "64130c7a86d732268a38cb04cfbaf0cc987fda98", + "revisionTime": "2016-07-17T02:21:40Z" + }, + { + "checksumSHA1": "d9PxF1XQGLMJZRct2R8qVM/eYlE=", + "path": "github.com/hashicorp/golang-lru", + "revision": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4", + "revisionTime": "2016-02-07T21:47:19Z" + }, + { + "checksumSHA1": "2nOpYjx8Sn57bqlZq17yM4YJuM4=", + "path": "github.com/hashicorp/golang-lru/simplelru", + "revision": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4", + "revisionTime": "2016-02-07T21:47:19Z" + }, + { + "checksumSHA1": "ydHBPi04mEh+Tir+2JkpSIMckcw=", + "path": "github.com/hashicorp/hcl", + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" + }, + { + "checksumSHA1": "IxyvRpCFeoJBGl2obLKJV7RCGjg=", + "path": "github.com/hashicorp/hcl/hcl/ast", + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" + }, + { + "checksumSHA1": "l2oQxBsZRwn6eZjf+whXr8c9+8c=", + "path": "github.com/hashicorp/hcl/hcl/parser", + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" + }, + { + "checksumSHA1": "vjhDQVlgHhdxml1V8/cj0vOe+j8=", + "path": "github.com/hashicorp/hcl/hcl/scanner", + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" + }, + { + "checksumSHA1": "JlZmnzqdmFFyb1+2afLyR3BOE/8=", + "path": "github.com/hashicorp/hcl/hcl/strconv", + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" + }, + { + "checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=", + "path": "github.com/hashicorp/hcl/hcl/token", + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" + }, + { + "checksumSHA1": "jQ45CCc1ed/nlV7bbSnx6z72q1M=", + "path": "github.com/hashicorp/hcl/json/parser", + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" + }, + { + "checksumSHA1": "S1e0F9ZKSnqgOLfjDTYazRL28tA=", + "path": "github.com/hashicorp/hcl/json/scanner", + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" + }, + { + "checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=", + "path": "github.com/hashicorp/hcl/json/token", + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" + }, + { + "checksumSHA1": "kqCMCHy2b+RBMKC+ER+OPqp8C3E=", + "path": "github.com/hashicorp/hil", + "revision": "1e86c6b523c55d1fa6c6e930ce80b548664c95c2", + "revisionTime": "2016-07-11T23:18:37Z" + }, + { + "checksumSHA1": "UICubs001+Q4MsUf9zl2vcMzWQQ=", + "path": "github.com/hashicorp/hil/ast", + "revision": "1e86c6b523c55d1fa6c6e930ce80b548664c95c2", + "revisionTime": "2016-07-11T23:18:37Z" + }, + { + 
"checksumSHA1": "vt+P9D2yWDO3gdvdgCzwqunlhxU=", + "path": "github.com/hashicorp/logutils", + "revision": "0dc08b1671f34c4250ce212759ebd880f743d883", + "revisionTime": "2015-06-09T07:04:31Z" + }, + { + "checksumSHA1": "AY1/cRsuWpoJMG0J821TqFo9nDE=", + "path": "github.com/hashicorp/memberlist", + "revision": "0c5ba075f8520c65572f001331a1a43b756e01d7", + "revisionTime": "2016-08-12T18:27:57Z" + }, + { + "checksumSHA1": "qnlqWJYV81ENr61SZk9c65R1mDo=", + "path": "github.com/hashicorp/net-rpc-msgpackrpc", + "revision": "a14192a58a694c123d8fe5481d4a4727d6ae82f3", + "revisionTime": "2015-11-16T02:03:38Z" + }, + { + "checksumSHA1": "zMgiTV0dfJIQNRCJDF50bLomDvg=", + "path": "github.com/hashicorp/raft", + "revision": "c69c15dd73b6695ba75b3502ce6b332cc0042c83", + "revisionTime": "2016-08-01T21:27:18Z" + }, + { + "checksumSHA1": "QAxukkv54/iIvLfsUP6IK4R0m/A=", + "path": "github.com/hashicorp/raft-boltdb", + "revision": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee", + "revisionTime": "2015-02-01T20:08:39Z" + }, + { + "checksumSHA1": "u9qHbpIgMZ7/fjO0gFfds2m/1ck=", + "path": "github.com/hashicorp/scada-client", + "revision": "6e896784f66f82cdc6f17e00052db91699dc277d", + "revisionTime": "2016-06-01T22:40:23Z" + }, + { + "checksumSHA1": "fv3nX1vDZViW0tA7Aa5Va2lBUtM=", + "path": "github.com/hashicorp/scada-client/scada", + "revision": "6e896784f66f82cdc6f17e00052db91699dc277d", + "revisionTime": "2016-06-01T22:40:23Z" + }, + { + "checksumSHA1": "E3Xcanc9ouQwL+CZGOUyA/+giLg=", + "comment": "v0.7.0-66-g6c4672d", + "path": "github.com/hashicorp/serf/coordinate", + "revision": "114430d8210835d66defdc31cdc176c58e060005", + "revisionTime": "2016-08-09T01:42:04Z" + }, + { + "checksumSHA1": "vLyudzMEdik8IpRY1H2vRa2PeLU=", + "comment": "v0.7.0-66-g6c4672d", + "path": "github.com/hashicorp/serf/serf", + "revision": "114430d8210835d66defdc31cdc176c58e060005", + "revisionTime": "2016-08-09T01:42:04Z" + }, + { + "checksumSHA1": "ZhK6IO2XN81Y+3RAjTcVm1Ic7oU=", + "path": "github.com/hashicorp/yamux", + "revision": "d1caa6c97c9fc1cc9e83bbe34d0603f9ff0ce8bd", + "revisionTime": "2016-07-20T23:31:40Z" + }, + { + "checksumSHA1": "xZuhljnmBysJPta/lMyYmJdujCg=", + "path": "github.com/mattn/go-isatty", + "revision": "66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8", + "revisionTime": "2016-08-06T12:27:52Z" + }, + { + "checksumSHA1": "OUZ1FFXyKs+Cfg9M9rmXqqweQck=", + "path": "github.com/miekg/dns", + "revision": "db96a2b759cdef4f11a34506a42eb8d1290c598e", + "revisionTime": "2016-07-26T03:20:27Z" + }, + { + "checksumSHA1": "yF39M9MGatDbq2d2oqlLy44jsRc=", + "path": "github.com/mitchellh/cli", + "revision": "168daae10d6ff81b8b1201b0a4c9607d7e9b82e3", + "revisionTime": "2016-03-23T17:07:00Z" + }, + { + "checksumSHA1": "86nE93o1VIND0Doe8PuhCXnhUx0=", + "path": "github.com/mitchellh/copystructure", + "revision": "cdac8253d00f2ecf0a0b19fbff173a9a72de4f82", + "revisionTime": "2016-08-04T03:23:30Z" + }, + { + "checksumSHA1": "LUrnGREfnifW4WDMaavmc9MlLI0=", + "path": "github.com/mitchellh/mapstructure", + "revision": "ca63d7c062ee3c9f34db231e352b60012b4fd0c1", + "revisionTime": "2016-08-08T18:12:53Z" + }, + { + "checksumSHA1": "mrqMlK6gqe//WsJSrJ1HgkPM0lM=", + "path": "github.com/mitchellh/reflectwalk", + "revision": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6", + "revisionTime": "2015-05-27T15:31:53Z" + }, + { + "checksumSHA1": "3AoPMXlmVq2+iWMpsdJZkcUKHB8=", + "path": 
"github.com/opencontainers/runc/libcontainer/user", + "revision": "0f764571384a3ff16c6fed25ace5b7c83f0f0379", + "revisionTime": "2016-08-09T12:22:04Z" + }, + { + "checksumSHA1": "ExnVEVNT8APpFTm26cUb5T09yR4=", + "comment": "v2.0.1-8-g983d3a5", + "path": "github.com/ryanuber/columnize", + "revision": "9b3edd62028f107d7cabb19353292afd29311a4e", + "revisionTime": "2016-07-12T16:32:29Z" + }, + { + "checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=", + "path": "golang.org/x/net/context", + "revision": "075e191f18186a8ff2becaf64478e30f4545cdad", + "revisionTime": "2016-08-05T06:12:51Z" + }, + { + "checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=", + "path": "golang.org/x/net/context/ctxhttp", + "revision": "075e191f18186a8ff2becaf64478e30f4545cdad", + "revisionTime": "2016-08-05T06:12:51Z" + }, + { + "checksumSHA1": "15doxxBfOxOhWExkxjPNo6Y7fEw=", + "path": "golang.org/x/sys/unix", + "revision": "20457ee8ea8546920d3f4e19e405da45250dc5a5", + "revisionTime": "2016-01-20T16:03:39Z" + } + ], + "rootPath": "github.com/hashicorp/consul" +} diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 00000000000..337d963296c --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,212 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. +type TwoQueueCache struct { + size int + recentSize int + + recent *simplelru.LRU + frequent *simplelru.LRU + recentEvict *simplelru.LRU + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. 
+func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) + return +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) 
+} + +func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 00000000000..33e58cfaf97 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,25 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) + +Example +======= + +Using the LRU is very simple: + +```go +l, _ := New(128) +for i := 0; i < 256; i++ { + l.Add(i, nil) +} +if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 00000000000..a2a25281733 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,257 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. +type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 *simplelru.LRU // T1 is the LRU for recently accessed items + b1 *simplelru.LRU // B1 is the LRU for evictions from t1 + + t2 *simplelru.LRU // T2 is the LRU for frequently accessed items + b2 *simplelru.LRU // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. 
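By contrast with New2QParams, the ARC cache above takes no tuning parameters; it adapts the recent/frequent split (p) on its own. Continuing the 2Q sketch, with the same assumed import path:

```go
// ARC exposes the same Add/Get/Len surface as the other caches in this
// package, but is constructed from a size alone.
arc, _ := lru.NewARC(128)
arc.Add("alpha", 1)

// The second touch moves "alpha" from the recent list (t1) to the
// frequent list (t2).
if v, ok := arc.Get("alpha"); ok {
	fmt.Println("alpha =", v)
}
```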
+func (c *ARCCache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Ff the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. +func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequntly used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) + return +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func (c *ARCCache) replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) 
+} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *ARCCache) Peek(key interface{}) (interface{}, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 00000000000..a6285f989e0 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,114 @@ +// This package provides a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru *simplelru.LRU + lock sync.RWMutex +} + +// New creates an LRU of the given size +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { + lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) + if err != nil { + return nil, err + } + c := &Cache{ + lru: lru, + } + return c, nil +} + +// Purge is used to completely clear the cache +func (c *Cache) Purge() { + c.lock.Lock() + c.lru.Purge() + c.lock.Unlock() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache) Add(key, value interface{}) bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Add(key, value) +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Get(key) +} + +// Check if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Contains(key) +} + +// Returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache) Peek(key interface{}) (interface{}, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Peek(key) +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. 
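The two results of ContainsOrAdd below are easy to misread; a short sketch of the intended semantics, again assuming the same lru import as in the sketches above:

```go
c, _ := lru.New(2)

found, evicted := c.ContainsOrAdd("x", 1)
fmt.Println(found, evicted) // false false: "x" was absent, so it was added

found, evicted = c.ContainsOrAdd("x", 2)
fmt.Println(found, evicted) // true false: "x" is already cached, the stored value stays 1

c.Add("y", 2)
_, evicted = c.ContainsOrAdd("z", 3)
fmt.Println(evicted) // true: the cache is full, so adding "z" evicts the oldest entry
```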
+func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.lru.Contains(key) { + return true, false + } else { + evict := c.lru.Add(key, value) + return false, evict + } +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) { + c.lock.Lock() + c.lru.Remove(key) + c.lock.Unlock() +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + c.lock.Lock() + c.lru.RemoveOldest() + c.lock.Unlock() +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Keys() +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Len() +} diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md new file mode 100644 index 00000000000..56f357fad7f --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/README.md @@ -0,0 +1,47 @@ +# go-shellwords + +[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master) +[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) + +Parse line as shell words. + +## Usage + +```go +args, err := shellwords.Parse("./foo --bar=baz") +// args should be ["./foo", "--bar=baz"] +``` + +```go +os.Setenv("FOO", "bar") +p := shellwords.NewParser() +p.ParseEnv = true +args, err := p.Parse("./foo $FOO") +// args should be ["./foo", "bar"] +``` + +```go +p := shellwords.NewParser() +p.ParseBacktick = true +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +```go +shellwords.ParseBacktick = true +p := shellwords.NewParser() +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +# Thanks + +This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). 
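The examples above cover environment and backtick expansion; quoting is the other case the parser handles. A small sketch, assuming the upstream import path github.com/mattn/go-shellwords (vendored here under the github.com mirror):

```go
package main

import (
	"fmt"

	"github.com/mattn/go-shellwords"
)

func main() {
	// Double and single quotes group words and are stripped from the result;
	// outside single quotes, a backslash escapes the next character.
	args, err := shellwords.Parse(`cp "my file.txt" '/tmp/dest dir'`)
	if err != nil {
		panic(err)
	}
	fmt.Println(args) // [cp my file.txt /tmp/dest dir]
}
```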
+ +# License + +under the MIT License: http://mattn.mit-license.org/2014 + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go new file mode 100644 index 00000000000..1abaa6c9dfe --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -0,0 +1,134 @@ +package shellwords + +import ( + "errors" + "os" + "regexp" + "strings" +) + +var ( + ParseEnv bool = false + ParseBacktick bool = false +) + +var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`) + +func isSpace(r rune) bool { + switch r { + case ' ', '\t', '\r', '\n': + return true + } + return false +} + +func replaceEnv(s string) string { + return envRe.ReplaceAllStringFunc(s, func(s string) string { + s = s[1:] + if s[0] == '{' { + s = s[1 : len(s)-1] + } + return os.Getenv(s) + }) +} + +type Parser struct { + ParseEnv bool + ParseBacktick bool +} + +func NewParser() *Parser { + return &Parser{ParseEnv, ParseBacktick} +} + +func (p *Parser) Parse(line string) ([]string, error) { + line = strings.TrimSpace(line) + + args := []string{} + buf := "" + var escaped, doubleQuoted, singleQuoted, backQuote bool + backtick := "" + + for _, r := range line { + if escaped { + buf += string(r) + escaped = false + continue + } + + if r == '\\' { + if singleQuoted { + buf += string(r) + } else { + escaped = true + } + continue + } + + if isSpace(r) { + if singleQuoted || doubleQuoted || backQuote { + buf += string(r) + backtick += string(r) + } else if buf != "" { + if p.ParseEnv { + buf = replaceEnv(buf) + } + args = append(args, buf) + buf = "" + } + continue + } + + switch r { + case '`': + if !singleQuoted && !doubleQuoted { + if p.ParseBacktick { + if backQuote { + out, err := shellRun(backtick) + if err != nil { + return nil, err + } + buf = out + } + backtick = "" + backQuote = !backQuote + continue + } + backtick = "" + backQuote = !backQuote + } + case '"': + if !singleQuoted { + doubleQuoted = !doubleQuoted + continue + } + case '\'': + if !doubleQuoted { + singleQuoted = !singleQuoted + continue + } + } + + buf += string(r) + if backQuote { + backtick += string(r) + } + } + + if buf != "" { + if p.ParseEnv { + buf = replaceEnv(buf) + } + args = append(args, buf) + } + + if escaped || singleQuoted || doubleQuoted || backQuote { + return nil, errors.New("invalid command line string") + } + + return args, nil +} + +func Parse(line string) ([]string, error) { + return NewParser().Parse(line) +} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go new file mode 100644 index 00000000000..4f8ac55e478 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package shellwords + +import ( + "errors" + "os" + "os/exec" + "strings" +) + +func shellRun(line string) (string, error) { + shell := os.Getenv("SHELL") + b, err := exec.Command(shell, "-c", line).Output() + if err != nil { + return "", errors.New(err.Error() + ":" + string(b)) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go new file mode 100644 index 00000000000..7cad4cf06fd --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -0,0 +1,17 @@ +package shellwords + +import ( + "errors" + "os" + "os/exec" + "strings" +) 
+ +func shellRun(line string) (string, error) { + shell := os.Getenv("COMSPEC") + b, err := exec.Command(shell, "/c", line).Output() + if err != nil { + return "", errors.New(err.Error() + ":" + string(b)) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 00000000000..866d74a7ad7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 00000000000..8da58fbf6f8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 00000000000..1884de6a7d7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. 
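The anchor and map-merging support mentioned in the compatibility note can be illustrated concretely; a minimal sketch (not taken from the upstream README), using the same import path:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// "defaults" is anchored once and merged into "prod" through the "<<" key;
// keys spelled out on the merging map win over the merged values.
var data = `
defaults: &defaults
  region: us-east-1
  size: small
prod:
  <<: *defaults
  size: large
`

func main() {
	m := map[string]map[string]string{}
	if err := yaml.Unmarshal([]byte(data), &m); err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Println(m["prod"]["region"], m["prod"]["size"]) // us-east-1 large
}
```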
+ +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 00000000000..95ec014e8cc --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 00000000000..b13ab9f0796 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } + panic("unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
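On the public API side, the per-field errors the decoder collects (the terrors slice below) come back from Unmarshal as a *yaml.TypeError; a brief sketch, assuming the exported TypeError type and its Errors field referenced in callUnmarshaler further down:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Config struct {
	Port int    `yaml:"port"`
	Host string `yaml:"host"`
}

func main() {
	var c Config
	// "port" cannot be decoded into an int, but decoding continues and
	// "host" is still set; the mismatch is reported in the TypeError.
	err := yaml.Unmarshal([]byte("port: not-a-number\nhost: example.com"), &c)
	if te, ok := err.(*yaml.TypeError); ok {
		fmt.Println(c.Host)    // example.com
		fmt.Println(te.Errors) // e.g. [line 1: cannot unmarshal !!str `not-a-n...` into int]
	}
}
```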
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + 
out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) 
+ continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 00000000000..2befd553ed0 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
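These buffer and event-queue primitives are what the encoder (encode.go, further down in this diff) drives. Below is a condensed, in-package sketch of that flow, emitting a single plain scalar document into a byte slice; it only paraphrases newEncoder/emitScalar/finish using identifiers that appear in this diff, is not additional API, and ignores the boolean results that the real encoder checks via must():

```go
// Sketch only: this lives inside package yaml and mirrors how encode.go
// wires the emitter — point the output at a []byte, then feed STREAM-START,
// DOCUMENT-START, SCALAR, DOCUMENT-END and STREAM-END events through
// yaml_emitter_emit.
func emitOneScalar(value string) []byte {
	var (
		emitter yaml_emitter_t
		event   yaml_event_t
		out     []byte
	)
	yaml_emitter_initialize(&emitter)
	yaml_emitter_set_output_string(&emitter, &out)
	yaml_emitter_set_unicode(&emitter, true)

	yaml_stream_start_event_initialize(&event, yaml_UTF8_ENCODING)
	yaml_emitter_emit(&emitter, &event)
	yaml_document_start_event_initialize(&event, nil, nil, true)
	yaml_emitter_emit(&emitter, &event)
	yaml_scalar_event_initialize(&event, nil, nil, []byte(value), true, true, yaml_PLAIN_SCALAR_STYLE)
	yaml_emitter_emit(&emitter, &event)
	yaml_document_end_event_initialize(&event, true)
	yaml_emitter_emit(&emitter, &event)
	yaml_stream_end_event_initialize(&event)
	yaml_emitter_emit(&emitter, &event)

	yaml_emitter_delete(&emitter)
	return out
}
```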
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
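yaml_emitter_emit_stream_start above pins best_indent to 2 and best_width to 80 whenever the caller leaves them unset, which is why this package always renders nested block content with two-space indentation. A quick check through the public Marshal API (a sketch; the output comment is the expected rendering, not a guarantee):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	doc := map[string]map[string]int{
		"outer": {"inner": 1},
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// expected:
	// outer:
	//   inner: 1
}
```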
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
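The flow-sequence and flow-mapping states above are reached from the public API via the `,flow` struct-field tag (the encoder switches to yaml_FLOW_SEQUENCE_STYLE / yaml_FLOW_MAPPING_STYLE when a field asks for it; the tag parsing itself happens in getStructInfo, which is not part of this hunk). A small sketch with made-up type and field names:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type service struct {
	Name  string `yaml:"name"`
	Ports []int  `yaml:"ports,flow"` // rendered by the flow-sequence states above
}

func main() {
	out, err := yaml.Marshal(service{Name: "web", Ports: []int{80, 443}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// expected:
	// name: web
	// ports: [80, 443]
}
```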
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
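Note the `indentless` argument passed to yaml_emitter_increase_indent in the block-sequence case above: when a sequence sits directly under a mapping key, its items are not indented past the key. That is why Marshal output from this package looks like the sketch below (public API; the comment shows the expected rendering):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string][]string{"items": {"a", "b"}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// expected:
	// items:
	// - a
	// - b
}
```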
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
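yaml_emitter_emit_sequence_start and yaml_emitter_emit_mapping_start consult the empty-collection checks above, so an empty slice or map is forced into flow style rather than an empty block collection. Seen from the public API (a sketch; expected output in the comments):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]interface{}{
		"empty_list": []int{},
		"empty_map":  map[string]int{},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// expected:
	// empty_list: []
	// empty_map: {}
}
```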
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an achor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
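yaml_emitter_check_simple_key caps simple keys at 128 bytes; longer (or multiline) keys fall back to the explicit `? key` / `: value` form in the block-mapping states. A sketch via the public API (the comment shows the expected shape, with the key abbreviated):

```go
package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	longKey := strings.Repeat("k", 200) // longer than the 128-byte simple-key limit
	out, err := yaml.Marshal(map[string]string{longKey: "v"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// expected shape:
	// ? kkkk...k
	// : v
}
```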
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
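yaml_emitter_process_tag is what writes shorthand tags such as `!!binary`. The encoder attaches that tag when a Go string is not valid UTF-8: the value is base64-encoded and resolved to the binary tag (in resolve, which is not part of this hunk). A sketch from the public API; the exact base64 payload in the comment is illustrative:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{"data": "\xff\xfe"}) // not valid UTF-8
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// expected shape:
	// data: !!binary //4=
}
```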
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
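yaml_emitter_analyze_scalar is what decides that values with leading or trailing spaces, indicator characters, and similar cannot stay plain, so scalar-style selection falls back to quoting. A quick sketch against the public API (expected rendering in the comments):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"a": " padded ",     // leading/trailing space: plain style not allowed
		"b": "plain",        // stays a plain scalar
		"c": "#not-comment", // leading '#': would look like a comment if plain
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// expected:
	// a: ' padded '
	// b: plain
	// c: '#not-comment'
}
```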
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 00000000000..84f84995517 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 00000000000..0a7037ad1b2 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
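The grammar above distinguishes implicit documents (a bare block node) from explicit ones introduced by `---`; the parser accepts both, so from the public API the two forms unmarshal identically. A small sketch:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var implicit, explicit map[string]int

	// implicit_document: a block node with no leading directives or "---"
	if err := yaml.Unmarshal([]byte("x: 1\n"), &implicit); err != nil {
		panic(err)
	}
	// explicit_document: DOCUMENT-START ("---") before the block node
	if err := yaml.Unmarshal([]byte("---\nx: 1\n"), &explicit); err != nil {
		panic(err)
	}
	fmt.Println(implicit["x"], explicit["x"]) // expected: 1 1
}
```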
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
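The flow_sequence_entry_mapping states above are what let a bare "key: value" pair inside a flow sequence act as a one-entry mapping, and the empty-scalar helper documented next is what stands in for missing values such as "{x: , ...}". A small illustration of both effects through the package's public API, decoding into untyped Go values (expected output shown as comments):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// A single "key: value" pair inside a flow sequence becomes its own
	// one-entry mapping.
	var seq []interface{}
	if err := yaml.Unmarshal([]byte("[a: 1, plain, b: 2]"), &seq); err != nil {
		panic(err)
	}
	fmt.Println(seq) // [map[a:1] plain map[b:2]]

	// A missing flow-mapping value is emitted as an empty scalar, which
	// later resolves to nil.
	var m map[string]interface{}
	if err := yaml.Unmarshal([]byte("{x: , y: 2}"), &m); err != nil {
		panic(err)
	}
	fmt.Println(m["x"], m["y"]) // <nil> 2
}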
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 00000000000..f450791717b --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,394 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
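The constants and yaml_parser_determine_encoding just below perform this BOM check before any transcoding happens. A minimal, hand-driven sketch of the detection (illustrative only; the raw buffer is filled directly instead of through a read handler):

// With a UTF-16LE byte order mark the reader switches encodings and skips
// the two BOM bytes before the decode loop runs.
func exampleDetectEncoding() {
	p := &yaml_parser_t{
		raw_buffer: []byte{0xff, 0xfe, 'a', 0x00}, // UTF-16LE BOM, then "a"
		eof:        true,                          // treat the buffer as the whole input
	}
	yaml_parser_determine_encoding(p)
	// Now p.encoding == yaml_UTF16LE_ENCODING, and p.raw_buffer_pos and
	// p.offset have advanced past the two BOM bytes.
}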
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. 
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 00000000000..93a86327434 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
+ if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt(plain[3:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, -int(intv) + } else { + return yaml_INT_TAG, -intv + } + } + } + // XXX Handle timestamps here. + + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + if tag == yaml_BINARY_TAG { + return yaml_BINARY_TAG, in + } + if utf8.ValidString(in) { + return yaml_STR_TAG, in + } + return yaml_BINARY_TAG, encodeBase64(in) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 00000000000..25808000f28 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2710 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). 
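Before the scanner notes continue, the effect of the resolve table above is easiest to see through the package's public entry point: untagged plain scalars pick up their implicit YAML 1.1 types (booleans, integers in several bases with optional underscores, floats, nulls), and anything else falls back to a string. A small illustration (expected values shown as a comment):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v map[string]interface{}
	doc := "a: yes\nb: 0x1F\nc: 1_000\nd: .inf\ne: ~\n"
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	// resolve gives these plain scalars their implicit types:
	// true 31 1000 +Inf <nil>
	fmt.Println(v["a"], v["b"], v["c"], v["d"], v["e"])
}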
+// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. 
A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
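yaml_parser_scan above is the token-level analogue of yaml_parser_parse: each call hands back the next token until STREAM-END. A package-internal sketch of walking the token stream (again assuming the yaml_parser_initialize and yaml_parser_set_input_string helpers defined elsewhere in the package; illustrative only):

// Illustrative only: print every token kind the scanner produces for input.
func exampleDumpTokens(input []byte) {
	var parser yaml_parser_t
	if !yaml_parser_initialize(&parser) {
		return
	}
	yaml_parser_set_input_string(&parser, input)

	for {
		var token yaml_token_t
		if !yaml_parser_scan(&parser, &token) {
			return // parser.problem holds the scanner error
		}
		fmt.Println(token.typ) // token kind: stream/document markers, KEY, VALUE, SCALAR, ...
		if token.typ == yaml_STREAM_END_TOKEN {
			return
		}
	}
}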
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
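+		// A required key is one that started at the block indentation column
+		// (see yaml_parser_save_simple_key); if it must be dropped before its
+		// ':' was found, no valid block mapping entry can be formed here.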
+ if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. 
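+	// Unrolling to column -1 pops every remaining indentation level and
+	// appends a BLOCK-END token for each block collection still open, so
+	// they are all closed before the STREAM-END token is emitted.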
+ if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. 
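+// A flow entry is the ',' separating items inside '[...]' and '{...}';
+// a new simple key may start right after it, as in "{a: 1, b: 2}".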
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. 
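+		// The KEY token is inserted retroactively at the queue position that
+		// was recorded when the key scalar itself was scanned, since "a: b"
+		// is only known to be a mapping once this ':' is reached.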
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
+ var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
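+		// For example, "%TAG !yaml! tag:yaml.org,2002:" yields
+		// handle "!yaml!" and prefix "tag:yaml.org,2002:".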
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
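+	// A handle that is not closed by a second '!' is only acceptable when
+	// scanning a tag token, where the caller treats it as the start of the
+	// suffix; see yaml_parser_scan_tag.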
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
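+		// For example "%C3%A9" decodes to the octets 0xC3 0xA9 (UTF-8 for
+		// 'é'): the leading octet sets w = width(0xC3) = 2, and one
+		// continuation octet of the form 10xxxxxx is then expected.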
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
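+	// At this point the block scalar header has been consumed: the '|' or '>'
+	// indicator, an optional chomping indicator ('+' keep trailing breaks,
+	// '-' strip them) and an optional explicit indentation digit, in either
+	// order (e.g. "|2-"), plus any trailing blanks and comment.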
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
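+		// *indent == 0 means no explicit indentation indicator was given;
+		// leading spaces are then eaten freely and the level is derived below
+		// from the longest run seen (max_indent), but never less than
+		// parser.indent+1.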
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
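+				// Single-character escapes append their expansion directly;
+				// '\x', '\u' and '\U' instead set code_length to 2, 4 or 8 hex
+				// digits, which are decoded below and re-encoded as UTF-8.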
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
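+				// The first break is kept apart (leading_break) from any later
+				// ones (trailing_breaks) so the folding logic below can either
+				// join the lines with a single space or keep the extra newlines.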
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+ whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab character that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violate indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 00000000000..5958822f9c6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 00000000000..190362f25df --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. 
+ if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 00000000000..36d6b883a6c --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. 
+type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only unmarshalled if they are exported (have an upper case +// first letter), and are unmarshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. 
+// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 00000000000..d60a6b6b003 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. 
+ yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occured. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. 
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 00000000000..8110ce3c37a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. 
+ output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/vendor.json b/vendor/vendor.json index f28df25d87e..c75baaaddb3 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -181,6 +181,12 @@ "path": "github.com/boltdb/bolt", "revision": "c6ba97b89e0454fec9aa92e1d33a4e2c5fc1f631" }, + { + "checksumSHA1": "InIrfOI7Ys1QqZpCgTB4yW1G32M=", + "path": "github.com/burntsushi/toml", + "revision": "99064174e013895bbd9b025c31100bd1d9b590ca", + "revisionTime": "2016-07-17T15:07:09Z" + }, { "checksumSHA1": "qNHg4l2+bg50XQUR094857jPOoQ=", "path": "github.com/circonus-labs/circonus-gometrics", @@ -309,7 +315,7 @@ "revision": "02caa73df411debed164f520a6a1304778f8b88c", "revisionTime": "2016-05-28T10:48:36Z" }, - { + { "checksumSHA1": "BwY1agr5vzusgMT4bM7UeD3YVpw=", "path": "github.com/docker/engine-api/types/mount", "revision": "4290f40c056686fcaa5c9caf02eac1dde9315adf", @@ -477,14 +483,89 @@ "revision": "a557574d6c024ed6e36acc8b610f5f211c91568a" }, { + "checksumSHA1": "+JUQvWp1JUVeRT5weWL9hi6Fu4Y=", + "path": "github.com/hashicorp/consul-template/child", + "revision": "a8f654d612969519c9fde20bc8eb21418d763f73", + "revisionTime": "2016-10-03T19:46:06Z" + }, + { + "checksumSHA1": "tSuVPDoqSzoWmo2oEF5NGkIJHxQ=", + "path": "github.com/hashicorp/consul-template/config", + "revision": "a8f654d612969519c9fde20bc8eb21418d763f73", + "revisionTime": "2016-10-03T19:46:06Z" + }, + { + "checksumSHA1": "3xeTTZejqagwfwaYT8M3rq1Ixko=", + "path": "github.com/hashicorp/consul-template/dependency", + "revision": "a8f654d612969519c9fde20bc8eb21418d763f73", + "revisionTime": "2016-10-03T19:46:06Z" + }, + { + "checksumSHA1": "Z1QKRRJ/6/FjRIw1LJXTtOUxxX8=", + "path": "github.com/hashicorp/consul-template/manager", + "revision": "a8f654d612969519c9fde20bc8eb21418d763f73", + "revisionTime": "2016-10-03T19:46:06Z" + }, + { + "checksumSHA1": 
"ByMIKPf7bXpyhhy80IjKLKYrjpo=", + "path": "github.com/hashicorp/consul-template/signals", + "revision": "a8f654d612969519c9fde20bc8eb21418d763f73", + "revisionTime": "2016-10-03T19:46:06Z" + }, + { + "checksumSHA1": "pvmWk53vtXCCL6p+c3XU2aQfkx4=", + "path": "github.com/hashicorp/consul-template/template", + "revision": "a8f654d612969519c9fde20bc8eb21418d763f73", + "revisionTime": "2016-10-03T19:46:06Z" + }, + { + "checksumSHA1": "sR2e74n4zQEUDR6ssjvMr9lP7sI=", + "path": "github.com/hashicorp/consul-template/watch", + "revision": "a8f654d612969519c9fde20bc8eb21418d763f73", + "revisionTime": "2016-10-03T19:46:06Z" + }, + { + "checksumSHA1": "kWbL0V4o8vJL75mzeQzhF6p5jiQ=", + "path": "github.com/hashicorp/consul/acl", + "revision": "a189091a3530051285c12c726ca28ea55e015336", + "revisionTime": "2016-09-14T16:11:34Z" + }, + { + "checksumSHA1": "BMEJLBjl91k5k3vMMzzT7G2SO1U=", "comment": "v0.6.3-363-gae32a3c", "path": "github.com/hashicorp/consul/api", - "revision": "ae32a3ceae9fddb431b933ed7b2a82110e41e1bf" + "revision": "a189091a3530051285c12c726ca28ea55e015336", + "revisionTime": "2016-09-14T16:11:34Z" + }, + { + "checksumSHA1": "NrK9uDGSZ2WKMNLYicxDYmpRS3I=", + "path": "github.com/hashicorp/consul/consul/structs", + "revision": "a189091a3530051285c12c726ca28ea55e015336", + "revisionTime": "2016-09-14T16:11:34Z" + }, + { + "checksumSHA1": "0DPAA2cTBjrCGgXaxXil0vILcFs=", + "path": "github.com/hashicorp/consul/lib", + "revision": "a189091a3530051285c12c726ca28ea55e015336", + "revisionTime": "2016-09-14T16:11:34Z" + }, + { + "checksumSHA1": "BuOGVqQNO+iuqoGKAGGxCxC51ro=", + "path": "github.com/hashicorp/consul/testutil", + "revision": "a189091a3530051285c12c726ca28ea55e015336", + "revisionTime": "2016-09-14T16:11:34Z" }, { "comment": "v0.6.3-363-gae32a3c", "path": "github.com/hashicorp/consul/tlsutil", - "revision": "ae32a3ceae9fddb431b933ed7b2a82110e41e1bf" + "revision": "a189091a3530051285c12c726ca28ea55e015336", + "revisionTime": "2016-09-14T16:11:34Z" + }, + { + "checksumSHA1": "oQBVnohHMtF/4HuDCYKkhy+ijZ4=", + "path": "github.com/hashicorp/consul/types", + "revision": "a189091a3530051285c12c726ca28ea55e015336", + "revisionTime": "2016-09-14T16:11:34Z" }, { "path": "github.com/hashicorp/errwrap", @@ -552,6 +633,12 @@ "path": "github.com/hashicorp/go-version", "revision": "2e7f5ea8e27bb3fdf9baa0881d16757ac4637332" }, + { + "checksumSHA1": "d9PxF1XQGLMJZRct2R8qVM/eYlE=", + "path": "github.com/hashicorp/golang-lru", + "revision": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4", + "revisionTime": "2016-02-07T21:47:19Z" + }, { "path": "github.com/hashicorp/golang-lru/simplelru", "revision": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -695,6 +782,12 @@ "path": "github.com/mattn/go-isatty", "revision": "56b76bdf51f7708750eac80fa38b952bb9f32639" }, + { + "checksumSHA1": "ajImwVZHzsI+aNwsgzCSFSbmJqs=", + "path": "github.com/mattn/go-shellwords", + "revision": "f4e566c536cf69158e808ec28ef4182a37fdc981", + "revisionTime": "2015-03-21T17:42:21Z" + }, { "path": "github.com/miekg/dns", "revision": "7e024ce8ce18b21b475ac6baf8fa3c42536bf2fa" @@ -896,6 +989,12 @@ "path": "gopkg.in/tomb.v2", "revision": "14b3d72120e8d10ea6e6b7f87f7175734b1faab8", "revisionTime": "2014-06-26T14:46:23Z" + }, + { + "checksumSHA1": "12GqsW8PiRPnezDDy0v4brZrndM=", + "path": "gopkg.in/yaml.v2", + "revision": "a5b47d31c556af34a302ce5d659e6fea44d90de0", + "revisionTime": "2016-09-28T15:37:09Z" } ], 
"rootPath": "github.com/hashicorp/nomad"